Remove support for non-threaded VNC server
[qemu/cris-port.git] / exec.c
blob8244d54a85bc412512bfc1118d22ca528a43b736
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "osdep.h"
33 #include "kvm.h"
34 #include "hw/xen.h"
35 #include "qemu-timer.h"
36 #include "memory.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <machine/profile.h>
48 #define _KERNEL
49 #include <sys/user.h>
50 #undef _KERNEL
51 #undef sigqueue
52 #include <libutil.h>
53 #endif
54 #endif
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
57 #include "trace.h"
58 #endif
60 #include "cputlb.h"
62 #define WANT_EXEC_OBSOLETE
63 #include "exec-obsolete.h"
65 //#define DEBUG_TB_INVALIDATE
66 //#define DEBUG_FLUSH
67 //#define DEBUG_UNASSIGNED
69 /* make various TB consistency checks */
70 //#define DEBUG_TB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
77 #undef DEBUG_TB_CHECK
78 #endif
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32) && !defined(_WIN64)
97 #define code_gen_section \
98 __attribute__((aligned (16)))
99 #else
100 #define code_gen_section \
101 __attribute__((aligned (32)))
102 #endif
104 uint8_t code_gen_prologue[1024] code_gen_section;
105 static uint8_t *code_gen_buffer;
106 static unsigned long code_gen_buffer_size;
107 /* threshold to flush the translated code buffer */
108 static unsigned long code_gen_buffer_max_size;
109 static uint8_t *code_gen_ptr;
111 #if !defined(CONFIG_USER_ONLY)
112 int phys_ram_fd;
113 static int in_migration;
115 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117 static MemoryRegion *system_memory;
118 static MemoryRegion *system_io;
120 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
121 static MemoryRegion io_mem_subpage_ram;
123 #endif
125 CPUArchState *first_cpu;
126 /* current CPU in the current thread. It is only valid inside
127 cpu_exec() */
128 DEFINE_TLS(CPUArchState *,cpu_single_env);
129 /* 0 = Do not count executed instructions.
130 1 = Precise instruction counting.
131 2 = Adaptive rate instruction counting. */
132 int use_icount = 0;
/* Per guest page bookkeeping for the translation-block machinery.
   One PageDesc exists for every guest page that currently holds
   translated code (allocated lazily by page_find_alloc()). */
134 typedef struct PageDesc {
135     /* list of TBs intersecting this ram page */
136     TranslationBlock *first_tb;
137     /* in order to optimize self modifying code, we count the number
138        of lookups we do to a given page to use a bitmap */
139     unsigned int code_write_count;
    /* one bit per 8 bytes of the page; built by build_page_bitmap() once
       code_write_count crosses SMC_BITMAP_USE_THRESHOLD */
140     uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
    /* user-mode only: PAGE_* protection/validity flags for this page */
142     unsigned long flags;
143 #endif
144 } PageDesc;
146 /* In system mode we want L1_MAP to be based on ram offsets,
147 while in user mode we want it to be based on virtual addresses. */
148 #if !defined(CONFIG_USER_ONLY)
149 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
150 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
151 #else
152 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
153 #endif
154 #else
155 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
156 #endif
158 /* Size of the L2 (and L3, etc) page tables. */
159 #define L2_BITS 10
160 #define L2_SIZE (1 << L2_BITS)
162 #define P_L2_LEVELS \
163 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
165 /* The bits remaining after N lower levels of page tables. */
166 #define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
169 #if V_L1_BITS_REM < 4
170 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
171 #else
172 #define V_L1_BITS V_L1_BITS_REM
173 #endif
175 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
177 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
179 uintptr_t qemu_real_host_page_size;
180 uintptr_t qemu_host_page_size;
181 uintptr_t qemu_host_page_mask;
183 /* This is a multi-level map on the virtual address space.
184 The bottom level has pointers to PageDesc. */
185 static void *l1_map[V_L1_SIZE];
187 #if !defined(CONFIG_USER_ONLY)
188 typedef struct PhysPageEntry PhysPageEntry;
190 static MemoryRegionSection *phys_sections;
191 static unsigned phys_sections_nb, phys_sections_nb_alloc;
192 static uint16_t phys_section_unassigned;
193 static uint16_t phys_section_notdirty;
194 static uint16_t phys_section_rom;
195 static uint16_t phys_section_watch;
197 struct PhysPageEntry {
198 uint16_t is_leaf : 1;
199 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
200 uint16_t ptr : 15;
203 /* Simple allocator for PhysPageEntry nodes */
204 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
205 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
207 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
209 /* This is a multi-level map on the physical address space.
210 The bottom level has pointers to MemoryRegionSections. */
211 static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
213 static void io_mem_init(void);
214 static void memory_map_init(void);
216 static MemoryRegion io_mem_watch;
217 #endif
219 /* statistics */
220 static int tb_flush_count;
221 static int tb_phys_invalidate_count;
#ifdef _WIN32
/* Make the host memory range [addr, addr+size) executable (Windows).
   VirtualProtect rounds to whole pages internally. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make the host memory range [addr, addr+size) executable (POSIX).
   The range is widened to whole host pages first, as mprotect()
   requires page-aligned arguments. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    /* Previously the mprotect() result was ignored; a silent failure
       here would later surface as an opaque SIGSEGV when translated
       code is executed from a non-executable buffer.  Fail loudly. */
    if (mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        perror("mprotect");
        abort();
    }
}
#endif
/* Determine the host page size and, on BSD user-mode builds, mark every
   already-mapped host region as PAGE_RESERVED so the guest cannot map
   over it.  Must run before any page_find_alloc() use. */
249 static void page_init(void)
251 /* NOTE: we can always suppose that qemu_host_page_size >=
252 TARGET_PAGE_SIZE */
253 #ifdef _WIN32
255 SYSTEM_INFO system_info;
257 GetSystemInfo(&system_info);
258 qemu_real_host_page_size = system_info.dwPageSize;
260 #else
261 qemu_real_host_page_size = getpagesize();
262 #endif
    /* qemu_host_page_size may have been preset by the caller; otherwise
       default it to the real host page size, floored at TARGET_PAGE_SIZE. */
263 if (qemu_host_page_size == 0)
264 qemu_host_page_size = qemu_real_host_page_size;
265 if (qemu_host_page_size < TARGET_PAGE_SIZE)
266 qemu_host_page_size = TARGET_PAGE_SIZE;
267 qemu_host_page_mask = ~(qemu_host_page_size - 1);
269 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
271 #ifdef HAVE_KINFO_GETVMMAP
    /* FreeBSD >= 7.1: enumerate existing mappings via kinfo_getvmmap(). */
272 struct kinfo_vmentry *freep;
273 int i, cnt;
275 freep = kinfo_getvmmap(getpid(), &cnt);
276 if (freep) {
277 mmap_lock();
278 for (i = 0; i < cnt; i++) {
279 unsigned long startaddr, endaddr;
281 startaddr = freep[i].kve_start;
282 endaddr = freep[i].kve_end;
283 if (h2g_valid(startaddr)) {
284 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
286 if (h2g_valid(endaddr)) {
287 endaddr = h2g(endaddr);
288 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
289 } else {
290 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
    /* mapping extends past the guest address space: reserve to the top */
291 endaddr = ~0ul;
292 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
293 #endif
297 free(freep);
298 mmap_unlock();
300 #else
    /* older BSDs: fall back to parsing the Linux-compat maps file */
301 FILE *f;
303 last_brk = (unsigned long)sbrk(0);
305 f = fopen("/compat/linux/proc/self/maps", "r");
306 if (f) {
307 mmap_lock();
309 do {
310 unsigned long startaddr, endaddr;
311 int n;
313 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
315 if (n == 2 && h2g_valid(startaddr)) {
316 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
318 if (h2g_valid(endaddr)) {
319 endaddr = h2g(endaddr);
320 } else {
321 endaddr = ~0ul;
323 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
325 } while (!feof(f));
327 fclose(f);
328 mmap_unlock();
330 #endif
332 #endif
/* Walk the multi-level l1_map radix tree down to the PageDesc for page
   'index' (a page number, not an address).  If 'alloc' is non-zero,
   missing intermediate tables and the leaf PageDesc array are created;
   otherwise NULL is returned when any level is absent. */
335 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
337 PageDesc *pd;
338 void **lp;
339 int i;
341 #if defined(CONFIG_USER_ONLY)
342 /* We can't use g_malloc because it may recurse into a locked mutex. */
343 # define ALLOC(P, SIZE) \
344 do { \
345 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
346 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
347 } while (0)
348 #else
349 # define ALLOC(P, SIZE) \
350 do { P = g_malloc0(SIZE); } while (0)
351 #endif
353 /* Level 1. Always allocated. */
354 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
356 /* Level 2..N-1. */
357 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
358 void **p = *lp;
360 if (p == NULL) {
361 if (!alloc) {
362 return NULL;
364 ALLOC(p, sizeof(void *) * L2_SIZE);
365 *lp = p;
368 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    /* bottom level: an array of L2_SIZE PageDesc entries */
371 pd = *lp;
372 if (pd == NULL) {
373 if (!alloc) {
374 return NULL;
376 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
377 *lp = pd;
380 #undef ALLOC
382 return pd + (index & (L2_SIZE - 1));
/* Read-only lookup wrapper: never allocates, may return NULL. */
385 static inline PageDesc *page_find(tb_page_addr_t index)
387 return page_find_alloc(index, 0);
390 #if !defined(CONFIG_USER_ONLY)
/* Grow the phys_map_nodes pool so at least 'nodes' more entries can be
   allocated without reallocation (doubling growth, minimum 16). */
392 static void phys_map_node_reserve(unsigned nodes)
394 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
395 typedef PhysPageEntry Node[L2_SIZE];
396 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
397 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
398 phys_map_nodes_nb + nodes);
399 phys_map_nodes = g_renew(Node, phys_map_nodes,
400 phys_map_nodes_nb_alloc);
/* Take the next node from the pool, initializing all of its entries to
   non-leaf NIL pointers.  Capacity must have been reserved beforehand. */
404 static uint16_t phys_map_node_alloc(void)
406 unsigned i;
407 uint16_t ret;
409 ret = phys_map_nodes_nb++;
410 assert(ret != PHYS_MAP_NODE_NIL);
411 assert(ret != phys_map_nodes_nb_alloc);
412 for (i = 0; i < L2_SIZE; ++i) {
413 phys_map_nodes[ret][i].is_leaf = 0;
414 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
416 return ret;
/* Discard all nodes; storage is reused, not freed. */
419 static void phys_map_nodes_reset(void)
421 phys_map_nodes_nb = 0;
/* Recursive worker for phys_page_set(): mark the range described by
   *index/*nb (in target pages) as pointing at section 'leaf', creating
   intermediate nodes as needed.  'level' counts down to 0 at the leaves;
   *index and *nb are advanced in place as entries are consumed. */
425 static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
426 target_phys_addr_t *nb, uint16_t leaf,
427 int level)
429 PhysPageEntry *p;
430 int i;
431 target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);
    /* lazily materialize this node; level-0 children start out as
       leaves pointing at the unassigned section */
433 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
434 lp->ptr = phys_map_node_alloc();
435 p = phys_map_nodes[lp->ptr];
436 if (level == 0) {
437 for (i = 0; i < L2_SIZE; i++) {
438 p[i].is_leaf = 1;
439 p[i].ptr = phys_section_unassigned;
442 } else {
443 p = phys_map_nodes[lp->ptr];
445 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
447 while (*nb && lp < &p[L2_SIZE]) {
    /* an aligned, fully covered subtree can be collapsed to one leaf */
448 if ((*index & (step - 1)) == 0 && *nb >= step) {
449 lp->is_leaf = true;
450 lp->ptr = leaf;
451 *index += step;
452 *nb -= step;
453 } else {
454 phys_page_set_level(lp, index, nb, leaf, level - 1);
456 ++lp;
/* Map 'nb' target pages starting at page number 'index' to the memory
   region section 'leaf' in the physical page table. */
460 static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
461 uint16_t leaf)
463 /* Wildly overreserve - it doesn't matter much. */
464 phys_map_node_reserve(3 * P_L2_LEVELS);
466 phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
/* Look up the MemoryRegionSection covering target page 'index'.
   Always returns a valid pointer; unmapped pages resolve to the
   'unassigned' section rather than NULL. */
469 MemoryRegionSection *phys_page_find(target_phys_addr_t index)
471 PhysPageEntry lp = phys_map;
472 PhysPageEntry *p;
473 int i;
474 uint16_t s_index = phys_section_unassigned;
476 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
477 if (lp.ptr == PHYS_MAP_NODE_NIL) {
478 goto not_found;
480 p = phys_map_nodes[lp.ptr];
481 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
484 s_index = lp.ptr;
485 not_found:
486 return &phys_sections[s_index];
/* True if 'mr' is none of the special backing regions (RAM, ROM,
   ROM-device, notdirty, watchpoint) — i.e. a genuine unassigned or
   device region that must go through the slow I/O path. */
489 bool memory_region_is_unassigned(MemoryRegion *mr)
491 return mr != &io_mem_ram && mr != &io_mem_rom
492 && mr != &io_mem_notdirty && !mr->rom_device
493 && mr != &io_mem_watch;
496 #define mmap_lock() do { } while(0)
497 #define mmap_unlock() do { } while(0)
498 #endif
500 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
502 #if defined(CONFIG_USER_ONLY)
503 /* Currently it is not recommended to allocate big chunks of data in
504 user mode. It will change when a dedicated libc will be used */
505 #define USE_STATIC_CODE_GEN_BUFFER
506 #endif
508 #ifdef USE_STATIC_CODE_GEN_BUFFER
509 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
510 __attribute__((aligned (CODE_GEN_ALIGN)));
511 #endif
/* Allocate (or adopt the static) translation-code buffer of 'tb_size'
   bytes (0 = pick a default), make it executable, and size the TB array.
   Host-specific mmap flags keep the buffer within direct-branch range of
   the generated code where the architecture requires it. */
513 static void code_gen_alloc(unsigned long tb_size)
515 #ifdef USE_STATIC_CODE_GEN_BUFFER
516 code_gen_buffer = static_code_gen_buffer;
517 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
518 map_exec(code_gen_buffer, code_gen_buffer_size);
519 #else
520 code_gen_buffer_size = tb_size;
521 if (code_gen_buffer_size == 0) {
522 #if defined(CONFIG_USER_ONLY)
523 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
524 #else
525 /* XXX: needs adjustments */
526 code_gen_buffer_size = (unsigned long)(ram_size / 4);
527 #endif
529 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
530 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
531 /* The code gen buffer location may have constraints depending on
532 the host cpu and OS */
533 #if defined(__linux__)
535 int flags;
536 void *start = NULL;
538 flags = MAP_PRIVATE | MAP_ANONYMOUS;
539 #if defined(__x86_64__)
540 flags |= MAP_32BIT;
541 /* Cannot map more than that */
542 if (code_gen_buffer_size > (800 * 1024 * 1024))
543 code_gen_buffer_size = (800 * 1024 * 1024);
544 #elif defined(__sparc_v9__)
545 // Map the buffer below 2G, so we can use direct calls and branches
546 flags |= MAP_FIXED;
547 start = (void *) 0x60000000UL;
548 if (code_gen_buffer_size > (512 * 1024 * 1024))
549 code_gen_buffer_size = (512 * 1024 * 1024);
550 #elif defined(__arm__)
551 /* Keep the buffer no bigger than 16MB to branch between blocks */
552 if (code_gen_buffer_size > 16 * 1024 * 1024)
553 code_gen_buffer_size = 16 * 1024 * 1024;
554 #elif defined(__s390x__)
555 /* Map the buffer so that we can use direct calls and branches. */
556 /* We have a +- 4GB range on the branches; leave some slop. */
557 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
558 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
560 start = (void *)0x90000000UL;
561 #endif
562 code_gen_buffer = mmap(start, code_gen_buffer_size,
563 PROT_WRITE | PROT_READ | PROT_EXEC,
564 flags, -1, 0);
565 if (code_gen_buffer == MAP_FAILED) {
566 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
567 exit(1);
570 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
571 || defined(__DragonFly__) || defined(__OpenBSD__) \
572 || defined(__NetBSD__)
574 int flags;
575 void *addr = NULL;
576 flags = MAP_PRIVATE | MAP_ANONYMOUS;
577 #if defined(__x86_64__)
578 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
579 * 0x40000000 is free */
580 flags |= MAP_FIXED;
581 addr = (void *)0x40000000;
582 /* Cannot map more than that */
583 if (code_gen_buffer_size > (800 * 1024 * 1024))
584 code_gen_buffer_size = (800 * 1024 * 1024);
585 #elif defined(__sparc_v9__)
586 // Map the buffer below 2G, so we can use direct calls and branches
587 flags |= MAP_FIXED;
588 addr = (void *) 0x60000000UL;
589 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
590 code_gen_buffer_size = (512 * 1024 * 1024);
592 #endif
593 code_gen_buffer = mmap(addr, code_gen_buffer_size,
594 PROT_WRITE | PROT_READ | PROT_EXEC,
595 flags, -1, 0);
596 if (code_gen_buffer == MAP_FAILED) {
597 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
598 exit(1);
601 #else
    /* generic fallback: plain heap allocation made executable */
602 code_gen_buffer = g_malloc(code_gen_buffer_size);
603 map_exec(code_gen_buffer, code_gen_buffer_size);
604 #endif
605 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
606 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* keep headroom for one maximally-sized TB at the end of the buffer */
607 code_gen_buffer_max_size = code_gen_buffer_size -
608 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
609 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
610 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
613 /* Must be called before using the QEMU cpus. 'tb_size' is the size
614 (in bytes) allocated to the translation buffer. Zero means default
615 size. */
616 void tcg_exec_init(unsigned long tb_size)
618 cpu_gen_init();
619 code_gen_alloc(tb_size);
620 code_gen_ptr = code_gen_buffer;
    /* let debuggers/profilers know about the JIT code region */
621 tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
622 page_init();
623 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
624 /* There's no guest base to take into account, so go ahead and
625 initialize the prologue now. */
626 tcg_prologue_init(&tcg_ctx);
627 #endif
/* TCG is considered enabled once the code buffer has been allocated
   by tcg_exec_init()/code_gen_alloc(). */
630 bool tcg_enabled(void)
632 return code_gen_buffer != NULL;
/* One-time global init: set up the memory and I/O subsystems
   (system emulation only; no-op for user mode). */
635 void cpu_exec_init_all(void)
637 #if !defined(CONFIG_USER_ONLY)
638 memory_map_init();
639 io_mem_init();
640 #endif
643 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
/* Migration post-load hook for the common CPU state: scrub the retired
   interrupt bit and flush the TLB so stale translations are dropped. */
645 static int cpu_common_post_load(void *opaque, int version_id)
647 CPUArchState *env = opaque;
649 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
650 version_id is increased. */
651 env->interrupt_request &= ~0x01;
652 tlb_flush(env, 1);
654 return 0;
/* Migration description of the architecture-independent CPU fields. */
657 static const VMStateDescription vmstate_cpu_common = {
658 .name = "cpu_common",
659 .version_id = 1,
660 .minimum_version_id = 1,
661 .minimum_version_id_old = 1,
662 .post_load = cpu_common_post_load,
663 .fields = (VMStateField []) {
664 VMSTATE_UINT32(halted, CPUArchState),
665 VMSTATE_UINT32(interrupt_request, CPUArchState),
666 VMSTATE_END_OF_LIST()
669 #endif
671 CPUArchState *qemu_get_cpu(int cpu)
673 CPUArchState *env = first_cpu;
675 while (env) {
676 if (env->cpu_index == cpu)
677 break;
678 env = env->next_cpu;
681 return env;
/* Register a freshly created CPU: append it to the global first_cpu
   list, assign the next free cpu_index, initialize per-CPU debug lists,
   and hook it into save/restore (system mode). */
684 void cpu_exec_init(CPUArchState *env)
686 CPUArchState **penv;
687 int cpu_index;
689 #if defined(CONFIG_USER_ONLY)
    /* user mode: the CPU list may be mutated from several threads */
690 cpu_list_lock();
691 #endif
692 env->next_cpu = NULL;
693 penv = &first_cpu;
694 cpu_index = 0;
    /* walk to the list tail; index = current number of CPUs */
695 while (*penv != NULL) {
696 penv = &(*penv)->next_cpu;
697 cpu_index++;
699 env->cpu_index = cpu_index;
700 env->numa_node = 0;
701 QTAILQ_INIT(&env->breakpoints);
702 QTAILQ_INIT(&env->watchpoints);
703 #ifndef CONFIG_USER_ONLY
704 env->thread_id = qemu_get_thread_id();
705 #endif
706 *penv = env;
707 #if defined(CONFIG_USER_ONLY)
708 cpu_list_unlock();
709 #endif
710 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
711 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
712 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
713 cpu_save, cpu_load, env);
714 #endif
717 /* Allocate a new translation block. Flush the translation buffer if
718 too many translation blocks or too much generated code. */
719 static TranslationBlock *tb_alloc(target_ulong pc)
721 TranslationBlock *tb;
    /* NULL means the caller must tb_flush() and retry */
723 if (nb_tbs >= code_gen_max_blocks ||
724 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
725 return NULL;
726 tb = &tbs[nb_tbs++];
727 tb->pc = pc;
728 tb->cflags = 0;
729 return tb;
/* Return a TB to the allocator.  Only the most recently allocated TB
   can actually be reclaimed; anything else is silently leaked until
   the next tb_flush(). */
732 void tb_free(TranslationBlock *tb)
734 /* In practice this is mostly used for single use temporary TB
735 Ignore the hard cases and just back up if this TB happens to
736 be the last one generated. */
737 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
738 code_gen_ptr = tb->tc_ptr;
739 nb_tbs--;
/* Drop a page's SMC code bitmap and reset its write counter so the
   bitmap heuristic starts over. */
743 static inline void invalidate_page_bitmap(PageDesc *p)
745 if (p->code_bitmap) {
746 g_free(p->code_bitmap);
747 p->code_bitmap = NULL;
749 p->code_write_count = 0;
752 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
/* Recursive helper: 'level' 0 means *lp is a PageDesc array, otherwise
   a table of child pointers. */
754 static void page_flush_tb_1 (int level, void **lp)
756 int i;
758 if (*lp == NULL) {
759 return;
761 if (level == 0) {
762 PageDesc *pd = *lp;
763 for (i = 0; i < L2_SIZE; ++i) {
764 pd[i].first_tb = NULL;
765 invalidate_page_bitmap(pd + i);
767 } else {
768 void **pp = *lp;
769 for (i = 0; i < L2_SIZE; ++i) {
770 page_flush_tb_1 (level - 1, pp + i);
/* Clear TB tracking state on every allocated page (used by tb_flush). */
775 static void page_flush_tb(void)
777 int i;
778 for (i = 0; i < V_L1_SIZE; i++) {
779 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
783 /* flush all the translation blocks */
784 /* XXX: tb_flush is currently not thread safe */
785 void tb_flush(CPUArchState *env1)
787 CPUArchState *env;
788 #if defined(DEBUG_FLUSH)
789 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
790 (unsigned long)(code_gen_ptr - code_gen_buffer),
791 nb_tbs, nb_tbs > 0 ?
792 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
793 #endif
    /* sanity check: generated code must never outgrow the buffer */
794 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
795 cpu_abort(env1, "Internal error: code buffer overflow\n");
797 nb_tbs = 0;
    /* wipe every CPU's jump cache plus the global phys hash and the
       per-page TB lists, then rewind the code buffer */
799 for(env = first_cpu; env != NULL; env = env->next_cpu) {
800 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
803 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
804 page_flush_tb();
806 code_gen_ptr = code_gen_buffer;
807 /* XXX: flush processor icache at this point if cache flush is
808 expensive */
809 tb_flush_count++;
812 #ifdef DEBUG_TB_CHECK
/* Debug-only: after invalidating 'address', verify that no TB still
   overlaps that page (prints an error instead of asserting). */
814 static void tb_invalidate_check(target_ulong address)
816 TranslationBlock *tb;
817 int i;
818 address &= TARGET_PAGE_MASK;
819 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
820 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
821 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
822 address >= tb->pc + tb->size)) {
823 printf("ERROR invalidate: address=" TARGET_FMT_lx
824 " PC=%08lx size=%04x\n",
825 address, (long)tb->pc, tb->size);
831 /* verify that all the pages have correct rights for code */
832 static void tb_page_check(void)
834 TranslationBlock *tb;
835 int i, flags1, flags2;
837 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
838 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
839 flags1 = page_get_flags(tb->pc);
840 flags2 = page_get_flags(tb->pc + tb->size - 1);
    /* pages holding translated code must be write-protected */
841 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
842 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
843 (long)tb->pc, tb->size, flags1, flags2);
849 #endif
851 /* invalidate one TB */
/* Unlink 'tb' from a singly linked list whose next pointer lives at
   byte offset 'next_offset' inside each TranslationBlock. */
852 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
853 int next_offset)
855 TranslationBlock *tb1;
856 for(;;) {
857 tb1 = *ptb;
858 if (tb1 == tb) {
859 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
860 break;
862 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
/* Unlink 'tb' from a per-page TB list.  The low 2 bits of each list
   pointer encode which page slot (0 or 1) of that TB the link uses. */
866 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
868 TranslationBlock *tb1;
869 unsigned int n1;
871 for(;;) {
872 tb1 = *ptb;
873 n1 = (uintptr_t)tb1 & 3;
874 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
875 if (tb1 == tb) {
876 *ptb = tb1->page_next[n1];
877 break;
879 ptb = &tb1->page_next[n1];
/* Remove entry 'n' of 'tb' from the circular list of TBs jumping to
   the same destination; tag value 2 marks the list head sentinel. */
883 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
885 TranslationBlock *tb1, **ptb;
886 unsigned int n1;
888 ptb = &tb->jmp_next[n];
889 tb1 = *ptb;
890 if (tb1) {
891 /* find tb(n) in circular list */
892 for(;;) {
893 tb1 = *ptb;
894 n1 = (uintptr_t)tb1 & 3;
895 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
896 if (n1 == n && tb1 == tb)
897 break;
898 if (n1 == 2) {
899 ptb = &tb1->jmp_first;
900 } else {
901 ptb = &tb1->jmp_next[n1];
904 /* now we can suppress tb(n) from the list */
905 *ptb = tb->jmp_next[n];
907 tb->jmp_next[n] = NULL;
911 /* reset the jump entry 'n' of a TB so that it is not chained to
912 another TB */
913 static inline void tb_reset_jump(TranslationBlock *tb, int n)
915 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]))
;
/* Fully retire one TB: remove it from the phys hash, the per-page
   lists, every CPU's jump cache, and both direction of jump chains.
   'page_addr' names the page being invalidated (-1 = none), whose own
   list the caller handles. */
918 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
920 CPUArchState *env;
921 PageDesc *p;
922 unsigned int h, n1;
923 tb_page_addr_t phys_pc;
924 TranslationBlock *tb1, *tb2;
926 /* remove the TB from the hash list */
927 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
928 h = tb_phys_hash_func(phys_pc);
929 tb_remove(&tb_phys_hash[h], tb,
930 offsetof(TranslationBlock, phys_hash_next));
932 /* remove the TB from the page list */
933 if (tb->page_addr[0] != page_addr) {
934 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
935 tb_page_remove(&p->first_tb, tb);
936 invalidate_page_bitmap(p);
938 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
939 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
940 tb_page_remove(&p->first_tb, tb);
941 invalidate_page_bitmap(p);
944 tb_invalidated_flag = 1;
946 /* remove the TB from the hash list */
947 h = tb_jmp_cache_hash_func(tb->pc);
948 for(env = first_cpu; env != NULL; env = env->next_cpu) {
949 if (env->tb_jmp_cache[h] == tb)
950 env->tb_jmp_cache[h] = NULL;
953 /* suppress this TB from the two jump lists */
954 tb_jmp_remove(tb, 0);
955 tb_jmp_remove(tb, 1);
957 /* suppress any remaining jumps to this TB */
958 tb1 = tb->jmp_first;
959 for(;;) {
960 n1 = (uintptr_t)tb1 & 3;
961 if (n1 == 2)
962 break;
963 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
964 tb2 = tb1->jmp_next[n1];
    /* redirect the incoming jump back to its own epilogue */
965 tb_reset_jump(tb1, n1);
966 tb1->jmp_next[n1] = NULL;
967 tb1 = tb2;
969 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
971 tb_phys_invalidate_count++;
/* Set bits [start, start + len) in the bitmap at 'tab'.
   'start' is a bit index; bit i lives in byte i >> 3 at position i & 7.
   A non-positive 'len' sets nothing. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    const int end = start + len;

    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= (uint8_t)(1u << (bit & 7));
    }
}
/* Build the self-modifying-code bitmap for page 'p': one bit per byte
   of the page, set wherever some TB's translated guest code lives.
   Writes that miss every set bit can then skip TB invalidation. */
1001 static void build_page_bitmap(PageDesc *p)
1003 int n, tb_start, tb_end;
1004 TranslationBlock *tb;
1006 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
1008 tb = p->first_tb;
1009 while (tb != NULL) {
1010 n = (uintptr_t)tb & 3;
1011 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1012 /* NOTE: this is subtle as a TB may span two physical pages */
1013 if (n == 0) {
1014 /* NOTE: tb_end may be after the end of the page, but
1015 it is not a problem */
1016 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1017 tb_end = tb_start + tb->size;
1018 if (tb_end > TARGET_PAGE_SIZE)
1019 tb_end = TARGET_PAGE_SIZE;
1020 } else {
    /* second page of a spanning TB: covers from the page start */
1021 tb_start = 0;
1022 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1024 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1025 tb = tb->page_next[n];
/* Translate one guest basic block starting at (pc, cs_base, flags) and
   link the resulting TB into the lookup structures.  Flushes the whole
   code cache first if the TB pool or code buffer is exhausted. */
1029 TranslationBlock *tb_gen_code(CPUArchState *env,
1030 target_ulong pc, target_ulong cs_base,
1031 int flags, int cflags)
1033 TranslationBlock *tb;
1034 uint8_t *tc_ptr;
1035 tb_page_addr_t phys_pc, phys_page2;
1036 target_ulong virt_page2;
1037 int code_gen_size;
1039 phys_pc = get_page_addr_code(env, pc);
1040 tb = tb_alloc(pc);
1041 if (!tb) {
1042 /* flush must be done */
1043 tb_flush(env);
1044 /* cannot fail at this point */
1045 tb = tb_alloc(pc);
1046 /* Don't forget to invalidate previous TB info. */
1047 tb_invalidated_flag = 1;
1049 tc_ptr = code_gen_ptr;
1050 tb->tc_ptr = tc_ptr;
1051 tb->cs_base = cs_base;
1052 tb->flags = flags;
1053 tb->cflags = cflags;
1054 cpu_gen_code(env, tb, &code_gen_size);
    /* advance the code pointer, keeping CODE_GEN_ALIGN alignment */
1055 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1056 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1058 /* check next page if needed */
1059 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1060 phys_page2 = -1;
1061 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1062 phys_page2 = get_page_addr_code(env, virt_page2);
1064 tb_link_page(tb, phys_pc, phys_page2);
1065 return tb;
1069 * Invalidate all TBs which intersect with the target physical address range
1070 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1071 * 'is_cpu_write_access' should be true if called from a real cpu write
1072 * access: the virtual CPU will exit the current TB if code is modified inside
1073 * this TB.
1075 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1076 int is_cpu_write_access)
    /* iterate page by page; the per-page helper clips [start, end[ to
       the page it finds */
1078 while (start < end) {
1079 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1080 start &= TARGET_PAGE_MASK;
1081 start += TARGET_PAGE_SIZE;
1086 * Invalidate all TBs which intersect with the target physical address range
1087 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1088 * 'is_cpu_write_access' should be true if called from a real cpu write
1089 * access: the virtual CPU will exit the current TB if code is modified inside
1090 * this TB.
1092 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1093 int is_cpu_write_access)
1095 TranslationBlock *tb, *tb_next, *saved_tb;
1096 CPUArchState *env = cpu_single_env;
1097 tb_page_addr_t tb_start, tb_end;
1098 PageDesc *p;
1099 int n;
1100 #ifdef TARGET_HAS_PRECISE_SMC
1101 int current_tb_not_found = is_cpu_write_access;
1102 TranslationBlock *current_tb = NULL;
1103 int current_tb_modified = 0;
1104 target_ulong current_pc = 0;
1105 target_ulong current_cs_base = 0;
1106 int current_flags = 0;
1107 #endif /* TARGET_HAS_PRECISE_SMC */
1109 p = page_find(start >> TARGET_PAGE_BITS);
1110 if (!p)
1111 return;
    /* after enough code-page writes, switch to bitmap-filtered
       invalidation (see tb_invalidate_phys_page_fast) */
1112 if (!p->code_bitmap &&
1113 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1114 is_cpu_write_access) {
1115 /* build code bitmap */
1116 build_page_bitmap(p);
1119 /* we remove all the TBs in the range [start, end[ */
1120 /* XXX: see if in some cases it could be faster to invalidate all the code */
1121 tb = p->first_tb;
1122 while (tb != NULL) {
1123 n = (uintptr_t)tb & 3;
1124 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1125 tb_next = tb->page_next[n];
1126 /* NOTE: this is subtle as a TB may span two physical pages */
1127 if (n == 0) {
1128 /* NOTE: tb_end may be after the end of the page, but
1129 it is not a problem */
1130 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1131 tb_end = tb_start + tb->size;
1132 } else {
1133 tb_start = tb->page_addr[1];
1134 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1136 if (!(tb_end <= start || tb_start >= end)) {
1137 #ifdef TARGET_HAS_PRECISE_SMC
1138 if (current_tb_not_found) {
1139 current_tb_not_found = 0;
1140 current_tb = NULL;
1141 if (env->mem_io_pc) {
1142 /* now we have a real cpu fault */
1143 current_tb = tb_find_pc(env->mem_io_pc);
1146 if (current_tb == tb &&
1147 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1148 /* If we are modifying the current TB, we must stop
1149 its execution. We could be more precise by checking
1150 that the modification is after the current PC, but it
1151 would require a specialized function to partially
1152 restore the CPU state */
1154 current_tb_modified = 1;
1155 cpu_restore_state(current_tb, env, env->mem_io_pc);
1156 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1157 &current_flags);
1159 #endif /* TARGET_HAS_PRECISE_SMC */
1160 /* we need to do that to handle the case where a signal
1161 occurs while doing tb_phys_invalidate() */
1162 saved_tb = NULL;
1163 if (env) {
1164 saved_tb = env->current_tb;
1165 env->current_tb = NULL;
1167 tb_phys_invalidate(tb, -1);
1168 if (env) {
1169 env->current_tb = saved_tb;
1170 if (env->interrupt_request && env->current_tb)
1171 cpu_interrupt(env, env->interrupt_request);
1174 tb = tb_next;
1176 #if !defined(CONFIG_USER_ONLY)
1177 /* if no code remaining, no need to continue to use slow writes */
1178 if (!p->first_tb) {
1179 invalidate_page_bitmap(p);
1180 if (is_cpu_write_access) {
1181 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1184 #endif
1185 #ifdef TARGET_HAS_PRECISE_SMC
1186 if (current_tb_modified) {
1187 /* we generate a block containing just the instruction
1188 modifying the memory. It will ensure that it cannot modify
1189 itself */
1190 env->current_tb = NULL;
1191 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1192 cpu_resume_from_signal(env, NULL);
1194 #endif
1197 /* len must be <= 8 and start must be a multiple of len */
1198 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1200 PageDesc *p;
1201 int offset, b;
1202 #if 0
1203 if (1) {
1204 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1205 cpu_single_env->mem_io_vaddr, len,
1206 cpu_single_env->eip,
1207 cpu_single_env->eip +
1208 (intptr_t)cpu_single_env->segs[R_CS].base);
1210 #endif
1211 p = page_find(start >> TARGET_PAGE_BITS);
1212 if (!p)
1213 return;
1214 if (p->code_bitmap) {
1215 offset = start & ~TARGET_PAGE_MASK;
1216 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1217 if (b & ((1 << len) - 1))
1218 goto do_invalidate;
1219 } else {
1220 do_invalidate:
1221 tb_invalidate_phys_page_range(start, start + len, 1);
1225 #if !defined(CONFIG_SOFTMMU)
1226 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1227 uintptr_t pc, void *puc)
1229 TranslationBlock *tb;
1230 PageDesc *p;
1231 int n;
1232 #ifdef TARGET_HAS_PRECISE_SMC
1233 TranslationBlock *current_tb = NULL;
1234 CPUArchState *env = cpu_single_env;
1235 int current_tb_modified = 0;
1236 target_ulong current_pc = 0;
1237 target_ulong current_cs_base = 0;
1238 int current_flags = 0;
1239 #endif
1241 addr &= TARGET_PAGE_MASK;
1242 p = page_find(addr >> TARGET_PAGE_BITS);
1243 if (!p)
1244 return;
1245 tb = p->first_tb;
1246 #ifdef TARGET_HAS_PRECISE_SMC
1247 if (tb && pc != 0) {
1248 current_tb = tb_find_pc(pc);
1250 #endif
1251 while (tb != NULL) {
1252 n = (uintptr_t)tb & 3;
1253 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1254 #ifdef TARGET_HAS_PRECISE_SMC
1255 if (current_tb == tb &&
1256 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1257 /* If we are modifying the current TB, we must stop
1258 its execution. We could be more precise by checking
1259 that the modification is after the current PC, but it
1260 would require a specialized function to partially
1261 restore the CPU state */
1263 current_tb_modified = 1;
1264 cpu_restore_state(current_tb, env, pc);
1265 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1266 &current_flags);
1268 #endif /* TARGET_HAS_PRECISE_SMC */
1269 tb_phys_invalidate(tb, addr);
1270 tb = tb->page_next[n];
1272 p->first_tb = NULL;
1273 #ifdef TARGET_HAS_PRECISE_SMC
1274 if (current_tb_modified) {
1275 /* we generate a block containing just the instruction
1276 modifying the memory. It will ensure that it cannot modify
1277 itself */
1278 env->current_tb = NULL;
1279 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1280 cpu_resume_from_signal(env, puc);
1282 #endif
1284 #endif
1286 /* add the tb in the target page and protect it if necessary */
1287 static inline void tb_alloc_page(TranslationBlock *tb,
1288 unsigned int n, tb_page_addr_t page_addr)
1290 PageDesc *p;
1291 #ifndef CONFIG_USER_ONLY
1292 bool page_already_protected;
1293 #endif
1295 tb->page_addr[n] = page_addr;
1296 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1297 tb->page_next[n] = p->first_tb;
1298 #ifndef CONFIG_USER_ONLY
1299 page_already_protected = p->first_tb != NULL;
1300 #endif
1301 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1302 invalidate_page_bitmap(p);
1304 #if defined(TARGET_HAS_SMC) || 1
1306 #if defined(CONFIG_USER_ONLY)
1307 if (p->flags & PAGE_WRITE) {
1308 target_ulong addr;
1309 PageDesc *p2;
1310 int prot;
1312 /* force the host page as non writable (writes will have a
1313 page fault + mprotect overhead) */
1314 page_addr &= qemu_host_page_mask;
1315 prot = 0;
1316 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1317 addr += TARGET_PAGE_SIZE) {
1319 p2 = page_find (addr >> TARGET_PAGE_BITS);
1320 if (!p2)
1321 continue;
1322 prot |= p2->flags;
1323 p2->flags &= ~PAGE_WRITE;
1325 mprotect(g2h(page_addr), qemu_host_page_size,
1326 (prot & PAGE_BITS) & ~PAGE_WRITE);
1327 #ifdef DEBUG_TB_INVALIDATE
1328 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1329 page_addr);
1330 #endif
1332 #else
1333 /* if some code is already present, then the pages are already
1334 protected. So we handle the case where only the first TB is
1335 allocated in a physical page */
1336 if (!page_already_protected) {
1337 tlb_protect_code(page_addr);
1339 #endif
1341 #endif /* TARGET_HAS_SMC */
1344 /* add a new TB and link it to the physical page tables. phys_page2 is
1345 (-1) to indicate that only one page contains the TB. */
1346 void tb_link_page(TranslationBlock *tb,
1347 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1349 unsigned int h;
1350 TranslationBlock **ptb;
1352 /* Grab the mmap lock to stop another thread invalidating this TB
1353 before we are done. */
1354 mmap_lock();
1355 /* add in the physical hash table */
1356 h = tb_phys_hash_func(phys_pc);
1357 ptb = &tb_phys_hash[h];
1358 tb->phys_hash_next = *ptb;
1359 *ptb = tb;
1361 /* add in the page list */
1362 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1363 if (phys_page2 != -1)
1364 tb_alloc_page(tb, 1, phys_page2);
1365 else
1366 tb->page_addr[1] = -1;
1368 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1369 tb->jmp_next[0] = NULL;
1370 tb->jmp_next[1] = NULL;
1372 /* init original jump addresses */
1373 if (tb->tb_next_offset[0] != 0xffff)
1374 tb_reset_jump(tb, 0);
1375 if (tb->tb_next_offset[1] != 0xffff)
1376 tb_reset_jump(tb, 1);
1378 #ifdef DEBUG_TB_CHECK
1379 tb_page_check();
1380 #endif
1381 mmap_unlock();
1384 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1385 tb[1].tc_ptr. Return NULL if not found */
1386 TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1388 int m_min, m_max, m;
1389 uintptr_t v;
1390 TranslationBlock *tb;
1392 if (nb_tbs <= 0)
1393 return NULL;
1394 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1395 tc_ptr >= (uintptr_t)code_gen_ptr) {
1396 return NULL;
1398 /* binary search (cf Knuth) */
1399 m_min = 0;
1400 m_max = nb_tbs - 1;
1401 while (m_min <= m_max) {
1402 m = (m_min + m_max) >> 1;
1403 tb = &tbs[m];
1404 v = (uintptr_t)tb->tc_ptr;
1405 if (v == tc_ptr)
1406 return tb;
1407 else if (tc_ptr < v) {
1408 m_max = m - 1;
1409 } else {
1410 m_min = m + 1;
1413 return &tbs[m_max];
1416 static void tb_reset_jump_recursive(TranslationBlock *tb);
1418 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1420 TranslationBlock *tb1, *tb_next, **ptb;
1421 unsigned int n1;
1423 tb1 = tb->jmp_next[n];
1424 if (tb1 != NULL) {
1425 /* find head of list */
1426 for(;;) {
1427 n1 = (uintptr_t)tb1 & 3;
1428 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1429 if (n1 == 2)
1430 break;
1431 tb1 = tb1->jmp_next[n1];
1433 /* we are now sure now that tb jumps to tb1 */
1434 tb_next = tb1;
1436 /* remove tb from the jmp_first list */
1437 ptb = &tb_next->jmp_first;
1438 for(;;) {
1439 tb1 = *ptb;
1440 n1 = (uintptr_t)tb1 & 3;
1441 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1442 if (n1 == n && tb1 == tb)
1443 break;
1444 ptb = &tb1->jmp_next[n1];
1446 *ptb = tb->jmp_next[n];
1447 tb->jmp_next[n] = NULL;
1449 /* suppress the jump to next tb in generated code */
1450 tb_reset_jump(tb, n);
1452 /* suppress jumps in the tb on which we could have jumped */
1453 tb_reset_jump_recursive(tb_next);
1457 static void tb_reset_jump_recursive(TranslationBlock *tb)
1459 tb_reset_jump_recursive2(tb, 0);
1460 tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Drop any TB containing pc so the breakpoint takes effect. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* Invalidate translated code at a guest physical address.  Only RAM and
   readable ROM-device regions can hold code; anything else is ignored. */
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    /* translate the virtual pc to a physical address first */
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
1493 #if defined(CONFIG_USER_ONLY)
1494 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1499 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1500 int flags, CPUWatchpoint **watchpoint)
1502 return -ENOSYS;
1504 #else
1505 /* Add a watchpoint. */
1506 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1507 int flags, CPUWatchpoint **watchpoint)
1509 target_ulong len_mask = ~(len - 1);
1510 CPUWatchpoint *wp;
1512 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1513 if ((len & (len - 1)) || (addr & ~len_mask) ||
1514 len == 0 || len > TARGET_PAGE_SIZE) {
1515 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1516 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1517 return -EINVAL;
1519 wp = g_malloc(sizeof(*wp));
1521 wp->vaddr = addr;
1522 wp->len_mask = len_mask;
1523 wp->flags = flags;
1525 /* keep all GDB-injected watchpoints in front */
1526 if (flags & BP_GDB)
1527 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1528 else
1529 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1531 tlb_flush_page(env, addr);
1533 if (watchpoint)
1534 *watchpoint = wp;
1535 return 0;
1538 /* Remove a specific watchpoint. */
1539 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
1540 int flags)
1542 target_ulong len_mask = ~(len - 1);
1543 CPUWatchpoint *wp;
1545 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1546 if (addr == wp->vaddr && len_mask == wp->len_mask
1547 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1548 cpu_watchpoint_remove_by_ref(env, wp);
1549 return 0;
1552 return -ENOENT;
1555 /* Remove a specific watchpoint by reference. */
1556 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
1558 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1560 tlb_flush_page(env, watchpoint->vaddr);
1562 g_free(watchpoint);
1565 /* Remove all matching watchpoints. */
1566 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1568 CPUWatchpoint *wp, *next;
1570 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1571 if (wp->flags & mask)
1572 cpu_watchpoint_remove_by_ref(env, wp);
1575 #endif
1577 /* Add a breakpoint. */
1578 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
1579 CPUBreakpoint **breakpoint)
1581 #if defined(TARGET_HAS_ICE)
1582 CPUBreakpoint *bp;
1584 bp = g_malloc(sizeof(*bp));
1586 bp->pc = pc;
1587 bp->flags = flags;
1589 /* keep all GDB-injected breakpoints in front */
1590 if (flags & BP_GDB)
1591 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1592 else
1593 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1595 breakpoint_invalidate(env, pc);
1597 if (breakpoint)
1598 *breakpoint = bp;
1599 return 0;
1600 #else
1601 return -ENOSYS;
1602 #endif
1605 /* Remove a specific breakpoint. */
1606 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
1608 #if defined(TARGET_HAS_ICE)
1609 CPUBreakpoint *bp;
1611 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1612 if (bp->pc == pc && bp->flags == flags) {
1613 cpu_breakpoint_remove_by_ref(env, bp);
1614 return 0;
1617 return -ENOENT;
1618 #else
1619 return -ENOSYS;
1620 #endif
1623 /* Remove a specific breakpoint by reference. */
1624 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
1626 #if defined(TARGET_HAS_ICE)
1627 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1629 breakpoint_invalidate(env, breakpoint->pc);
1631 g_free(breakpoint);
1632 #endif
1635 /* Remove all matching breakpoints. */
1636 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
1638 #if defined(TARGET_HAS_ICE)
1639 CPUBreakpoint *bp, *next;
1641 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1642 if (bp->flags & mask)
1643 cpu_breakpoint_remove_by_ref(env, bp);
1645 #endif
1648 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1649 CPU loop after each instruction */
1650 void cpu_single_step(CPUArchState *env, int enabled)
1652 #if defined(TARGET_HAS_ICE)
1653 if (env->singlestep_enabled != enabled) {
1654 env->singlestep_enabled = enabled;
1655 if (kvm_enabled())
1656 kvm_update_guest_debug(env, 0);
1657 else {
1658 /* must flush all the translated code to avoid inconsistencies */
1659 /* XXX: only flush what is necessary */
1660 tb_flush(env);
1663 #endif
1666 static void cpu_unlink_tb(CPUArchState *env)
1668 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1669 problem and hope the cpu will stop of its own accord. For userspace
1670 emulation this often isn't actually as bad as it sounds. Often
1671 signals are used primarily to interrupt blocking syscalls. */
1672 TranslationBlock *tb;
1673 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1675 spin_lock(&interrupt_lock);
1676 tb = env->current_tb;
1677 /* if the cpu is currently executing code, we must unlink it and
1678 all the potentially executing TB */
1679 if (tb) {
1680 env->current_tb = NULL;
1681 tb_reset_jump_recursive(tb);
1683 spin_unlock(&interrupt_lock);
1686 #ifndef CONFIG_USER_ONLY
1687 /* mask must never be zero, except for A20 change call */
1688 static void tcg_handle_interrupt(CPUArchState *env, int mask)
1690 int old_mask;
1692 old_mask = env->interrupt_request;
1693 env->interrupt_request |= mask;
1696 * If called from iothread context, wake the target cpu in
1697 * case its halted.
1699 if (!qemu_cpu_is_self(env)) {
1700 qemu_cpu_kick(env);
1701 return;
1704 if (use_icount) {
1705 env->icount_decr.u16.high = 0xffff;
1706 if (!can_do_io(env)
1707 && (mask & ~old_mask) != 0) {
1708 cpu_abort(env, "Raised interrupt while not in I/O function");
1710 } else {
1711 cpu_unlink_tb(env);
1715 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1717 #else /* CONFIG_USER_ONLY */
1719 void cpu_interrupt(CPUArchState *env, int mask)
1721 env->interrupt_request |= mask;
1722 cpu_unlink_tb(env);
1724 #endif /* CONFIG_USER_ONLY */
1726 void cpu_reset_interrupt(CPUArchState *env, int mask)
1728 env->interrupt_request &= ~mask;
1731 void cpu_exit(CPUArchState *env)
1733 env->exit_request = 1;
1734 cpu_unlink_tb(env);
1737 void cpu_abort(CPUArchState *env, const char *fmt, ...)
1739 va_list ap;
1740 va_list ap2;
1742 va_start(ap, fmt);
1743 va_copy(ap2, ap);
1744 fprintf(stderr, "qemu: fatal: ");
1745 vfprintf(stderr, fmt, ap);
1746 fprintf(stderr, "\n");
1747 #ifdef TARGET_I386
1748 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1749 #else
1750 cpu_dump_state(env, stderr, fprintf, 0);
1751 #endif
1752 if (qemu_log_enabled()) {
1753 qemu_log("qemu: fatal: ");
1754 qemu_log_vprintf(fmt, ap2);
1755 qemu_log("\n");
1756 #ifdef TARGET_I386
1757 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1758 #else
1759 log_cpu_state(env, 0);
1760 #endif
1761 qemu_log_flush();
1762 qemu_log_close();
1764 va_end(ap2);
1765 va_end(ap);
1766 #if defined(CONFIG_USER_ONLY)
1768 struct sigaction act;
1769 sigfillset(&act.sa_mask);
1770 act.sa_handler = SIG_DFL;
1771 sigaction(SIGABRT, &act, NULL);
1773 #endif
1774 abort();
1777 CPUArchState *cpu_copy(CPUArchState *env)
1779 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1780 CPUArchState *next_cpu = new_env->next_cpu;
1781 int cpu_index = new_env->cpu_index;
1782 #if defined(TARGET_HAS_ICE)
1783 CPUBreakpoint *bp;
1784 CPUWatchpoint *wp;
1785 #endif
1787 memcpy(new_env, env, sizeof(CPUArchState));
1789 /* Preserve chaining and index. */
1790 new_env->next_cpu = next_cpu;
1791 new_env->cpu_index = cpu_index;
1793 /* Clone all break/watchpoints.
1794 Note: Once we support ptrace with hw-debug register access, make sure
1795 BP_CPU break/watchpoints are handled correctly on clone. */
1796 QTAILQ_INIT(&env->breakpoints);
1797 QTAILQ_INIT(&env->watchpoints);
1798 #if defined(TARGET_HAS_ICE)
1799 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1800 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1802 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1803 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1804 wp->flags, NULL);
1806 #endif
1808 return new_env;
1811 #if !defined(CONFIG_USER_ONLY)
1812 void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
1814 unsigned int i;
1816 /* Discard jump cache entries for any tb which might potentially
1817 overlap the flushed page. */
1818 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1819 memset (&env->tb_jmp_cache[i], 0,
1820 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1822 i = tb_jmp_cache_hash_page(addr);
1823 memset (&env->tb_jmp_cache[i], 0,
1824 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1827 /* Note: start and end must be within the same ram block. */
1828 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1829 int dirty_flags)
1831 uintptr_t length, start1;
1833 start &= TARGET_PAGE_MASK;
1834 end = TARGET_PAGE_ALIGN(end);
1836 length = end - start;
1837 if (length == 0)
1838 return;
1839 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1841 /* we modify the TLB cache so that the dirty bit will be set again
1842 when accessing the range */
1843 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
1844 /* Check that we don't span multiple blocks - this breaks the
1845 address comparisons below. */
1846 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
1847 != (end - 1) - start) {
1848 abort();
1850 cpu_tlb_reset_dirty_all(start1, length);
1853 int cpu_physical_memory_set_dirty_tracking(int enable)
1855 int ret = 0;
1856 in_migration = enable;
1857 return ret;
1860 target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1861 MemoryRegionSection *section,
1862 target_ulong vaddr,
1863 target_phys_addr_t paddr,
1864 int prot,
1865 target_ulong *address)
1867 target_phys_addr_t iotlb;
1868 CPUWatchpoint *wp;
1870 if (memory_region_is_ram(section->mr)) {
1871 /* Normal RAM. */
1872 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1873 + memory_region_section_addr(section, paddr);
1874 if (!section->readonly) {
1875 iotlb |= phys_section_notdirty;
1876 } else {
1877 iotlb |= phys_section_rom;
1879 } else {
1880 /* IO handlers are currently passed a physical address.
1881 It would be nice to pass an offset from the base address
1882 of that region. This would avoid having to special case RAM,
1883 and avoid full address decoding in every device.
1884 We can't use the high bits of pd for this because
1885 IO_MEM_ROMD uses these as a ram address. */
1886 iotlb = section - phys_sections;
1887 iotlb += memory_region_section_addr(section, paddr);
1890 /* Make accesses to pages with watchpoints go via the
1891 watchpoint trap routines. */
1892 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1893 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1894 /* Avoid trapping reads of pages with a write breakpoint. */
1895 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1896 iotlb = phys_section_watch + paddr;
1897 *address |= TLB_MMIO;
1898 break;
1903 return iotlb;
1906 #else
1908 * Walks guest process memory "regions" one by one
1909 * and calls callback function 'fn' for each region.
1912 struct walk_memory_regions_data
1914 walk_memory_regions_fn fn;
1915 void *priv;
1916 uintptr_t start;
1917 int prot;
1920 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1921 abi_ulong end, int new_prot)
1923 if (data->start != -1ul) {
1924 int rc = data->fn(data->priv, data->start, end, data->prot);
1925 if (rc != 0) {
1926 return rc;
1930 data->start = (new_prot ? end : -1ul);
1931 data->prot = new_prot;
1933 return 0;
1936 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1937 abi_ulong base, int level, void **lp)
1939 abi_ulong pa;
1940 int i, rc;
1942 if (*lp == NULL) {
1943 return walk_memory_regions_end(data, base, 0);
1946 if (level == 0) {
1947 PageDesc *pd = *lp;
1948 for (i = 0; i < L2_SIZE; ++i) {
1949 int prot = pd[i].flags;
1951 pa = base | (i << TARGET_PAGE_BITS);
1952 if (prot != data->prot) {
1953 rc = walk_memory_regions_end(data, pa, prot);
1954 if (rc != 0) {
1955 return rc;
1959 } else {
1960 void **pp = *lp;
1961 for (i = 0; i < L2_SIZE; ++i) {
1962 pa = base | ((abi_ulong)i <<
1963 (TARGET_PAGE_BITS + L2_BITS * level));
1964 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1965 if (rc != 0) {
1966 return rc;
1971 return 0;
1974 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1976 struct walk_memory_regions_data data;
1977 uintptr_t i;
1979 data.fn = fn;
1980 data.priv = priv;
1981 data.start = -1ul;
1982 data.prot = 0;
1984 for (i = 0; i < V_L1_SIZE; i++) {
1985 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
1986 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1987 if (rc != 0) {
1988 return rc;
1992 return walk_memory_regions_end(&data, 0, 0);
1995 static int dump_region(void *priv, abi_ulong start,
1996 abi_ulong end, unsigned long prot)
1998 FILE *f = (FILE *)priv;
2000 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2001 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2002 start, end, end - start,
2003 ((prot & PAGE_READ) ? 'r' : '-'),
2004 ((prot & PAGE_WRITE) ? 'w' : '-'),
2005 ((prot & PAGE_EXEC) ? 'x' : '-'));
2007 return (0);
2010 /* dump memory mappings */
2011 void page_dump(FILE *f)
2013 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2014 "start", "end", "size", "prot");
2015 walk_memory_regions(f, dump_region);
2018 int page_get_flags(target_ulong address)
2020 PageDesc *p;
2022 p = page_find(address >> TARGET_PAGE_BITS);
2023 if (!p)
2024 return 0;
2025 return p->flags;
2028 /* Modify the flags of a page and invalidate the code if necessary.
2029 The flag PAGE_WRITE_ORG is positioned automatically depending
2030 on PAGE_WRITE. The mmap_lock should already be held. */
2031 void page_set_flags(target_ulong start, target_ulong end, int flags)
2033 target_ulong addr, len;
2035 /* This function should never be called with addresses outside the
2036 guest address space. If this assert fires, it probably indicates
2037 a missing call to h2g_valid. */
2038 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2039 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2040 #endif
2041 assert(start < end);
2043 start = start & TARGET_PAGE_MASK;
2044 end = TARGET_PAGE_ALIGN(end);
2046 if (flags & PAGE_WRITE) {
2047 flags |= PAGE_WRITE_ORG;
2050 for (addr = start, len = end - start;
2051 len != 0;
2052 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2053 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2055 /* If the write protection bit is set, then we invalidate
2056 the code inside. */
2057 if (!(p->flags & PAGE_WRITE) &&
2058 (flags & PAGE_WRITE) &&
2059 p->first_tb) {
2060 tb_invalidate_phys_page(addr, 0, NULL);
2062 p->flags = flags;
2066 int page_check_range(target_ulong start, target_ulong len, int flags)
2068 PageDesc *p;
2069 target_ulong end;
2070 target_ulong addr;
2072 /* This function should never be called with addresses outside the
2073 guest address space. If this assert fires, it probably indicates
2074 a missing call to h2g_valid. */
2075 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2076 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2077 #endif
2079 if (len == 0) {
2080 return 0;
2082 if (start + len - 1 < start) {
2083 /* We've wrapped around. */
2084 return -1;
2087 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2088 start = start & TARGET_PAGE_MASK;
2090 for (addr = start, len = end - start;
2091 len != 0;
2092 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2093 p = page_find(addr >> TARGET_PAGE_BITS);
2094 if( !p )
2095 return -1;
2096 if( !(p->flags & PAGE_VALID) )
2097 return -1;
2099 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2100 return -1;
2101 if (flags & PAGE_WRITE) {
2102 if (!(p->flags & PAGE_WRITE_ORG))
2103 return -1;
2104 /* unprotect the page if it was put read-only because it
2105 contains translated code */
2106 if (!(p->flags & PAGE_WRITE)) {
2107 if (!page_unprotect(addr, 0, NULL))
2108 return -1;
2110 return 0;
2113 return 0;
2116 /* called from signal handler: invalidate the code and unprotect the
2117 page. Return TRUE if the fault was successfully handled. */
2118 int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
2120 unsigned int prot;
2121 PageDesc *p;
2122 target_ulong host_start, host_end, addr;
2124 /* Technically this isn't safe inside a signal handler. However we
2125 know this only ever happens in a synchronous SEGV handler, so in
2126 practice it seems to be ok. */
2127 mmap_lock();
2129 p = page_find(address >> TARGET_PAGE_BITS);
2130 if (!p) {
2131 mmap_unlock();
2132 return 0;
2135 /* if the page was really writable, then we change its
2136 protection back to writable */
2137 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2138 host_start = address & qemu_host_page_mask;
2139 host_end = host_start + qemu_host_page_size;
2141 prot = 0;
2142 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2143 p = page_find(addr >> TARGET_PAGE_BITS);
2144 p->flags |= PAGE_WRITE;
2145 prot |= p->flags;
2147 /* and since the content will be modified, we must invalidate
2148 the corresponding translated code. */
2149 tb_invalidate_phys_page(addr, pc, puc);
2150 #ifdef DEBUG_TB_CHECK
2151 tb_invalidate_check(addr);
2152 #endif
2154 mprotect((void *)g2h(host_start), qemu_host_page_size,
2155 prot & PAGE_BITS);
2157 mmap_unlock();
2158 return 1;
2160 mmap_unlock();
2161 return 0;
2163 #endif /* defined(CONFIG_USER_ONLY) */
2165 #if !defined(CONFIG_USER_ONLY)
2167 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2168 typedef struct subpage_t {
2169 MemoryRegion iomem;
2170 target_phys_addr_t base;
2171 uint16_t sub_section[TARGET_PAGE_SIZE];
2172 } subpage_t;
2174 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2175 uint16_t section);
2176 static subpage_t *subpage_init(target_phys_addr_t base);
2177 static void destroy_page_desc(uint16_t section_index)
2179 MemoryRegionSection *section = &phys_sections[section_index];
2180 MemoryRegion *mr = section->mr;
2182 if (mr->subpage) {
2183 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2184 memory_region_destroy(&subpage->iomem);
2185 g_free(subpage);
2189 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
2191 unsigned i;
2192 PhysPageEntry *p;
2194 if (lp->ptr == PHYS_MAP_NODE_NIL) {
2195 return;
2198 p = phys_map_nodes[lp->ptr];
2199 for (i = 0; i < L2_SIZE; ++i) {
2200 if (!p[i].is_leaf) {
2201 destroy_l2_mapping(&p[i], level - 1);
2202 } else {
2203 destroy_page_desc(p[i].ptr);
2206 lp->is_leaf = 0;
2207 lp->ptr = PHYS_MAP_NODE_NIL;
2210 static void destroy_all_mappings(void)
2212 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
2213 phys_map_nodes_reset();
2216 static uint16_t phys_section_add(MemoryRegionSection *section)
2218 if (phys_sections_nb == phys_sections_nb_alloc) {
2219 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2220 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2221 phys_sections_nb_alloc);
2223 phys_sections[phys_sections_nb] = *section;
2224 return phys_sections_nb++;
2227 static void phys_sections_clear(void)
2229 phys_sections_nb = 0;
2232 /* register physical memory.
2233 For RAM, 'size' must be a multiple of the target page size.
2234 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2235 io memory page. The address used when calling the IO function is
2236 the offset from the start of the region, plus region_offset. Both
2237 start_addr and region_offset are rounded down to a page boundary
2238 before calculating this offset. This should not be a problem unless
2239 the low bits of start_addr and region_offset differ. */
2240 static void register_subpage(MemoryRegionSection *section)
2242 subpage_t *subpage;
2243 target_phys_addr_t base = section->offset_within_address_space
2244 & TARGET_PAGE_MASK;
2245 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
2246 MemoryRegionSection subsection = {
2247 .offset_within_address_space = base,
2248 .size = TARGET_PAGE_SIZE,
2250 target_phys_addr_t start, end;
2252 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
2254 if (!(existing->mr->subpage)) {
2255 subpage = subpage_init(base);
2256 subsection.mr = &subpage->iomem;
2257 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2258 phys_section_add(&subsection));
2259 } else {
2260 subpage = container_of(existing->mr, subpage_t, iomem);
2262 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2263 end = start + section->size;
2264 subpage_register(subpage, start, end, phys_section_add(section));
2268 static void register_multipage(MemoryRegionSection *section)
2270 target_phys_addr_t start_addr = section->offset_within_address_space;
2271 ram_addr_t size = section->size;
2272 target_phys_addr_t addr;
2273 uint16_t section_index = phys_section_add(section);
2275 assert(size);
2277 addr = start_addr;
2278 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2279 section_index);
2282 void cpu_register_physical_memory_log(MemoryRegionSection *section,
2283 bool readonly)
2285 MemoryRegionSection now = *section, remain = *section;
2287 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2288 || (now.size < TARGET_PAGE_SIZE)) {
2289 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2290 - now.offset_within_address_space,
2291 now.size);
2292 register_subpage(&now);
2293 remain.size -= now.size;
2294 remain.offset_within_address_space += now.size;
2295 remain.offset_within_region += now.size;
2297 now = remain;
2298 now.size &= TARGET_PAGE_MASK;
2299 if (now.size) {
2300 register_multipage(&now);
2301 remain.size -= now.size;
2302 remain.offset_within_address_space += now.size;
2303 remain.offset_within_region += now.size;
2305 now = remain;
2306 if (now.size) {
2307 register_subpage(&now);
2312 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2314 if (kvm_enabled())
2315 kvm_coalesce_mmio_region(addr, size);
2318 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2320 if (kvm_enabled())
2321 kvm_uncoalesce_mmio_region(addr, size);
2324 void qemu_flush_coalesced_mmio_buffer(void)
2326 if (kvm_enabled())
2327 kvm_flush_coalesced_mmio_buffer();
2330 #if defined(__linux__) && !defined(TARGET_S390X)
2332 #include <sys/vfs.h>
2334 #define HUGETLBFS_MAGIC 0x958458f6
2336 static long gethugepagesize(const char *path)
2338 struct statfs fs;
2339 int ret;
2341 do {
2342 ret = statfs(path, &fs);
2343 } while (ret != 0 && errno == EINTR);
2345 if (ret != 0) {
2346 perror(path);
2347 return 0;
2350 if (fs.f_type != HUGETLBFS_MAGIC)
2351 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2353 return fs.f_bsize;
/* Allocate BLOCK's backing store of MEMORY bytes from a hugetlbfs mount
 * at PATH.  On success, stores the backing fd in block->fd and returns
 * the mapped area; returns NULL on any failure so the caller can fall
 * back to anonymous memory.
 */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A block smaller than one huge page cannot use hugetlbfs. */
    if (memory < hpagesize) {
        return NULL;
    }

    /* Without MMU notifiers KVM would not see changes to the mapping. */
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* The open fd keeps the file alive; the name is no longer needed. */
    unlink(filename);
    free(filename);

    /* Round up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    block->fd = fd;
    return area;
}
2424 #endif
2426 static ram_addr_t find_ram_offset(ram_addr_t size)
2428 RAMBlock *block, *next_block;
2429 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2431 if (QLIST_EMPTY(&ram_list.blocks))
2432 return 0;
2434 QLIST_FOREACH(block, &ram_list.blocks, next) {
2435 ram_addr_t end, next = RAM_ADDR_MAX;
2437 end = block->offset + block->length;
2439 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2440 if (next_block->offset >= end) {
2441 next = MIN(next, next_block->offset);
2444 if (next - end >= size && next - end < mingap) {
2445 offset = end;
2446 mingap = next - end;
2450 if (offset == RAM_ADDR_MAX) {
2451 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2452 (uint64_t)size);
2453 abort();
2456 return offset;
2459 static ram_addr_t last_ram_offset(void)
2461 RAMBlock *block;
2462 ram_addr_t last = 0;
2464 QLIST_FOREACH(block, &ram_list.blocks, next)
2465 last = MAX(last, block->offset + block->length);
2467 return last;
2470 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2472 RAMBlock *new_block, *block;
2474 new_block = NULL;
2475 QLIST_FOREACH(block, &ram_list.blocks, next) {
2476 if (block->offset == addr) {
2477 new_block = block;
2478 break;
2481 assert(new_block);
2482 assert(!new_block->idstr[0]);
2484 if (dev) {
2485 char *id = qdev_get_dev_path(dev);
2486 if (id) {
2487 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2488 g_free(id);
2491 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2493 QLIST_FOREACH(block, &ram_list.blocks, next) {
2494 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2495 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2496 new_block->idstr);
2497 abort();
/* Register a new RAM block of SIZE bytes for memory region MR and
 * return its ram_addr_t offset.  If HOST is non-NULL it is used as the
 * (caller-owned) backing memory; otherwise backing memory is allocated
 * here (-mem-path hugetlbfs, S390 KVM fixed mapping, Xen, or plain
 * qemu_vmalloc depending on configuration).
 */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        /* Caller supplies and owns the backing memory. */
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs failed; fall back to anonymous memory. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               an system defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                /* Xen manages guest RAM itself; host stays NULL until
                   mapped on demand through the map cache. */
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new range fully dirty. */
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
/* Allocate a new RAM block of SIZE bytes with freshly allocated host
 * backing memory; returns its ram_addr_t offset. */
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
2571 void qemu_ram_free_from_ptr(ram_addr_t addr)
2573 RAMBlock *block;
2575 QLIST_FOREACH(block, &ram_list.blocks, next) {
2576 if (addr == block->offset) {
2577 QLIST_REMOVE(block, next);
2578 g_free(block);
2579 return;
/* Remove the RAM block at ADDR and release its backing memory, using
 * the teardown that matches how qemu_ram_alloc_from_ptr() created it
 * (caller-owned, -mem-path file mapping, S390 KVM mmap, Xen map cache,
 * or qemu_vmalloc). */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Host memory is owned by the caller: nothing to free. */
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* hugetlbfs backing: unmap and close the file. */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* -mem-path fallback allocation. */
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
2622 #ifndef _WIN32
/* Re-establish the host mapping for LENGTH bytes of guest RAM at ADDR,
 * recreating it the same way it was originally allocated.  Used e.g.
 * after a hwpoison event invalidates part of a mapping.  Exits on
 * failure, since guest RAM would otherwise be lost. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller-owned memory: not ours to remap. */
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        /* Mirror the flags used by file_ram_alloc(). */
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    /* MAP_FIXED should guarantee this; treat any other
                       result as fatal. */
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
2681 #endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list: simple MRU
               caching, since lookups tend to hit the same block. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
2755 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2756 * but takes a size argument */
2757 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
2759 if (*size == 0) {
2760 return NULL;
2762 if (xen_enabled()) {
2763 return xen_map_cache(addr, *size, 1);
2764 } else {
2765 RAMBlock *block;
2767 QLIST_FOREACH(block, &ram_list.blocks, next) {
2768 if (addr - block->offset < block->length) {
2769 if (addr - block->offset + *size > block->length)
2770 *size = block->length - addr + block->offset;
2771 return block->host + (addr - block->offset);
2775 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2776 abort();
/* Release a pointer obtained from qemu_get_ram_ptr(); currently this
 * only feeds a tracepoint (no reference counting is performed). */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
/* Translate host pointer PTR back to a ram_addr_t in *RAM_ADDR.
 * Returns 0 on success, -1 if PTR lies in no registered block. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        /* Under Xen the mapping lives in the map cache, not in blocks. */
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
2809 /* Some of the softmmu routines need to translate from a host pointer
2810 (typically a TLB entry) back to a ram offset. */
2811 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2813 ram_addr_t ram_addr;
2815 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2816 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2817 abort();
2819 return ram_addr;
/* Read handler for unassigned physical memory: logs (when enabled),
 * raises a target-specific unassigned-access fault on some CPUs, and
 * reads as zero. */
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

/* Write handler for unassigned physical memory: the value is dropped. */
static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Accessors that must never be reached: RAM/ROM accesses are handled
 * by fast paths, so dispatching here indicates a bug. */
static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* ROM: reads go through the fast path (never here); writes are
 * silently discarded like unassigned memory. */
static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Write handler installed on pages whose dirty bits are clean: performs
 * the RAM store, invalidates any TBs translated from the page, and sets
 * the dirty flags.  Once the page is fully dirty the TLB entry is
 * switched back to the fast path. */
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* The page may contain translated code: invalidate it first. */
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.
 * OFFSET is the in-page offset of the access, LEN_MASK masks the access
 * width, FLAGS selects BP_MEM_READ/BP_MEM_WRITE.  On a hit the current
 * TB is invalidated and execution restarts either with EXCP_DEBUG
 * (stop-before-access) or via a single-instruction TB. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Match either direction of containment between the access and
           the watchpoint range, and the access kind. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    /* Re-translate a single instruction and resume so the
                       access completes before the debug exception. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
2959 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2960 so these check for a hit then pass through to the normal out-of-line
2961 phys routines. */
2962 static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
2963 unsigned size)
2965 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2966 switch (size) {
2967 case 1: return ldub_phys(addr);
2968 case 2: return lduw_phys(addr);
2969 case 4: return ldl_phys(addr);
2970 default: abort();
2974 static void watch_mem_write(void *opaque, target_phys_addr_t addr,
2975 uint64_t val, unsigned size)
2977 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
2978 switch (size) {
2979 case 1:
2980 stb_phys(addr, val);
2981 break;
2982 case 2:
2983 stw_phys(addr, val);
2984 break;
2985 case 4:
2986 stl_phys(addr, val);
2987 break;
2988 default: abort();
2992 static const MemoryRegionOps watch_mem_ops = {
2993 .read = watch_mem_read,
2994 .write = watch_mem_write,
2995 .endianness = DEVICE_NATIVE_ENDIAN,
2998 static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
2999 unsigned len)
3001 subpage_t *mmio = opaque;
3002 unsigned int idx = SUBPAGE_IDX(addr);
3003 MemoryRegionSection *section;
3004 #if defined(DEBUG_SUBPAGE)
3005 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3006 mmio, len, addr, idx);
3007 #endif
3009 section = &phys_sections[mmio->sub_section[idx]];
3010 addr += mmio->base;
3011 addr -= section->offset_within_address_space;
3012 addr += section->offset_within_region;
3013 return io_mem_read(section->mr, addr, len);
3016 static void subpage_write(void *opaque, target_phys_addr_t addr,
3017 uint64_t value, unsigned len)
3019 subpage_t *mmio = opaque;
3020 unsigned int idx = SUBPAGE_IDX(addr);
3021 MemoryRegionSection *section;
3022 #if defined(DEBUG_SUBPAGE)
3023 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3024 " idx %d value %"PRIx64"\n",
3025 __func__, mmio, len, addr, idx, value);
3026 #endif
3028 section = &phys_sections[mmio->sub_section[idx]];
3029 addr += mmio->base;
3030 addr -= section->offset_within_address_space;
3031 addr += section->offset_within_region;
3032 io_mem_write(section->mr, addr, value, len);
3035 static const MemoryRegionOps subpage_ops = {
3036 .read = subpage_read,
3037 .write = subpage_write,
3038 .endianness = DEVICE_NATIVE_ENDIAN,
3041 static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3042 unsigned size)
3044 ram_addr_t raddr = addr;
3045 void *ptr = qemu_get_ram_ptr(raddr);
3046 switch (size) {
3047 case 1: return ldub_p(ptr);
3048 case 2: return lduw_p(ptr);
3049 case 4: return ldl_p(ptr);
3050 default: abort();
3054 static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3055 uint64_t value, unsigned size)
3057 ram_addr_t raddr = addr;
3058 void *ptr = qemu_get_ram_ptr(raddr);
3059 switch (size) {
3060 case 1: return stb_p(ptr, value);
3061 case 2: return stw_p(ptr, value);
3062 case 4: return stl_p(ptr, value);
3063 default: abort();
3067 static const MemoryRegionOps subpage_ram_ops = {
3068 .read = subpage_ram_read,
3069 .write = subpage_ram_write,
3070 .endianness = DEVICE_NATIVE_ENDIAN,
3073 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3074 uint16_t section)
3076 int idx, eidx;
3078 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3079 return -1;
3080 idx = SUBPAGE_IDX(start);
3081 eidx = SUBPAGE_IDX(end);
3082 #if defined(DEBUG_SUBPAGE)
3083 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3084 mmio, start, end, idx, eidx, memory);
3085 #endif
3086 if (memory_region_is_ram(phys_sections[section].mr)) {
3087 MemoryRegionSection new_section = phys_sections[section];
3088 new_section.mr = &io_mem_subpage_ram;
3089 section = phys_section_add(&new_section);
3091 for (; idx <= eidx; idx++) {
3092 mmio->sub_section[idx] = section;
3095 return 0;
3098 static subpage_t *subpage_init(target_phys_addr_t base)
3100 subpage_t *mmio;
3102 mmio = g_malloc0(sizeof(subpage_t));
3104 mmio->base = base;
3105 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3106 "subpage", TARGET_PAGE_SIZE);
3107 mmio->iomem.subpage = true;
3108 #if defined(DEBUG_SUBPAGE)
3109 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3110 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3111 #endif
3112 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
3114 return mmio;
/* Register a section for MR that covers the whole address space and
 * return its phys_sections index; used for the fixed sections
 * (unassigned, notdirty, rom, watch) installed in core_begin(). */
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}
/* Map an IOTLB entry (phys_sections index stored in the sub-page bits)
 * back to its MemoryRegion. */
MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
/* Create the fixed I/O memory regions used by the softmmu dispatch
 * tables (RAM/ROM fast-path placeholders, unassigned, notdirty,
 * subpage-ram and watchpoint regions). */
static void io_mem_init(void)
{
    /* ram/rom reads never reach these ops; error_mem_read aborts. */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
/* MemoryListener begin hook: throw away the previous phys map and
 * sections and re-create the four fixed sections; region_add/region_nop
 * callbacks will then repopulate the map. */
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}
/* MemoryListener commit hook: the phys map changed, so flush every
 * CPU's TLB, which caches ram addresses derived from the old map. */
static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* Listener callbacks for the core (system memory) address space.
 * add/nop both (re)register the section since core_begin() cleared the
 * whole map; del is a no-op for the same reason. */
static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

/* Per-region dirty logging is handled elsewhere; only the global
 * on/off switch matters to the core listener. */
static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

/* ioeventfds are consumed by accelerator-specific listeners, not here. */
static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}
/* Listener callbacks for the legacy I/O-port address space: regions are
 * bridged onto the old ioport tables; everything else is a no-op. */
static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    /* Wrap the region in an IORange and register it with the legacy
       ioport layer.  NOTE(review): mrio appears to be owned by the
       ioport table after registration — confirm it is freed on del. */
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

/* Dirty logging and ioeventfds are meaningless for port I/O. */
static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}
/* Listener driving the softmmu phys map for the system address space. */
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};
/* Listener bridging the I/O-port address space to the legacy ioport
 * tables. */
static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};
/* Create the root system-memory and I/O address spaces and attach the
 * listeners that keep the softmmu dispatch structures in sync. */
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    /* 64Ki ports, the x86-style I/O space. */
    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}
/* Accessor for the root system-memory region created in memory_map_init(). */
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

/* Accessor for the root I/O-port region created in memory_map_init(). */
MemoryRegion *get_system_io(void)
{
    return system_io;
}
3347 #endif /* !defined(CONFIG_USER_ONLY) */
3349 /* physical memory access (slow version, mainly for debug) */
3350 #if defined(CONFIG_USER_ONLY)
/* User-mode debug accessor: copy LEN bytes between BUF and guest
 * virtual memory at ADDR, page by page, honouring the page protection
 * flags.  Returns 0 on success, -1 if any page is invalid or lacks the
 * required permission. */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3390 #else
/* Copy LEN bytes between BUF and guest physical memory at ADDR.
 * RAM is accessed directly (with TB invalidation and dirty tracking on
 * writes); MMIO is split into the widest naturally aligned 4/2/1-byte
 * accesses.  Writes to read-only sections are silently dropped. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3482 /* used for ROM loading : can write in RAM and ROM */
3483 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3484 const uint8_t *buf, int len)
3486 int l;
3487 uint8_t *ptr;
3488 target_phys_addr_t page;
3489 MemoryRegionSection *section;
3491 while (len > 0) {
3492 page = addr & TARGET_PAGE_MASK;
3493 l = (page + TARGET_PAGE_SIZE) - addr;
3494 if (l > len)
3495 l = len;
3496 section = phys_page_find(page >> TARGET_PAGE_BITS);
3498 if (!(memory_region_is_ram(section->mr) ||
3499 memory_region_is_romd(section->mr))) {
3500 /* do nothing */
3501 } else {
3502 unsigned long addr1;
3503 addr1 = memory_region_get_ram_addr(section->mr)
3504 + memory_region_section_addr(section, addr);
3505 /* ROM/RAM case */
3506 ptr = qemu_get_ram_ptr(addr1);
3507 memcpy(ptr, buf, l);
3508 qemu_put_ram_ptr(ptr);
3510 len -= l;
3511 buf += l;
3512 addr += l;
/* Single static bounce buffer used when cpu_physical_memory_map() hits
 * a non-RAM region; only one such mapping can be outstanding. */
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

/* Callback registration for callers waiting for the bounce buffer to
 * become free so they can retry a failed map. */
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3533 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3535 MapClient *client = g_malloc(sizeof(*client));
3537 client->opaque = opaque;
3538 client->callback = callback;
3539 QLIST_INSERT_HEAD(&map_client_list, client, link);
3540 return client;
3543 void cpu_unregister_map_client(void *_client)
3545 MapClient *client = (MapClient *)_client;
3547 QLIST_REMOVE(client, link);
3548 g_free(client);
3551 static void cpu_notify_map_clients(void)
3553 MapClient *client;
3555 while (!QLIST_EMPTY(&map_client_list)) {
3556 client = QLIST_FIRST(&map_client_list);
3557 client->callback(client->opaque);
3558 cpu_unregister_map_client(client);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            /* Non-RAM (or readonly): fall back to the single bounce
               buffer, covering at most one page.  If we already mapped
               some RAM, or the buffer is busy, stop here. */
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill for the caller's read. */
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            /* Remember where the contiguous RAM run starts. */
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    /* qemu_ram_ptr_length() may shorten the run to one RAM block. */
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: update dirty state page by page. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    /* Bounce-buffer case: flush writes back to guest memory, release the
     * buffer, and wake up anyone waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
/* Load a 32-bit word from guest physical memory, honouring the requested
 * device endianness.  warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case: the handler returns target-endian data, so swap only
         * when the requested endian differs from the target's. */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}
/* 32-bit physical loads in target-native, little and big endianness. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* Load a 64-bit word from guest physical memory.  The I/O path issues two
 * 4-byte accesses because io_mem_read handles at most 4 bytes here.
 * warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}
/* 64-bit physical loads in target-native, little and big endianness. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
3775 /* XXX: optimize */
3776 uint32_t ldub_phys(target_phys_addr_t addr)
3778 uint8_t val;
3779 cpu_physical_memory_read(addr, &val, 1);
3780 return val;
3783 /* warning: addr must be aligned */
3784 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3785 enum device_endian endian)
3787 uint8_t *ptr;
3788 uint64_t val;
3789 MemoryRegionSection *section;
3791 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3793 if (!(memory_region_is_ram(section->mr) ||
3794 memory_region_is_romd(section->mr))) {
3795 /* I/O case */
3796 addr = memory_region_section_addr(section, addr);
3797 val = io_mem_read(section->mr, addr, 2);
3798 #if defined(TARGET_WORDS_BIGENDIAN)
3799 if (endian == DEVICE_LITTLE_ENDIAN) {
3800 val = bswap16(val);
3802 #else
3803 if (endian == DEVICE_BIG_ENDIAN) {
3804 val = bswap16(val);
3806 #endif
3807 } else {
3808 /* RAM case */
3809 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3810 & TARGET_PAGE_MASK)
3811 + memory_region_section_addr(section, addr));
3812 switch (endian) {
3813 case DEVICE_LITTLE_ENDIAN:
3814 val = lduw_le_p(ptr);
3815 break;
3816 case DEVICE_BIG_ENDIAN:
3817 val = lduw_be_p(ptr);
3818 break;
3819 default:
3820 val = lduw_p(ptr);
3821 break;
3824 return val;
/* 16-bit physical loads in target-native, little and big endianness. */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            /* Read-only RAM: route the store to the ROM handler. */
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            /* During migration the page must still be marked dirty so it
             * gets re-sent to the destination. */
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
/* 64-bit variant of stl_phys_notdirty: store without dirty marking or
 * code invalidation.  The I/O path splits into two 4-byte writes. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            /* Read-only RAM: route the store to the ROM handler. */
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
/* Store a 32-bit word to guest physical memory, honouring the requested
 * device endianness, with dirty tracking and TB invalidation.
 * warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            /* Read-only RAM: route the store to the ROM handler. */
            section = &phys_sections[phys_section_rom];
        }
        /* The handler expects target-endian data, so pre-swap when the
         * requested endian differs from the target's. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
/* 32-bit physical stores in target-native, little and big endianness. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
3970 /* XXX: optimize */
3971 void stb_phys(target_phys_addr_t addr, uint32_t val)
3973 uint8_t v = val;
3974 cpu_physical_memory_write(addr, &v, 1);
/* Store a 16-bit word to guest physical memory, honouring the requested
 * device endianness, with dirty tracking and TB invalidation.
 * warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            /* Read-only RAM: route the store to the ROM handler. */
            section = &phys_sections[phys_section_rom];
        }
        /* The handler expects target-endian data, so pre-swap when the
         * requested endian differs from the target's. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
/* 16-bit physical stores in target-native, little and big endianness. */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* 64-bit physical stores.  The value is converted to the required
 * endianness in place, then written through the generic rw path.
 * XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);     /* host -> target endianness */
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
4062 /* virtual memory access for debug (includes writing to ROM) */
4063 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
4064 uint8_t *buf, int len, int is_write)
4066 int l;
4067 target_phys_addr_t phys_addr;
4068 target_ulong page;
4070 while (len > 0) {
4071 page = addr & TARGET_PAGE_MASK;
4072 phys_addr = cpu_get_phys_page_debug(env, page);
4073 /* if no physical page mapped, return an error */
4074 if (phys_addr == -1)
4075 return -1;
4076 l = (page + TARGET_PAGE_SIZE) - addr;
4077 if (l > len)
4078 l = len;
4079 phys_addr += (addr & ~TARGET_PAGE_MASK);
4080 if (is_write)
4081 cpu_physical_memory_write_rom(phys_addr, buf, l);
4082 else
4083 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4084 len -= l;
4085 buf += l;
4086 addr += l;
4088 return 0;
4090 #endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    /* icount budget that was assigned to this TB before execution. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the I/O insn to be the last one in the new TB. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4151 #if !defined(CONFIG_USER_ONLY)
/* Print translation-buffer statistics (TB counts, sizes, jump-chaining
 * and flush counters) to the given stream via cpu_fprintf. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Aggregate per-TB statistics over all currently-translated blocks. */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
    /* Compile-time property of the target CPU, not the host. */
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
4219 #endif
4221 #ifndef CONFIG_USER_ONLY
4222 bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4224 MemoryRegionSection *section;
4226 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4228 return !(memory_region_is_ram(section->mr) ||
4229 memory_region_is_romd(section->mr));
4231 #endif