cputlb: prepare private memory API for public consumption
[qemu.git] / exec.c (blob 62d4140f12828757699e8e381a418a60e0d16787)
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "osdep.h"
33 #include "kvm.h"
34 #include "hw/xen.h"
35 #include "qemu-timer.h"
36 #include "memory.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <machine/profile.h>
48 #define _KERNEL
49 #include <sys/user.h>
50 #undef _KERNEL
51 #undef sigqueue
52 #include <libutil.h>
53 #endif
54 #endif
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
57 #include "trace.h"
58 #endif
60 #include "cputlb.h"
62 #define WANT_EXEC_OBSOLETE
63 #include "exec-obsolete.h"
65 //#define DEBUG_TB_INVALIDATE
66 //#define DEBUG_FLUSH
67 //#define DEBUG_UNASSIGNED
69 /* make various TB consistency checks */
70 //#define DEBUG_TB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
77 #undef DEBUG_TB_CHECK
78 #endif
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32) && !defined(_WIN64)
97 #define code_gen_section \
98 __attribute__((aligned (16)))
99 #else
100 #define code_gen_section \
101 __attribute__((aligned (32)))
102 #endif
104 uint8_t code_gen_prologue[1024] code_gen_section;
105 static uint8_t *code_gen_buffer;
106 static unsigned long code_gen_buffer_size;
107 /* threshold to flush the translated code buffer */
108 static unsigned long code_gen_buffer_max_size;
109 static uint8_t *code_gen_ptr;
111 #if !defined(CONFIG_USER_ONLY)
112 int phys_ram_fd;
113 static int in_migration;
115 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117 static MemoryRegion *system_memory;
118 static MemoryRegion *system_io;
120 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
121 static MemoryRegion io_mem_subpage_ram;
123 #endif
125 CPUArchState *first_cpu;
126 /* current CPU in the current thread. It is only valid inside
127 cpu_exec() */
128 DEFINE_TLS(CPUArchState *,cpu_single_env);
129 /* 0 = Do not count executed instructions.
130 1 = Precise instruction counting.
131 2 = Adaptive rate instruction counting. */
132 int use_icount = 0;
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
146 /* In system mode we want L1_MAP to be based on ram offsets,
147 while in user mode we want it to be based on virtual addresses. */
148 #if !defined(CONFIG_USER_ONLY)
149 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
150 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
151 #else
152 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
153 #endif
154 #else
155 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
156 #endif
158 /* Size of the L2 (and L3, etc) page tables. */
159 #define L2_BITS 10
160 #define L2_SIZE (1 << L2_BITS)
162 #define P_L2_LEVELS \
163 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
165 /* The bits remaining after N lower levels of page tables. */
166 #define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
169 #if V_L1_BITS_REM < 4
170 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
171 #else
172 #define V_L1_BITS V_L1_BITS_REM
173 #endif
175 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
177 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
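/* Illustrative sizing, purely as an example and not a build requirement:
   assume user mode with a 32-bit guest virtual address space and 4 KiB
   target pages (L1_MAP_ADDR_SPACE_BITS = 32, TARGET_PAGE_BITS = 12).
   Then 20 bits of page index remain, V_L1_BITS_REM = 20 % 10 = 0, so
   V_L1_BITS becomes 10, V_L1_SIZE is 1024 level-1 entries and
   V_L1_SHIFT is 10; with no intermediate levels, each l1_map[] slot
   points directly at an array of 1024 PageDesc entries. */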
179 uintptr_t qemu_real_host_page_size;
180 uintptr_t qemu_host_page_size;
181 uintptr_t qemu_host_page_mask;
183 /* This is a multi-level map on the virtual address space.
184 The bottom level has pointers to PageDesc. */
185 static void *l1_map[V_L1_SIZE];
187 #if !defined(CONFIG_USER_ONLY)
188 typedef struct PhysPageEntry PhysPageEntry;
190 static MemoryRegionSection *phys_sections;
191 static unsigned phys_sections_nb, phys_sections_nb_alloc;
192 static uint16_t phys_section_unassigned;
193 static uint16_t phys_section_notdirty;
194 static uint16_t phys_section_rom;
195 static uint16_t phys_section_watch;
197 struct PhysPageEntry {
198 uint16_t is_leaf : 1;
199 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
200 uint16_t ptr : 15;
203 /* Simple allocator for PhysPageEntry nodes */
204 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
205 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
207 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
209 /* This is a multi-level map on the physical address space.
210 The bottom level has pointers to MemoryRegionSections. */
211 static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
213 static void io_mem_init(void);
214 static void memory_map_init(void);
216 static MemoryRegion io_mem_watch;
217 #endif
219 /* log support */
220 #ifdef WIN32
221 static const char *logfilename = "qemu.log";
222 #else
223 static const char *logfilename = "/tmp/qemu.log";
224 #endif
225 FILE *logfile;
226 int loglevel;
227 static int log_append = 0;
229 /* statistics */
230 static int tb_flush_count;
231 static int tb_phys_invalidate_count;
233 #ifdef _WIN32
234 static void map_exec(void *addr, long size)
236 DWORD old_protect;
237 VirtualProtect(addr, size,
238 PAGE_EXECUTE_READWRITE, &old_protect);
241 #else
242 static void map_exec(void *addr, long size)
244 unsigned long start, end, page_size;
246 page_size = getpagesize();
247 start = (unsigned long)addr;
248 start &= ~(page_size - 1);
250 end = (unsigned long)addr + size;
251 end += page_size - 1;
252 end &= ~(page_size - 1);
254 mprotect((void *)start, end - start,
255 PROT_READ | PROT_WRITE | PROT_EXEC);
257 #endif
259 static void page_init(void)
261 /* NOTE: we can always suppose that qemu_host_page_size >=
262 TARGET_PAGE_SIZE */
263 #ifdef _WIN32
265 SYSTEM_INFO system_info;
267 GetSystemInfo(&system_info);
268 qemu_real_host_page_size = system_info.dwPageSize;
270 #else
271 qemu_real_host_page_size = getpagesize();
272 #endif
273 if (qemu_host_page_size == 0)
274 qemu_host_page_size = qemu_real_host_page_size;
275 if (qemu_host_page_size < TARGET_PAGE_SIZE)
276 qemu_host_page_size = TARGET_PAGE_SIZE;
277 qemu_host_page_mask = ~(qemu_host_page_size - 1);
279 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
281 #ifdef HAVE_KINFO_GETVMMAP
282 struct kinfo_vmentry *freep;
283 int i, cnt;
285 freep = kinfo_getvmmap(getpid(), &cnt);
286 if (freep) {
287 mmap_lock();
288 for (i = 0; i < cnt; i++) {
289 unsigned long startaddr, endaddr;
291 startaddr = freep[i].kve_start;
292 endaddr = freep[i].kve_end;
293 if (h2g_valid(startaddr)) {
294 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
296 if (h2g_valid(endaddr)) {
297 endaddr = h2g(endaddr);
298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
299 } else {
300 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
301 endaddr = ~0ul;
302 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
303 #endif
307 free(freep);
308 mmap_unlock();
310 #else
311 FILE *f;
313 last_brk = (unsigned long)sbrk(0);
315 f = fopen("/compat/linux/proc/self/maps", "r");
316 if (f) {
317 mmap_lock();
319 do {
320 unsigned long startaddr, endaddr;
321 int n;
323 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
325 if (n == 2 && h2g_valid(startaddr)) {
326 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
328 if (h2g_valid(endaddr)) {
329 endaddr = h2g(endaddr);
330 } else {
331 endaddr = ~0ul;
333 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
335 } while (!feof(f));
337 fclose(f);
338 mmap_unlock();
340 #endif
342 #endif
345 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
347 PageDesc *pd;
348 void **lp;
349 int i;
351 #if defined(CONFIG_USER_ONLY)
352 /* We can't use g_malloc because it may recurse into a locked mutex. */
353 # define ALLOC(P, SIZE) \
354 do { \
355 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
356 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
357 } while (0)
358 #else
359 # define ALLOC(P, SIZE) \
360 do { P = g_malloc0(SIZE); } while (0)
361 #endif
363 /* Level 1. Always allocated. */
364 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
366 /* Level 2..N-1. */
367 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
368 void **p = *lp;
370 if (p == NULL) {
371 if (!alloc) {
372 return NULL;
374 ALLOC(p, sizeof(void *) * L2_SIZE);
375 *lp = p;
378 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
381 pd = *lp;
382 if (pd == NULL) {
383 if (!alloc) {
384 return NULL;
386 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
387 *lp = pd;
390 #undef ALLOC
392 return pd + (index & (L2_SIZE - 1));
395 static inline PageDesc *page_find(tb_page_addr_t index)
397 return page_find_alloc(index, 0);
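/* Note: page_find() is the non-allocating variant: because it passes
   alloc == 0, a lookup for a page whose intermediate tables were never
   created simply returns NULL, whereas page_find_alloc(index, 1) builds
   the missing levels on demand. */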
400 #if !defined(CONFIG_USER_ONLY)
402 static void phys_map_node_reserve(unsigned nodes)
404 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
405 typedef PhysPageEntry Node[L2_SIZE];
406 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
407 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
408 phys_map_nodes_nb + nodes);
409 phys_map_nodes = g_renew(Node, phys_map_nodes,
410 phys_map_nodes_nb_alloc);
414 static uint16_t phys_map_node_alloc(void)
416 unsigned i;
417 uint16_t ret;
419 ret = phys_map_nodes_nb++;
420 assert(ret != PHYS_MAP_NODE_NIL);
421 assert(ret != phys_map_nodes_nb_alloc);
422 for (i = 0; i < L2_SIZE; ++i) {
423 phys_map_nodes[ret][i].is_leaf = 0;
424 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
426 return ret;
429 static void phys_map_nodes_reset(void)
431 phys_map_nodes_nb = 0;
435 static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
436 target_phys_addr_t *nb, uint16_t leaf,
437 int level)
439 PhysPageEntry *p;
440 int i;
441 target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);
443 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
444 lp->ptr = phys_map_node_alloc();
445 p = phys_map_nodes[lp->ptr];
446 if (level == 0) {
447 for (i = 0; i < L2_SIZE; i++) {
448 p[i].is_leaf = 1;
449 p[i].ptr = phys_section_unassigned;
452 } else {
453 p = phys_map_nodes[lp->ptr];
455 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
457 while (*nb && lp < &p[L2_SIZE]) {
458 if ((*index & (step - 1)) == 0 && *nb >= step) {
459 lp->is_leaf = true;
460 lp->ptr = leaf;
461 *index += step;
462 *nb -= step;
463 } else {
464 phys_page_set_level(lp, index, nb, leaf, level - 1);
466 ++lp;
470 static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
471 uint16_t leaf)
473 /* Wildly overreserve - it doesn't matter much. */
474 phys_map_node_reserve(3 * P_L2_LEVELS);
476 phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
479 MemoryRegionSection *phys_page_find(target_phys_addr_t index)
481 PhysPageEntry lp = phys_map;
482 PhysPageEntry *p;
483 int i;
484 uint16_t s_index = phys_section_unassigned;
486 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
487 if (lp.ptr == PHYS_MAP_NODE_NIL) {
488 goto not_found;
490 p = phys_map_nodes[lp.ptr];
491 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
494 s_index = lp.ptr;
495 not_found:
496 return &phys_sections[s_index];
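/* Usage sketch (illustrative only; 'idx' stands for a phys_sections index
   registered elsewhere in this file):

       phys_page_set(gpa >> TARGET_PAGE_BITS, npages, idx);
       ...
       MemoryRegionSection *s = phys_page_find(gpa >> TARGET_PAGE_BITS);
       assert(s == &phys_sections[idx]);

   Pages that were never passed to phys_page_set() resolve to
   phys_section_unassigned instead. */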
499 bool memory_region_is_unassigned(MemoryRegion *mr)
501 return mr != &io_mem_ram && mr != &io_mem_rom
502 && mr != &io_mem_notdirty && !mr->rom_device
503 && mr != &io_mem_watch;
506 target_phys_addr_t memory_region_section_addr(MemoryRegionSection *section,
507 target_phys_addr_t addr)
509 addr -= section->offset_within_address_space;
510 addr += section->offset_within_region;
511 return addr;
514 #define mmap_lock() do { } while(0)
515 #define mmap_unlock() do { } while(0)
516 #endif
518 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
520 #if defined(CONFIG_USER_ONLY)
521 /* Currently it is not recommended to allocate big chunks of data in
522 user mode. It will change when a dedicated libc will be used */
523 #define USE_STATIC_CODE_GEN_BUFFER
524 #endif
526 #ifdef USE_STATIC_CODE_GEN_BUFFER
527 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
528 __attribute__((aligned (CODE_GEN_ALIGN)));
529 #endif
531 static void code_gen_alloc(unsigned long tb_size)
533 #ifdef USE_STATIC_CODE_GEN_BUFFER
534 code_gen_buffer = static_code_gen_buffer;
535 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
536 map_exec(code_gen_buffer, code_gen_buffer_size);
537 #else
538 code_gen_buffer_size = tb_size;
539 if (code_gen_buffer_size == 0) {
540 #if defined(CONFIG_USER_ONLY)
541 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
542 #else
543 /* XXX: needs adjustments */
544 code_gen_buffer_size = (unsigned long)(ram_size / 4);
545 #endif
547 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
548 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
549 /* The code gen buffer location may have constraints depending on
550 the host cpu and OS */
551 #if defined(__linux__)
553 int flags;
554 void *start = NULL;
556 flags = MAP_PRIVATE | MAP_ANONYMOUS;
557 #if defined(__x86_64__)
558 flags |= MAP_32BIT;
559 /* Cannot map more than that */
560 if (code_gen_buffer_size > (800 * 1024 * 1024))
561 code_gen_buffer_size = (800 * 1024 * 1024);
562 #elif defined(__sparc_v9__)
563 // Map the buffer below 2G, so we can use direct calls and branches
564 flags |= MAP_FIXED;
565 start = (void *) 0x60000000UL;
566 if (code_gen_buffer_size > (512 * 1024 * 1024))
567 code_gen_buffer_size = (512 * 1024 * 1024);
568 #elif defined(__arm__)
569 /* Keep the buffer no bigger than 16MB to branch between blocks */
570 if (code_gen_buffer_size > 16 * 1024 * 1024)
571 code_gen_buffer_size = 16 * 1024 * 1024;
572 #elif defined(__s390x__)
573 /* Map the buffer so that we can use direct calls and branches. */
574 /* We have a +- 4GB range on the branches; leave some slop. */
575 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
576 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
578 start = (void *)0x90000000UL;
579 #endif
580 code_gen_buffer = mmap(start, code_gen_buffer_size,
581 PROT_WRITE | PROT_READ | PROT_EXEC,
582 flags, -1, 0);
583 if (code_gen_buffer == MAP_FAILED) {
584 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
585 exit(1);
588 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
589 || defined(__DragonFly__) || defined(__OpenBSD__) \
590 || defined(__NetBSD__)
592 int flags;
593 void *addr = NULL;
594 flags = MAP_PRIVATE | MAP_ANONYMOUS;
595 #if defined(__x86_64__)
596 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
597 * 0x40000000 is free */
598 flags |= MAP_FIXED;
599 addr = (void *)0x40000000;
600 /* Cannot map more than that */
601 if (code_gen_buffer_size > (800 * 1024 * 1024))
602 code_gen_buffer_size = (800 * 1024 * 1024);
603 #elif defined(__sparc_v9__)
604 // Map the buffer below 2G, so we can use direct calls and branches
605 flags |= MAP_FIXED;
606 addr = (void *) 0x60000000UL;
607 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
608 code_gen_buffer_size = (512 * 1024 * 1024);
610 #endif
611 code_gen_buffer = mmap(addr, code_gen_buffer_size,
612 PROT_WRITE | PROT_READ | PROT_EXEC,
613 flags, -1, 0);
614 if (code_gen_buffer == MAP_FAILED) {
615 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
616 exit(1);
619 #else
620 code_gen_buffer = g_malloc(code_gen_buffer_size);
621 map_exec(code_gen_buffer, code_gen_buffer_size);
622 #endif
623 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
624 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
625 code_gen_buffer_max_size = code_gen_buffer_size -
626 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
627 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
628 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
631 /* Must be called before using the QEMU cpus. 'tb_size' is the size
632 (in bytes) allocated to the translation buffer. Zero means default
633 size. */
634 void tcg_exec_init(unsigned long tb_size)
636 cpu_gen_init();
637 code_gen_alloc(tb_size);
638 code_gen_ptr = code_gen_buffer;
639 tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
640 page_init();
641 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
642 /* There's no guest base to take into account, so go ahead and
643 initialize the prologue now. */
644 tcg_prologue_init(&tcg_ctx);
645 #endif
648 bool tcg_enabled(void)
650 return code_gen_buffer != NULL;
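/* Note: code_gen_buffer is only assigned inside code_gen_alloc(), so
   tcg_enabled() stays false until tcg_exec_init() has run (for example
   in a configuration that never initializes TCG; that caller behaviour
   is an assumption, the check itself is not). */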
653 void cpu_exec_init_all(void)
655 #if !defined(CONFIG_USER_ONLY)
656 memory_map_init();
657 io_mem_init();
658 #endif
661 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
663 static int cpu_common_post_load(void *opaque, int version_id)
665 CPUArchState *env = opaque;
667 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
668 version_id is increased. */
669 env->interrupt_request &= ~0x01;
670 tlb_flush(env, 1);
672 return 0;
675 static const VMStateDescription vmstate_cpu_common = {
676 .name = "cpu_common",
677 .version_id = 1,
678 .minimum_version_id = 1,
679 .minimum_version_id_old = 1,
680 .post_load = cpu_common_post_load,
681 .fields = (VMStateField []) {
682 VMSTATE_UINT32(halted, CPUArchState),
683 VMSTATE_UINT32(interrupt_request, CPUArchState),
684 VMSTATE_END_OF_LIST()
687 #endif
689 CPUArchState *qemu_get_cpu(int cpu)
691 CPUArchState *env = first_cpu;
693 while (env) {
694 if (env->cpu_index == cpu)
695 break;
696 env = env->next_cpu;
699 return env;
702 void cpu_exec_init(CPUArchState *env)
704 CPUArchState **penv;
705 int cpu_index;
707 #if defined(CONFIG_USER_ONLY)
708 cpu_list_lock();
709 #endif
710 env->next_cpu = NULL;
711 penv = &first_cpu;
712 cpu_index = 0;
713 while (*penv != NULL) {
714 penv = &(*penv)->next_cpu;
715 cpu_index++;
717 env->cpu_index = cpu_index;
718 env->numa_node = 0;
719 QTAILQ_INIT(&env->breakpoints);
720 QTAILQ_INIT(&env->watchpoints);
721 #ifndef CONFIG_USER_ONLY
722 env->thread_id = qemu_get_thread_id();
723 #endif
724 *penv = env;
725 #if defined(CONFIG_USER_ONLY)
726 cpu_list_unlock();
727 #endif
728 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
729 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
730 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
731 cpu_save, cpu_load, env);
732 #endif
735 /* Allocate a new translation block. Flush the translation buffer if
736 too many translation blocks or too much generated code. */
737 static TranslationBlock *tb_alloc(target_ulong pc)
739 TranslationBlock *tb;
741 if (nb_tbs >= code_gen_max_blocks ||
742 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
743 return NULL;
744 tb = &tbs[nb_tbs++];
745 tb->pc = pc;
746 tb->cflags = 0;
747 return tb;
750 void tb_free(TranslationBlock *tb)
752 /* In practice this is mostly used for single use temporary TB
753 Ignore the hard cases and just back up if this TB happens to
754 be the last one generated. */
755 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
756 code_gen_ptr = tb->tc_ptr;
757 nb_tbs--;
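/* Note: only the most recently allocated TB can be reclaimed here; any
   other TB keeps its tbs[] slot and generated code until the next
   tb_flush(). */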
761 static inline void invalidate_page_bitmap(PageDesc *p)
763 if (p->code_bitmap) {
764 g_free(p->code_bitmap);
765 p->code_bitmap = NULL;
767 p->code_write_count = 0;
770 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
772 static void page_flush_tb_1 (int level, void **lp)
774 int i;
776 if (*lp == NULL) {
777 return;
779 if (level == 0) {
780 PageDesc *pd = *lp;
781 for (i = 0; i < L2_SIZE; ++i) {
782 pd[i].first_tb = NULL;
783 invalidate_page_bitmap(pd + i);
785 } else {
786 void **pp = *lp;
787 for (i = 0; i < L2_SIZE; ++i) {
788 page_flush_tb_1 (level - 1, pp + i);
793 static void page_flush_tb(void)
795 int i;
796 for (i = 0; i < V_L1_SIZE; i++) {
797 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
801 /* flush all the translation blocks */
802 /* XXX: tb_flush is currently not thread safe */
803 void tb_flush(CPUArchState *env1)
805 CPUArchState *env;
806 #if defined(DEBUG_FLUSH)
807 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
808 (unsigned long)(code_gen_ptr - code_gen_buffer),
809 nb_tbs, nb_tbs > 0 ?
810 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
811 #endif
812 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
813 cpu_abort(env1, "Internal error: code buffer overflow\n");
815 nb_tbs = 0;
817 for(env = first_cpu; env != NULL; env = env->next_cpu) {
818 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
821 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
822 page_flush_tb();
824 code_gen_ptr = code_gen_buffer;
825 /* XXX: flush processor icache at this point if cache flush is
826 expensive */
827 tb_flush_count++;
830 #ifdef DEBUG_TB_CHECK
832 static void tb_invalidate_check(target_ulong address)
834 TranslationBlock *tb;
835 int i;
836 address &= TARGET_PAGE_MASK;
837 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
838 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
839 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
840 address >= tb->pc + tb->size)) {
841 printf("ERROR invalidate: address=" TARGET_FMT_lx
842 " PC=%08lx size=%04x\n",
843 address, (long)tb->pc, tb->size);
849 /* verify that all the pages have correct rights for code */
850 static void tb_page_check(void)
852 TranslationBlock *tb;
853 int i, flags1, flags2;
855 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
856 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
857 flags1 = page_get_flags(tb->pc);
858 flags2 = page_get_flags(tb->pc + tb->size - 1);
859 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
860 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
861 (long)tb->pc, tb->size, flags1, flags2);
867 #endif
869 /* invalidate one TB */
870 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
871 int next_offset)
873 TranslationBlock *tb1;
874 for(;;) {
875 tb1 = *ptb;
876 if (tb1 == tb) {
877 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
878 break;
880 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
884 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
886 TranslationBlock *tb1;
887 unsigned int n1;
889 for(;;) {
890 tb1 = *ptb;
891 n1 = (uintptr_t)tb1 & 3;
892 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
893 if (tb1 == tb) {
894 *ptb = tb1->page_next[n1];
895 break;
897 ptb = &tb1->page_next[n1];
901 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
903 TranslationBlock *tb1, **ptb;
904 unsigned int n1;
906 ptb = &tb->jmp_next[n];
907 tb1 = *ptb;
908 if (tb1) {
909 /* find tb(n) in circular list */
910 for(;;) {
911 tb1 = *ptb;
912 n1 = (uintptr_t)tb1 & 3;
913 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
914 if (n1 == n && tb1 == tb)
915 break;
916 if (n1 == 2) {
917 ptb = &tb1->jmp_first;
918 } else {
919 ptb = &tb1->jmp_next[n1];
922 /* now we can suppress tb(n) from the list */
923 *ptb = tb->jmp_next[n];
925 tb->jmp_next[n] = NULL;
929 /* reset the jump entry 'n' of a TB so that it is not chained to
930 another TB */
931 static inline void tb_reset_jump(TranslationBlock *tb, int n)
933 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
936 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
938 CPUArchState *env;
939 PageDesc *p;
940 unsigned int h, n1;
941 tb_page_addr_t phys_pc;
942 TranslationBlock *tb1, *tb2;
944 /* remove the TB from the hash list */
945 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
946 h = tb_phys_hash_func(phys_pc);
947 tb_remove(&tb_phys_hash[h], tb,
948 offsetof(TranslationBlock, phys_hash_next));
950 /* remove the TB from the page list */
951 if (tb->page_addr[0] != page_addr) {
952 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
953 tb_page_remove(&p->first_tb, tb);
954 invalidate_page_bitmap(p);
956 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
957 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
958 tb_page_remove(&p->first_tb, tb);
959 invalidate_page_bitmap(p);
962 tb_invalidated_flag = 1;
964 /* remove the TB from the hash list */
965 h = tb_jmp_cache_hash_func(tb->pc);
966 for(env = first_cpu; env != NULL; env = env->next_cpu) {
967 if (env->tb_jmp_cache[h] == tb)
968 env->tb_jmp_cache[h] = NULL;
971 /* suppress this TB from the two jump lists */
972 tb_jmp_remove(tb, 0);
973 tb_jmp_remove(tb, 1);
975 /* suppress any remaining jumps to this TB */
976 tb1 = tb->jmp_first;
977 for(;;) {
978 n1 = (uintptr_t)tb1 & 3;
979 if (n1 == 2)
980 break;
981 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
982 tb2 = tb1->jmp_next[n1];
983 tb_reset_jump(tb1, n1);
984 tb1->jmp_next[n1] = NULL;
985 tb1 = tb2;
987 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
989 tb_phys_invalidate_count++;
992 static inline void set_bits(uint8_t *tab, int start, int len)
994 int end, mask, end1;
996 end = start + len;
997 tab += start >> 3;
998 mask = 0xff << (start & 7);
999 if ((start & ~7) == (end & ~7)) {
1000 if (start < end) {
1001 mask &= ~(0xff << (end & 7));
1002 *tab |= mask;
1004 } else {
1005 *tab++ |= mask;
1006 start = (start + 8) & ~7;
1007 end1 = end & ~7;
1008 while (start < end1) {
1009 *tab++ = 0xff;
1010 start += 8;
1012 if (start < end) {
1013 mask = ~(0xff << (end & 7));
1014 *tab |= mask;
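/* Worked example: set_bits(tab, 3, 7) marks bits 3..9, i.e. the call
   takes the "else" branch above and performs tab[0] |= 0xf8 (bits 3-7)
   followed by tab[1] |= 0x03 (bits 8-9). build_page_bitmap() below
   relies on this to mark which byte offsets of a guest page are covered
   by translated code. */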
1019 static void build_page_bitmap(PageDesc *p)
1021 int n, tb_start, tb_end;
1022 TranslationBlock *tb;
1024 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
1026 tb = p->first_tb;
1027 while (tb != NULL) {
1028 n = (uintptr_t)tb & 3;
1029 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1030 /* NOTE: this is subtle as a TB may span two physical pages */
1031 if (n == 0) {
1032 /* NOTE: tb_end may be after the end of the page, but
1033 it is not a problem */
1034 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1035 tb_end = tb_start + tb->size;
1036 if (tb_end > TARGET_PAGE_SIZE)
1037 tb_end = TARGET_PAGE_SIZE;
1038 } else {
1039 tb_start = 0;
1040 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1042 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1043 tb = tb->page_next[n];
1047 TranslationBlock *tb_gen_code(CPUArchState *env,
1048 target_ulong pc, target_ulong cs_base,
1049 int flags, int cflags)
1051 TranslationBlock *tb;
1052 uint8_t *tc_ptr;
1053 tb_page_addr_t phys_pc, phys_page2;
1054 target_ulong virt_page2;
1055 int code_gen_size;
1057 phys_pc = get_page_addr_code(env, pc);
1058 tb = tb_alloc(pc);
1059 if (!tb) {
1060 /* flush must be done */
1061 tb_flush(env);
1062 /* cannot fail at this point */
1063 tb = tb_alloc(pc);
1064 /* Don't forget to invalidate previous TB info. */
1065 tb_invalidated_flag = 1;
1067 tc_ptr = code_gen_ptr;
1068 tb->tc_ptr = tc_ptr;
1069 tb->cs_base = cs_base;
1070 tb->flags = flags;
1071 tb->cflags = cflags;
1072 cpu_gen_code(env, tb, &code_gen_size);
1073 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1074 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1076 /* check next page if needed */
1077 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1078 phys_page2 = -1;
1079 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1080 phys_page2 = get_page_addr_code(env, virt_page2);
1082 tb_link_page(tb, phys_pc, phys_page2);
1083 return tb;
1086 /* invalidate all TBs which intersect with the target physical page
1087 starting in range [start;end[. NOTE: start and end must refer to
1088 the same physical page. 'is_cpu_write_access' should be true if called
1089 from a real cpu write access: the virtual CPU will exit the current
1090 TB if code is modified inside this TB. */
1091 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1092 int is_cpu_write_access)
1094 TranslationBlock *tb, *tb_next, *saved_tb;
1095 CPUArchState *env = cpu_single_env;
1096 tb_page_addr_t tb_start, tb_end;
1097 PageDesc *p;
1098 int n;
1099 #ifdef TARGET_HAS_PRECISE_SMC
1100 int current_tb_not_found = is_cpu_write_access;
1101 TranslationBlock *current_tb = NULL;
1102 int current_tb_modified = 0;
1103 target_ulong current_pc = 0;
1104 target_ulong current_cs_base = 0;
1105 int current_flags = 0;
1106 #endif /* TARGET_HAS_PRECISE_SMC */
1108 p = page_find(start >> TARGET_PAGE_BITS);
1109 if (!p)
1110 return;
1111 if (!p->code_bitmap &&
1112 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1113 is_cpu_write_access) {
1114 /* build code bitmap */
1115 build_page_bitmap(p);
1118 /* we remove all the TBs in the range [start, end[ */
1119 /* XXX: see if in some cases it could be faster to invalidate all the code */
1120 tb = p->first_tb;
1121 while (tb != NULL) {
1122 n = (uintptr_t)tb & 3;
1123 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1124 tb_next = tb->page_next[n];
1125 /* NOTE: this is subtle as a TB may span two physical pages */
1126 if (n == 0) {
1127 /* NOTE: tb_end may be after the end of the page, but
1128 it is not a problem */
1129 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1130 tb_end = tb_start + tb->size;
1131 } else {
1132 tb_start = tb->page_addr[1];
1133 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1135 if (!(tb_end <= start || tb_start >= end)) {
1136 #ifdef TARGET_HAS_PRECISE_SMC
1137 if (current_tb_not_found) {
1138 current_tb_not_found = 0;
1139 current_tb = NULL;
1140 if (env->mem_io_pc) {
1141 /* now we have a real cpu fault */
1142 current_tb = tb_find_pc(env->mem_io_pc);
1145 if (current_tb == tb &&
1146 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1147 /* If we are modifying the current TB, we must stop
1148 its execution. We could be more precise by checking
1149 that the modification is after the current PC, but it
1150 would require a specialized function to partially
1151 restore the CPU state */
1153 current_tb_modified = 1;
1154 cpu_restore_state(current_tb, env, env->mem_io_pc);
1155 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1156 &current_flags);
1158 #endif /* TARGET_HAS_PRECISE_SMC */
1159 /* we need to do that to handle the case where a signal
1160 occurs while doing tb_phys_invalidate() */
1161 saved_tb = NULL;
1162 if (env) {
1163 saved_tb = env->current_tb;
1164 env->current_tb = NULL;
1166 tb_phys_invalidate(tb, -1);
1167 if (env) {
1168 env->current_tb = saved_tb;
1169 if (env->interrupt_request && env->current_tb)
1170 cpu_interrupt(env, env->interrupt_request);
1173 tb = tb_next;
1175 #if !defined(CONFIG_USER_ONLY)
1176 /* if no code remaining, no need to continue to use slow writes */
1177 if (!p->first_tb) {
1178 invalidate_page_bitmap(p);
1179 if (is_cpu_write_access) {
1180 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1183 #endif
1184 #ifdef TARGET_HAS_PRECISE_SMC
1185 if (current_tb_modified) {
1186 /* we generate a block containing just the instruction
1187 modifying the memory. It will ensure that it cannot modify
1188 itself */
1189 env->current_tb = NULL;
1190 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1191 cpu_resume_from_signal(env, NULL);
1193 #endif
1196 /* len must be <= 8 and start must be a multiple of len */
1197 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1199 PageDesc *p;
1200 int offset, b;
1201 #if 0
1202 if (1) {
1203 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1204 cpu_single_env->mem_io_vaddr, len,
1205 cpu_single_env->eip,
1206 cpu_single_env->eip +
1207 (intptr_t)cpu_single_env->segs[R_CS].base);
1209 #endif
1210 p = page_find(start >> TARGET_PAGE_BITS);
1211 if (!p)
1212 return;
1213 if (p->code_bitmap) {
1214 offset = start & ~TARGET_PAGE_MASK;
1215 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1216 if (b & ((1 << len) - 1))
1217 goto do_invalidate;
1218 } else {
1219 do_invalidate:
1220 tb_invalidate_phys_page_range(start, start + len, 1);
1224 #if !defined(CONFIG_SOFTMMU)
1225 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1226 uintptr_t pc, void *puc)
1228 TranslationBlock *tb;
1229 PageDesc *p;
1230 int n;
1231 #ifdef TARGET_HAS_PRECISE_SMC
1232 TranslationBlock *current_tb = NULL;
1233 CPUArchState *env = cpu_single_env;
1234 int current_tb_modified = 0;
1235 target_ulong current_pc = 0;
1236 target_ulong current_cs_base = 0;
1237 int current_flags = 0;
1238 #endif
1240 addr &= TARGET_PAGE_MASK;
1241 p = page_find(addr >> TARGET_PAGE_BITS);
1242 if (!p)
1243 return;
1244 tb = p->first_tb;
1245 #ifdef TARGET_HAS_PRECISE_SMC
1246 if (tb && pc != 0) {
1247 current_tb = tb_find_pc(pc);
1249 #endif
1250 while (tb != NULL) {
1251 n = (uintptr_t)tb & 3;
1252 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1253 #ifdef TARGET_HAS_PRECISE_SMC
1254 if (current_tb == tb &&
1255 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1256 /* If we are modifying the current TB, we must stop
1257 its execution. We could be more precise by checking
1258 that the modification is after the current PC, but it
1259 would require a specialized function to partially
1260 restore the CPU state */
1262 current_tb_modified = 1;
1263 cpu_restore_state(current_tb, env, pc);
1264 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1265 &current_flags);
1267 #endif /* TARGET_HAS_PRECISE_SMC */
1268 tb_phys_invalidate(tb, addr);
1269 tb = tb->page_next[n];
1271 p->first_tb = NULL;
1272 #ifdef TARGET_HAS_PRECISE_SMC
1273 if (current_tb_modified) {
1274 /* we generate a block containing just the instruction
1275 modifying the memory. It will ensure that it cannot modify
1276 itself */
1277 env->current_tb = NULL;
1278 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1279 cpu_resume_from_signal(env, puc);
1281 #endif
1283 #endif
1285 /* add the tb in the target page and protect it if necessary */
1286 static inline void tb_alloc_page(TranslationBlock *tb,
1287 unsigned int n, tb_page_addr_t page_addr)
1289 PageDesc *p;
1290 #ifndef CONFIG_USER_ONLY
1291 bool page_already_protected;
1292 #endif
1294 tb->page_addr[n] = page_addr;
1295 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1296 tb->page_next[n] = p->first_tb;
1297 #ifndef CONFIG_USER_ONLY
1298 page_already_protected = p->first_tb != NULL;
1299 #endif
1300 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1301 invalidate_page_bitmap(p);
1303 #if defined(TARGET_HAS_SMC) || 1
1305 #if defined(CONFIG_USER_ONLY)
1306 if (p->flags & PAGE_WRITE) {
1307 target_ulong addr;
1308 PageDesc *p2;
1309 int prot;
1311 /* force the host page as non writable (writes will have a
1312 page fault + mprotect overhead) */
1313 page_addr &= qemu_host_page_mask;
1314 prot = 0;
1315 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1316 addr += TARGET_PAGE_SIZE) {
1318 p2 = page_find (addr >> TARGET_PAGE_BITS);
1319 if (!p2)
1320 continue;
1321 prot |= p2->flags;
1322 p2->flags &= ~PAGE_WRITE;
1324 mprotect(g2h(page_addr), qemu_host_page_size,
1325 (prot & PAGE_BITS) & ~PAGE_WRITE);
1326 #ifdef DEBUG_TB_INVALIDATE
1327 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1328 page_addr);
1329 #endif
1331 #else
1332 /* if some code is already present, then the pages are already
1333 protected. So we handle the case where only the first TB is
1334 allocated in a physical page */
1335 if (!page_already_protected) {
1336 tlb_protect_code(page_addr);
1338 #endif
1340 #endif /* TARGET_HAS_SMC */
1343 /* add a new TB and link it to the physical page tables. phys_page2 is
1344 (-1) to indicate that only one page contains the TB. */
1345 void tb_link_page(TranslationBlock *tb,
1346 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1348 unsigned int h;
1349 TranslationBlock **ptb;
1351 /* Grab the mmap lock to stop another thread invalidating this TB
1352 before we are done. */
1353 mmap_lock();
1354 /* add in the physical hash table */
1355 h = tb_phys_hash_func(phys_pc);
1356 ptb = &tb_phys_hash[h];
1357 tb->phys_hash_next = *ptb;
1358 *ptb = tb;
1360 /* add in the page list */
1361 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1362 if (phys_page2 != -1)
1363 tb_alloc_page(tb, 1, phys_page2);
1364 else
1365 tb->page_addr[1] = -1;
1367 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1368 tb->jmp_next[0] = NULL;
1369 tb->jmp_next[1] = NULL;
1371 /* init original jump addresses */
1372 if (tb->tb_next_offset[0] != 0xffff)
1373 tb_reset_jump(tb, 0);
1374 if (tb->tb_next_offset[1] != 0xffff)
1375 tb_reset_jump(tb, 1);
1377 #ifdef DEBUG_TB_CHECK
1378 tb_page_check();
1379 #endif
1380 mmap_unlock();
1383 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1384 tb[1].tc_ptr. Return NULL if not found */
1385 TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1387 int m_min, m_max, m;
1388 uintptr_t v;
1389 TranslationBlock *tb;
1391 if (nb_tbs <= 0)
1392 return NULL;
1393 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1394 tc_ptr >= (uintptr_t)code_gen_ptr) {
1395 return NULL;
1397 /* binary search (cf Knuth) */
1398 m_min = 0;
1399 m_max = nb_tbs - 1;
1400 while (m_min <= m_max) {
1401 m = (m_min + m_max) >> 1;
1402 tb = &tbs[m];
1403 v = (uintptr_t)tb->tc_ptr;
1404 if (v == tc_ptr)
1405 return tb;
1406 else if (tc_ptr < v) {
1407 m_max = m - 1;
1408 } else {
1409 m_min = m + 1;
1412 return &tbs[m_max];
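/* Note: the binary search is valid because tb_alloc() hands out tbs[]
   entries in order and code_gen_ptr only grows between flushes, so the
   array is sorted by tc_ptr; when tc_ptr falls inside a block rather
   than on its first host instruction, &tbs[m_max] is the block that
   contains it. */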
1415 static void tb_reset_jump_recursive(TranslationBlock *tb);
1417 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1419 TranslationBlock *tb1, *tb_next, **ptb;
1420 unsigned int n1;
1422 tb1 = tb->jmp_next[n];
1423 if (tb1 != NULL) {
1424 /* find head of list */
1425 for(;;) {
1426 n1 = (uintptr_t)tb1 & 3;
1427 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1428 if (n1 == 2)
1429 break;
1430 tb1 = tb1->jmp_next[n1];
1432 /* we are now sure that tb jumps to tb1 */
1433 tb_next = tb1;
1435 /* remove tb from the jmp_first list */
1436 ptb = &tb_next->jmp_first;
1437 for(;;) {
1438 tb1 = *ptb;
1439 n1 = (uintptr_t)tb1 & 3;
1440 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1441 if (n1 == n && tb1 == tb)
1442 break;
1443 ptb = &tb1->jmp_next[n1];
1445 *ptb = tb->jmp_next[n];
1446 tb->jmp_next[n] = NULL;
1448 /* suppress the jump to next tb in generated code */
1449 tb_reset_jump(tb, n);
1451 /* suppress jumps in the tb on which we could have jumped */
1452 tb_reset_jump_recursive(tb_next);
1456 static void tb_reset_jump_recursive(TranslationBlock *tb)
1458 tb_reset_jump_recursive2(tb, 0);
1459 tb_reset_jump_recursive2(tb, 1);
1462 #if defined(TARGET_HAS_ICE)
1463 #if defined(CONFIG_USER_ONLY)
1464 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1466 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1468 #else
1469 void tb_invalidate_phys_addr(target_phys_addr_t addr)
1471 ram_addr_t ram_addr;
1472 MemoryRegionSection *section;
1474 section = phys_page_find(addr >> TARGET_PAGE_BITS);
1475 if (!(memory_region_is_ram(section->mr)
1476 || (section->mr->rom_device && section->mr->readable))) {
1477 return;
1479 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1480 + memory_region_section_addr(section, addr);
1481 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1484 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1486 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
1488 #endif
1489 #endif /* TARGET_HAS_ICE */
1491 #if defined(CONFIG_USER_ONLY)
1492 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1497 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1498 int flags, CPUWatchpoint **watchpoint)
1500 return -ENOSYS;
1502 #else
1503 /* Add a watchpoint. */
1504 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1505 int flags, CPUWatchpoint **watchpoint)
1507 target_ulong len_mask = ~(len - 1);
1508 CPUWatchpoint *wp;
1510 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1511 if ((len & (len - 1)) || (addr & ~len_mask) ||
1512 len == 0 || len > TARGET_PAGE_SIZE) {
1513 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1514 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1515 return -EINVAL;
1517 wp = g_malloc(sizeof(*wp));
1519 wp->vaddr = addr;
1520 wp->len_mask = len_mask;
1521 wp->flags = flags;
1523 /* keep all GDB-injected watchpoints in front */
1524 if (flags & BP_GDB)
1525 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1526 else
1527 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1529 tlb_flush_page(env, addr);
1531 if (watchpoint)
1532 *watchpoint = wp;
1533 return 0;
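/* Usage sketch (illustrative only; BP_GDB and BP_MEM_READ appear in this
   file, BP_MEM_WRITE is assumed to be defined alongside them):

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp)) {
           ... len must be a power of two no larger than TARGET_PAGE_SIZE
           ... and addr must be aligned to it, otherwise -EINVAL is returned
       }
*/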
1536 /* Remove a specific watchpoint. */
1537 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
1538 int flags)
1540 target_ulong len_mask = ~(len - 1);
1541 CPUWatchpoint *wp;
1543 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1544 if (addr == wp->vaddr && len_mask == wp->len_mask
1545 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1546 cpu_watchpoint_remove_by_ref(env, wp);
1547 return 0;
1550 return -ENOENT;
1553 /* Remove a specific watchpoint by reference. */
1554 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
1556 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1558 tlb_flush_page(env, watchpoint->vaddr);
1560 g_free(watchpoint);
1563 /* Remove all matching watchpoints. */
1564 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1566 CPUWatchpoint *wp, *next;
1568 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1569 if (wp->flags & mask)
1570 cpu_watchpoint_remove_by_ref(env, wp);
1573 #endif
1575 /* Add a breakpoint. */
1576 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
1577 CPUBreakpoint **breakpoint)
1579 #if defined(TARGET_HAS_ICE)
1580 CPUBreakpoint *bp;
1582 bp = g_malloc(sizeof(*bp));
1584 bp->pc = pc;
1585 bp->flags = flags;
1587 /* keep all GDB-injected breakpoints in front */
1588 if (flags & BP_GDB)
1589 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1590 else
1591 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1593 breakpoint_invalidate(env, pc);
1595 if (breakpoint)
1596 *breakpoint = bp;
1597 return 0;
1598 #else
1599 return -ENOSYS;
1600 #endif
1603 /* Remove a specific breakpoint. */
1604 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
1606 #if defined(TARGET_HAS_ICE)
1607 CPUBreakpoint *bp;
1609 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1610 if (bp->pc == pc && bp->flags == flags) {
1611 cpu_breakpoint_remove_by_ref(env, bp);
1612 return 0;
1615 return -ENOENT;
1616 #else
1617 return -ENOSYS;
1618 #endif
1621 /* Remove a specific breakpoint by reference. */
1622 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
1624 #if defined(TARGET_HAS_ICE)
1625 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1627 breakpoint_invalidate(env, breakpoint->pc);
1629 g_free(breakpoint);
1630 #endif
1633 /* Remove all matching breakpoints. */
1634 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
1636 #if defined(TARGET_HAS_ICE)
1637 CPUBreakpoint *bp, *next;
1639 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1640 if (bp->flags & mask)
1641 cpu_breakpoint_remove_by_ref(env, bp);
1643 #endif
1646 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1647 CPU loop after each instruction */
1648 void cpu_single_step(CPUArchState *env, int enabled)
1650 #if defined(TARGET_HAS_ICE)
1651 if (env->singlestep_enabled != enabled) {
1652 env->singlestep_enabled = enabled;
1653 if (kvm_enabled())
1654 kvm_update_guest_debug(env, 0);
1655 else {
1656 /* must flush all the translated code to avoid inconsistencies */
1657 /* XXX: only flush what is necessary */
1658 tb_flush(env);
1661 #endif
1664 /* enable or disable low levels log */
1665 void cpu_set_log(int log_flags)
1667 loglevel = log_flags;
1668 if (loglevel && !logfile) {
1669 logfile = fopen(logfilename, log_append ? "a" : "w");
1670 if (!logfile) {
1671 perror(logfilename);
1672 _exit(1);
1674 #if !defined(CONFIG_SOFTMMU)
1675 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1677 static char logfile_buf[4096];
1678 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1680 #elif defined(_WIN32)
1681 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1682 setvbuf(logfile, NULL, _IONBF, 0);
1683 #else
1684 setvbuf(logfile, NULL, _IOLBF, 0);
1685 #endif
1686 log_append = 1;
1688 if (!loglevel && logfile) {
1689 fclose(logfile);
1690 logfile = NULL;
1694 void cpu_set_log_filename(const char *filename)
1696 logfilename = strdup(filename);
1697 if (logfile) {
1698 fclose(logfile);
1699 logfile = NULL;
1701 cpu_set_log(loglevel);
1704 static void cpu_unlink_tb(CPUArchState *env)
1706 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1707 problem and hope the cpu will stop of its own accord. For userspace
1708 emulation this often isn't actually as bad as it sounds. Often
1709 signals are used primarily to interrupt blocking syscalls. */
1710 TranslationBlock *tb;
1711 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1713 spin_lock(&interrupt_lock);
1714 tb = env->current_tb;
1715 /* if the cpu is currently executing code, we must unlink it and
1716 all the potentially executing TB */
1717 if (tb) {
1718 env->current_tb = NULL;
1719 tb_reset_jump_recursive(tb);
1721 spin_unlock(&interrupt_lock);
1724 #ifndef CONFIG_USER_ONLY
1725 /* mask must never be zero, except for A20 change call */
1726 static void tcg_handle_interrupt(CPUArchState *env, int mask)
1728 int old_mask;
1730 old_mask = env->interrupt_request;
1731 env->interrupt_request |= mask;
1734 * If called from iothread context, wake the target cpu in
1735 * case it's halted.
1737 if (!qemu_cpu_is_self(env)) {
1738 qemu_cpu_kick(env);
1739 return;
1742 if (use_icount) {
1743 env->icount_decr.u16.high = 0xffff;
1744 if (!can_do_io(env)
1745 && (mask & ~old_mask) != 0) {
1746 cpu_abort(env, "Raised interrupt while not in I/O function");
1748 } else {
1749 cpu_unlink_tb(env);
1753 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1755 #else /* CONFIG_USER_ONLY */
1757 void cpu_interrupt(CPUArchState *env, int mask)
1759 env->interrupt_request |= mask;
1760 cpu_unlink_tb(env);
1762 #endif /* CONFIG_USER_ONLY */
1764 void cpu_reset_interrupt(CPUArchState *env, int mask)
1766 env->interrupt_request &= ~mask;
1769 void cpu_exit(CPUArchState *env)
1771 env->exit_request = 1;
1772 cpu_unlink_tb(env);
1775 const CPULogItem cpu_log_items[] = {
1776 { CPU_LOG_TB_OUT_ASM, "out_asm",
1777 "show generated host assembly code for each compiled TB" },
1778 { CPU_LOG_TB_IN_ASM, "in_asm",
1779 "show target assembly code for each compiled TB" },
1780 { CPU_LOG_TB_OP, "op",
1781 "show micro ops for each compiled TB" },
1782 { CPU_LOG_TB_OP_OPT, "op_opt",
1783 "show micro ops "
1784 #ifdef TARGET_I386
1785 "before eflags optimization and "
1786 #endif
1787 "after liveness analysis" },
1788 { CPU_LOG_INT, "int",
1789 "show interrupts/exceptions in short format" },
1790 { CPU_LOG_EXEC, "exec",
1791 "show trace before each executed TB (lots of logs)" },
1792 { CPU_LOG_TB_CPU, "cpu",
1793 "show CPU state before block translation" },
1794 #ifdef TARGET_I386
1795 { CPU_LOG_PCALL, "pcall",
1796 "show protected mode far calls/returns/exceptions" },
1797 { CPU_LOG_RESET, "cpu_reset",
1798 "show CPU state before CPU resets" },
1799 #endif
1800 #ifdef DEBUG_IOPORT
1801 { CPU_LOG_IOPORT, "ioport",
1802 "show all i/o ports accesses" },
1803 #endif
1804 { 0, NULL, NULL },
1807 static int cmp1(const char *s1, int n, const char *s2)
1809 if (strlen(s2) != n)
1810 return 0;
1811 return memcmp(s1, s2, n) == 0;
1814 /* takes a comma separated list of log masks. Return 0 if error. */
1815 int cpu_str_to_log_mask(const char *str)
1817 const CPULogItem *item;
1818 int mask;
1819 const char *p, *p1;
1821 p = str;
1822 mask = 0;
1823 for(;;) {
1824 p1 = strchr(p, ',');
1825 if (!p1)
1826 p1 = p + strlen(p);
1827 if(cmp1(p,p1-p,"all")) {
1828 for(item = cpu_log_items; item->mask != 0; item++) {
1829 mask |= item->mask;
1831 } else {
1832 for(item = cpu_log_items; item->mask != 0; item++) {
1833 if (cmp1(p, p1 - p, item->name))
1834 goto found;
1836 return 0;
1838 found:
1839 mask |= item->mask;
1840 if (*p1 != ',')
1841 break;
1842 p = p1 + 1;
1844 return mask;
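/* Example (grounded in the cpu_log_items table above):

       int mask = cpu_str_to_log_mask("in_asm,cpu");

   yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU; "all" selects every entry,
   and any unrecognized name makes the whole call return 0. */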
1847 void cpu_abort(CPUArchState *env, const char *fmt, ...)
1849 va_list ap;
1850 va_list ap2;
1852 va_start(ap, fmt);
1853 va_copy(ap2, ap);
1854 fprintf(stderr, "qemu: fatal: ");
1855 vfprintf(stderr, fmt, ap);
1856 fprintf(stderr, "\n");
1857 #ifdef TARGET_I386
1858 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1859 #else
1860 cpu_dump_state(env, stderr, fprintf, 0);
1861 #endif
1862 if (qemu_log_enabled()) {
1863 qemu_log("qemu: fatal: ");
1864 qemu_log_vprintf(fmt, ap2);
1865 qemu_log("\n");
1866 #ifdef TARGET_I386
1867 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1868 #else
1869 log_cpu_state(env, 0);
1870 #endif
1871 qemu_log_flush();
1872 qemu_log_close();
1874 va_end(ap2);
1875 va_end(ap);
1876 #if defined(CONFIG_USER_ONLY)
1878 struct sigaction act;
1879 sigfillset(&act.sa_mask);
1880 act.sa_handler = SIG_DFL;
1881 sigaction(SIGABRT, &act, NULL);
1883 #endif
1884 abort();
1887 CPUArchState *cpu_copy(CPUArchState *env)
1889 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1890 CPUArchState *next_cpu = new_env->next_cpu;
1891 int cpu_index = new_env->cpu_index;
1892 #if defined(TARGET_HAS_ICE)
1893 CPUBreakpoint *bp;
1894 CPUWatchpoint *wp;
1895 #endif
1897 memcpy(new_env, env, sizeof(CPUArchState));
1899 /* Preserve chaining and index. */
1900 new_env->next_cpu = next_cpu;
1901 new_env->cpu_index = cpu_index;
1903 /* Clone all break/watchpoints.
1904 Note: Once we support ptrace with hw-debug register access, make sure
1905 BP_CPU break/watchpoints are handled correctly on clone. */
1906 QTAILQ_INIT(&env->breakpoints);
1907 QTAILQ_INIT(&env->watchpoints);
1908 #if defined(TARGET_HAS_ICE)
1909 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1910 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1912 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1913 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1914 wp->flags, NULL);
1916 #endif
1918 return new_env;
1921 #if !defined(CONFIG_USER_ONLY)
1922 void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
1924 unsigned int i;
1926 /* Discard jump cache entries for any tb which might potentially
1927 overlap the flushed page. */
1928 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1929 memset (&env->tb_jmp_cache[i], 0,
1930 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1932 i = tb_jmp_cache_hash_page(addr);
1933 memset (&env->tb_jmp_cache[i], 0,
1934 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1937 /* Note: start and end must be within the same ram block. */
1938 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1939 int dirty_flags)
1941 uintptr_t length, start1;
1943 start &= TARGET_PAGE_MASK;
1944 end = TARGET_PAGE_ALIGN(end);
1946 length = end - start;
1947 if (length == 0)
1948 return;
1949 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1951 /* we modify the TLB cache so that the dirty bit will be set again
1952 when accessing the range */
1953 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
1954 /* Check that we don't span multiple blocks - this breaks the
1955 address comparisons below. */
1956 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
1957 != (end - 1) - start) {
1958 abort();
1960 cpu_tlb_reset_dirty_all(start1, length);
1963 int cpu_physical_memory_set_dirty_tracking(int enable)
1965 int ret = 0;
1966 in_migration = enable;
1967 return ret;
1970 target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1971 MemoryRegionSection *section,
1972 target_ulong vaddr,
1973 target_phys_addr_t paddr,
1974 int prot,
1975 target_ulong *address)
1977 target_phys_addr_t iotlb;
1978 CPUWatchpoint *wp;
1980 if (memory_region_is_ram(section->mr)) {
1981 /* Normal RAM. */
1982 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1983 + memory_region_section_addr(section, paddr);
1984 if (!section->readonly) {
1985 iotlb |= phys_section_notdirty;
1986 } else {
1987 iotlb |= phys_section_rom;
1989 } else {
1990 /* IO handlers are currently passed a physical address.
1991 It would be nice to pass an offset from the base address
1992 of that region. This would avoid having to special case RAM,
1993 and avoid full address decoding in every device.
1994 We can't use the high bits of pd for this because
1995 IO_MEM_ROMD uses these as a ram address. */
1996 iotlb = section - phys_sections;
1997 iotlb += memory_region_section_addr(section, paddr);
2000 /* Make accesses to pages with watchpoints go via the
2001 watchpoint trap routines. */
2002 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2003 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2004 /* Avoid trapping reads of pages with a write breakpoint. */
2005 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2006 iotlb = phys_section_watch + paddr;
2007 *address |= TLB_MMIO;
2008 break;
2013 return iotlb;
2016 #else
2018 * Walks guest process memory "regions" one by one
2019 * and calls callback function 'fn' for each region.
2022 struct walk_memory_regions_data
2024 walk_memory_regions_fn fn;
2025 void *priv;
2026 uintptr_t start;
2027 int prot;
2030 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2031 abi_ulong end, int new_prot)
2033 if (data->start != -1ul) {
2034 int rc = data->fn(data->priv, data->start, end, data->prot);
2035 if (rc != 0) {
2036 return rc;
2040 data->start = (new_prot ? end : -1ul);
2041 data->prot = new_prot;
2043 return 0;
2046 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2047 abi_ulong base, int level, void **lp)
2049 abi_ulong pa;
2050 int i, rc;
2052 if (*lp == NULL) {
2053 return walk_memory_regions_end(data, base, 0);
2056 if (level == 0) {
2057 PageDesc *pd = *lp;
2058 for (i = 0; i < L2_SIZE; ++i) {
2059 int prot = pd[i].flags;
2061 pa = base | (i << TARGET_PAGE_BITS);
2062 if (prot != data->prot) {
2063 rc = walk_memory_regions_end(data, pa, prot);
2064 if (rc != 0) {
2065 return rc;
2069 } else {
2070 void **pp = *lp;
2071 for (i = 0; i < L2_SIZE; ++i) {
2072 pa = base | ((abi_ulong)i <<
2073 (TARGET_PAGE_BITS + L2_BITS * level));
2074 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2075 if (rc != 0) {
2076 return rc;
2081 return 0;
2084 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2086 struct walk_memory_regions_data data;
2087 uintptr_t i;
2089 data.fn = fn;
2090 data.priv = priv;
2091 data.start = -1ul;
2092 data.prot = 0;
2094 for (i = 0; i < V_L1_SIZE; i++) {
2095 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2096 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2097 if (rc != 0) {
2098 return rc;
2102 return walk_memory_regions_end(&data, 0, 0);
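/* Note on the callback contract: fn(priv, start, end, prot) is invoked
   once per maximal run of consecutive pages sharing the same flags, with
   'end' exclusive; a non-zero return value aborts the walk and is
   propagated to the caller. The page_dump()/dump_region() pair below
   shows the typical usage. */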
2105 static int dump_region(void *priv, abi_ulong start,
2106 abi_ulong end, unsigned long prot)
2108 FILE *f = (FILE *)priv;
2110 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2111 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2112 start, end, end - start,
2113 ((prot & PAGE_READ) ? 'r' : '-'),
2114 ((prot & PAGE_WRITE) ? 'w' : '-'),
2115 ((prot & PAGE_EXEC) ? 'x' : '-'));
2117 return (0);
2120 /* dump memory mappings */
2121 void page_dump(FILE *f)
2123 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2124 "start", "end", "size", "prot");
2125 walk_memory_regions(f, dump_region);
2128 int page_get_flags(target_ulong address)
2130 PageDesc *p;
2132 p = page_find(address >> TARGET_PAGE_BITS);
2133 if (!p)
2134 return 0;
2135 return p->flags;
2138 /* Modify the flags of a page and invalidate the code if necessary.
2139 The flag PAGE_WRITE_ORG is positioned automatically depending
2140 on PAGE_WRITE. The mmap_lock should already be held. */
2141 void page_set_flags(target_ulong start, target_ulong end, int flags)
2143 target_ulong addr, len;
2145 /* This function should never be called with addresses outside the
2146 guest address space. If this assert fires, it probably indicates
2147 a missing call to h2g_valid. */
2148 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2149 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2150 #endif
2151 assert(start < end);
2153 start = start & TARGET_PAGE_MASK;
2154 end = TARGET_PAGE_ALIGN(end);
2156 if (flags & PAGE_WRITE) {
2157 flags |= PAGE_WRITE_ORG;
2160 for (addr = start, len = end - start;
2161 len != 0;
2162 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2163 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2165 /* If the write protection bit is set, then we invalidate
2166 the code inside. */
2167 if (!(p->flags & PAGE_WRITE) &&
2168 (flags & PAGE_WRITE) &&
2169 p->first_tb) {
2170 tb_invalidate_phys_page(addr, 0, NULL);
2172 p->flags = flags;
2176 int page_check_range(target_ulong start, target_ulong len, int flags)
2178 PageDesc *p;
2179 target_ulong end;
2180 target_ulong addr;
2182 /* This function should never be called with addresses outside the
2183 guest address space. If this assert fires, it probably indicates
2184 a missing call to h2g_valid. */
2185 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2186 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2187 #endif
2189 if (len == 0) {
2190 return 0;
2192 if (start + len - 1 < start) {
2193 /* We've wrapped around. */
2194 return -1;
2197 end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2198 start = start & TARGET_PAGE_MASK;
2200 for (addr = start, len = end - start;
2201 len != 0;
2202 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2203 p = page_find(addr >> TARGET_PAGE_BITS);
2204 if (!p)
2205 return -1;
2206 if (!(p->flags & PAGE_VALID))
2207 return -1;
2209 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2210 return -1;
2211 if (flags & PAGE_WRITE) {
2212 if (!(p->flags & PAGE_WRITE_ORG))
2213 return -1;
2214 /* unprotect the page if it was put read-only because it
2215 contains translated code */
2216 if (!(p->flags & PAGE_WRITE)) {
2217 if (!page_unprotect(addr, 0, NULL))
2218 return -1;
2220 return 0;
2223 return 0;
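#if 0   /* Illustrative sketch -- not compiled. */
/*
 * Typical use of page_check_range() in user-mode emulation: validate a
 * guest buffer before touching it.  'guest_addr' and 'len' are
 * hypothetical arguments; real callers usually go through access_ok()
 * style wrappers built on top of this function.
 */
static int example_validate_guest_buffer(target_ulong guest_addr,
                                         target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
        return -1;              /* unmapped, or lacks read/write rights */
    }
    return 0;
}
#endif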
2226 /* called from signal handler: invalidate the code and unprotect the
2227 page. Return TRUE if the fault was successfully handled. */
2228 int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
2230 unsigned int prot;
2231 PageDesc *p;
2232 target_ulong host_start, host_end, addr;
2234 /* Technically this isn't safe inside a signal handler. However we
2235 know this only ever happens in a synchronous SEGV handler, so in
2236 practice it seems to be ok. */
2237 mmap_lock();
2239 p = page_find(address >> TARGET_PAGE_BITS);
2240 if (!p) {
2241 mmap_unlock();
2242 return 0;
2245 /* if the page was really writable, then we change its
2246 protection back to writable */
2247 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2248 host_start = address & qemu_host_page_mask;
2249 host_end = host_start + qemu_host_page_size;
2251 prot = 0;
2252 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2253 p = page_find(addr >> TARGET_PAGE_BITS);
2254 p->flags |= PAGE_WRITE;
2255 prot |= p->flags;
2257 /* and since the content will be modified, we must invalidate
2258 the corresponding translated code. */
2259 tb_invalidate_phys_page(addr, pc, puc);
2260 #ifdef DEBUG_TB_CHECK
2261 tb_invalidate_check(addr);
2262 #endif
2264 mprotect((void *)g2h(host_start), qemu_host_page_size,
2265 prot & PAGE_BITS);
2267 mmap_unlock();
2268 return 1;
2270 mmap_unlock();
2271 return 0;
2273 #endif /* defined(CONFIG_USER_ONLY) */
2275 #if !defined(CONFIG_USER_ONLY)
2277 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2278 typedef struct subpage_t {
2279 MemoryRegion iomem;
2280 target_phys_addr_t base;
2281 uint16_t sub_section[TARGET_PAGE_SIZE];
2282 } subpage_t;
2284 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2285 uint16_t section);
2286 static subpage_t *subpage_init(target_phys_addr_t base);
2287 static void destroy_page_desc(uint16_t section_index)
2289 MemoryRegionSection *section = &phys_sections[section_index];
2290 MemoryRegion *mr = section->mr;
2292 if (mr->subpage) {
2293 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2294 memory_region_destroy(&subpage->iomem);
2295 g_free(subpage);
2299 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
2301 unsigned i;
2302 PhysPageEntry *p;
2304 if (lp->ptr == PHYS_MAP_NODE_NIL) {
2305 return;
2308 p = phys_map_nodes[lp->ptr];
2309 for (i = 0; i < L2_SIZE; ++i) {
2310 if (!p[i].is_leaf) {
2311 destroy_l2_mapping(&p[i], level - 1);
2312 } else {
2313 destroy_page_desc(p[i].ptr);
2316 lp->is_leaf = 0;
2317 lp->ptr = PHYS_MAP_NODE_NIL;
2320 static void destroy_all_mappings(void)
2322 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
2323 phys_map_nodes_reset();
2326 static uint16_t phys_section_add(MemoryRegionSection *section)
2328 if (phys_sections_nb == phys_sections_nb_alloc) {
2329 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2330 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2331 phys_sections_nb_alloc);
2333 phys_sections[phys_sections_nb] = *section;
2334 return phys_sections_nb++;
2337 static void phys_sections_clear(void)
2339 phys_sections_nb = 0;
2342 /* register physical memory.
2343 The section is split on target page boundaries: any partially
2344 covered page at the start or the end is routed through a subpage,
2345 and the page-aligned middle part, if any, is mapped directly to
2346 its MemoryRegionSection. */
2350 static void register_subpage(MemoryRegionSection *section)
2352 subpage_t *subpage;
2353 target_phys_addr_t base = section->offset_within_address_space
2354 & TARGET_PAGE_MASK;
2355 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
2356 MemoryRegionSection subsection = {
2357 .offset_within_address_space = base,
2358 .size = TARGET_PAGE_SIZE,
2360 target_phys_addr_t start, end;
2362 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
2364 if (!(existing->mr->subpage)) {
2365 subpage = subpage_init(base);
2366 subsection.mr = &subpage->iomem;
2367 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2368 phys_section_add(&subsection));
2369 } else {
2370 subpage = container_of(existing->mr, subpage_t, iomem);
2372 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2373 end = start + section->size;
2374 subpage_register(subpage, start, end, phys_section_add(section));
2378 static void register_multipage(MemoryRegionSection *section)
2380 target_phys_addr_t start_addr = section->offset_within_address_space;
2381 ram_addr_t size = section->size;
2382 target_phys_addr_t addr;
2383 uint16_t section_index = phys_section_add(section);
2385 assert(size);
2387 addr = start_addr;
2388 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2389 section_index);
2392 void cpu_register_physical_memory_log(MemoryRegionSection *section,
2393 bool readonly)
2395 MemoryRegionSection now = *section, remain = *section;
2397 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2398 || (now.size < TARGET_PAGE_SIZE)) {
2399 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2400 - now.offset_within_address_space,
2401 now.size);
2402 register_subpage(&now);
2403 remain.size -= now.size;
2404 remain.offset_within_address_space += now.size;
2405 remain.offset_within_region += now.size;
2407 now = remain;
2408 now.size &= TARGET_PAGE_MASK;
2409 if (now.size) {
2410 register_multipage(&now);
2411 remain.size -= now.size;
2412 remain.offset_within_address_space += now.size;
2413 remain.offset_within_region += now.size;
2415 now = remain;
2416 if (now.size) {
2417 register_subpage(&now);
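#if 0   /* Illustrative sketch -- not compiled. */
/*
 * Worked example of the splitting above, assuming 4 KiB target pages and
 * a hypothetical section that starts 0x200 bytes into a page and spans
 * 2.5 pages:
 *
 *   offset_within_address_space = 0x1200, size = 0x2800
 *   head   0x1200..0x1fff  -> register_subpage()
 *   middle 0x2000..0x2fff  -> register_multipage()
 *   tail   0x3000..0x39ff  -> register_subpage()
 */
static void example_split_arithmetic(void)
{
    target_phys_addr_t ofs = 0x1200, size = 0x2800;
    target_phys_addr_t head = TARGET_PAGE_ALIGN(ofs) - ofs;        /* 0xe00  */
    target_phys_addr_t middle = (size - head) & TARGET_PAGE_MASK;  /* 0x1000 */
    target_phys_addr_t tail = size - head - middle;                /* 0xa00  */

    (void)tail;
}
#endif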
2422 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2424 if (kvm_enabled())
2425 kvm_coalesce_mmio_region(addr, size);
2428 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2430 if (kvm_enabled())
2431 kvm_uncoalesce_mmio_region(addr, size);
2434 void qemu_flush_coalesced_mmio_buffer(void)
2436 if (kvm_enabled())
2437 kvm_flush_coalesced_mmio_buffer();
2440 #if defined(__linux__) && !defined(TARGET_S390X)
2442 #include <sys/vfs.h>
2444 #define HUGETLBFS_MAGIC 0x958458f6
2446 static long gethugepagesize(const char *path)
2448 struct statfs fs;
2449 int ret;
2451 do {
2452 ret = statfs(path, &fs);
2453 } while (ret != 0 && errno == EINTR);
2455 if (ret != 0) {
2456 perror(path);
2457 return 0;
2460 if (fs.f_type != HUGETLBFS_MAGIC)
2461 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2463 return fs.f_bsize;
2466 static void *file_ram_alloc(RAMBlock *block,
2467 ram_addr_t memory,
2468 const char *path)
2470 char *filename;
2471 void *area;
2472 int fd;
2473 #ifdef MAP_POPULATE
2474 int flags;
2475 #endif
2476 unsigned long hpagesize;
2478 hpagesize = gethugepagesize(path);
2479 if (!hpagesize) {
2480 return NULL;
2483 if (memory < hpagesize) {
2484 return NULL;
2487 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2488 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2489 return NULL;
2492 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2493 return NULL;
2496 fd = mkstemp(filename);
2497 if (fd < 0) {
2498 perror("unable to create backing store for hugepages");
2499 free(filename);
2500 return NULL;
2502 unlink(filename);
2503 free(filename);
2505 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2508 * ftruncate is not supported by hugetlbfs in older
2509 * hosts, so don't bother bailing out on errors.
2510 * If anything goes wrong with it under other filesystems,
2511 * mmap will fail.
2513 if (ftruncate(fd, memory))
2514 perror("ftruncate");
2516 #ifdef MAP_POPULATE
2517 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2518 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2519 * to sidestep this quirk.
2521 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2522 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2523 #else
2524 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2525 #endif
2526 if (area == MAP_FAILED) {
2527 perror("file_ram_alloc: can't mmap RAM pages");
2528 close(fd);
2529 return NULL;
2531 block->fd = fd;
2532 return area;
2534 #endif
2536 static ram_addr_t find_ram_offset(ram_addr_t size)
2538 RAMBlock *block, *next_block;
2539 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2541 if (QLIST_EMPTY(&ram_list.blocks))
2542 return 0;
2544 QLIST_FOREACH(block, &ram_list.blocks, next) {
2545 ram_addr_t end, next = RAM_ADDR_MAX;
2547 end = block->offset + block->length;
2549 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2550 if (next_block->offset >= end) {
2551 next = MIN(next, next_block->offset);
2554 if (next - end >= size && next - end < mingap) {
2555 offset = end;
2556 mingap = next - end;
2560 if (offset == RAM_ADDR_MAX) {
2561 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2562 (uint64_t)size);
2563 abort();
2566 return offset;
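#if 0   /* Illustrative sketch -- not compiled. */
/*
 * The search above, restated over a plain array so the nested loops are
 * easier to follow: for each existing block, find the nearest block that
 * starts at or after its end, and keep the smallest gap that still fits
 * the requested size.  The 'example_' type and names are hypothetical.
 */
struct example_block { ram_addr_t offset, length; };

static ram_addr_t example_find_gap(const struct example_block *blocks,
                                   int nb_blocks, ram_addr_t size)
{
    ram_addr_t best = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
    int i, j;

    for (i = 0; i < nb_blocks; i++) {
        ram_addr_t end = blocks[i].offset + blocks[i].length;
        ram_addr_t next = RAM_ADDR_MAX;

        for (j = 0; j < nb_blocks; j++) {
            if (blocks[j].offset >= end) {
                next = MIN(next, blocks[j].offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            best = end;
            mingap = next - end;
        }
    }
    return best;    /* RAM_ADDR_MAX if nothing fits */
}
#endif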
2569 static ram_addr_t last_ram_offset(void)
2571 RAMBlock *block;
2572 ram_addr_t last = 0;
2574 QLIST_FOREACH(block, &ram_list.blocks, next)
2575 last = MAX(last, block->offset + block->length);
2577 return last;
2580 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2582 RAMBlock *new_block, *block;
2584 new_block = NULL;
2585 QLIST_FOREACH(block, &ram_list.blocks, next) {
2586 if (block->offset == addr) {
2587 new_block = block;
2588 break;
2591 assert(new_block);
2592 assert(!new_block->idstr[0]);
2594 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2595 char *id = dev->parent_bus->info->get_dev_path(dev);
2596 if (id) {
2597 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2598 g_free(id);
2601 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2603 QLIST_FOREACH(block, &ram_list.blocks, next) {
2604 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2605 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2606 new_block->idstr);
2607 abort();
2612 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2613 MemoryRegion *mr)
2615 RAMBlock *new_block;
2617 size = TARGET_PAGE_ALIGN(size);
2618 new_block = g_malloc0(sizeof(*new_block));
2620 new_block->mr = mr;
2621 new_block->offset = find_ram_offset(size);
2622 if (host) {
2623 new_block->host = host;
2624 new_block->flags |= RAM_PREALLOC_MASK;
2625 } else {
2626 if (mem_path) {
2627 #if defined (__linux__) && !defined(TARGET_S390X)
2628 new_block->host = file_ram_alloc(new_block, size, mem_path);
2629 if (!new_block->host) {
2630 new_block->host = qemu_vmalloc(size);
2631 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2633 #else
2634 fprintf(stderr, "-mem-path option unsupported\n");
2635 exit(1);
2636 #endif
2637 } else {
2638 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2639 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2640 a system-defined value, which is at least 256GB. Larger systems
2641 have larger values. We put the guest between the end of data
2642 segment (system break) and this value. We use 32GB as a base to
2643 have enough room for the system break to grow. */
2644 new_block->host = mmap((void*)0x800000000, size,
2645 PROT_EXEC|PROT_READ|PROT_WRITE,
2646 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2647 if (new_block->host == MAP_FAILED) {
2648 fprintf(stderr, "Allocating RAM failed\n");
2649 abort();
2651 #else
2652 if (xen_enabled()) {
2653 xen_ram_alloc(new_block->offset, size, mr);
2654 } else {
2655 new_block->host = qemu_vmalloc(size);
2657 #endif
2658 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2661 new_block->length = size;
2663 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2665 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2666 last_ram_offset() >> TARGET_PAGE_BITS);
2667 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2668 0xff, size >> TARGET_PAGE_BITS);
2670 if (kvm_enabled())
2671 kvm_setup_guest_memory(new_block->host, size);
2673 return new_block->offset;
2676 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2678 return qemu_ram_alloc_from_ptr(size, NULL, mr);
2681 void qemu_ram_free_from_ptr(ram_addr_t addr)
2683 RAMBlock *block;
2685 QLIST_FOREACH(block, &ram_list.blocks, next) {
2686 if (addr == block->offset) {
2687 QLIST_REMOVE(block, next);
2688 g_free(block);
2689 return;
2694 void qemu_ram_free(ram_addr_t addr)
2696 RAMBlock *block;
2698 QLIST_FOREACH(block, &ram_list.blocks, next) {
2699 if (addr == block->offset) {
2700 QLIST_REMOVE(block, next);
2701 if (block->flags & RAM_PREALLOC_MASK) {
2703 } else if (mem_path) {
2704 #if defined (__linux__) && !defined(TARGET_S390X)
2705 if (block->fd) {
2706 munmap(block->host, block->length);
2707 close(block->fd);
2708 } else {
2709 qemu_vfree(block->host);
2711 #else
2712 abort();
2713 #endif
2714 } else {
2715 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2716 munmap(block->host, block->length);
2717 #else
2718 if (xen_enabled()) {
2719 xen_invalidate_map_cache_entry(block->host);
2720 } else {
2721 qemu_vfree(block->host);
2723 #endif
2725 g_free(block);
2726 return;
2732 #ifndef _WIN32
2733 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2735 RAMBlock *block;
2736 ram_addr_t offset;
2737 int flags;
2738 void *area, *vaddr;
2740 QLIST_FOREACH(block, &ram_list.blocks, next) {
2741 offset = addr - block->offset;
2742 if (offset < block->length) {
2743 vaddr = block->host + offset;
2744 if (block->flags & RAM_PREALLOC_MASK) {
2746 } else {
2747 flags = MAP_FIXED;
2748 munmap(vaddr, length);
2749 if (mem_path) {
2750 #if defined(__linux__) && !defined(TARGET_S390X)
2751 if (block->fd) {
2752 #ifdef MAP_POPULATE
2753 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2754 MAP_PRIVATE;
2755 #else
2756 flags |= MAP_PRIVATE;
2757 #endif
2758 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2759 flags, block->fd, offset);
2760 } else {
2761 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2762 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2763 flags, -1, 0);
2765 #else
2766 abort();
2767 #endif
2768 } else {
2769 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2770 flags |= MAP_SHARED | MAP_ANONYMOUS;
2771 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2772 flags, -1, 0);
2773 #else
2774 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2775 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2776 flags, -1, 0);
2777 #endif
2779 if (area != vaddr) {
2780 fprintf(stderr, "Could not remap addr: "
2781 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2782 length, addr);
2783 exit(1);
2785 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2787 return;
2791 #endif /* !_WIN32 */
2793 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2794 With the exception of the softmmu code in this file, this should
2795 only be used for local memory (e.g. video ram) that the device owns,
2796 and knows it isn't going to access beyond the end of the block.
2798 It should not be used for general purpose DMA.
2799 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2801 void *qemu_get_ram_ptr(ram_addr_t addr)
2803 RAMBlock *block;
2805 QLIST_FOREACH(block, &ram_list.blocks, next) {
2806 if (addr - block->offset < block->length) {
2807 /* Move this entry to the start of the list. */
2808 if (block != QLIST_FIRST(&ram_list.blocks)) {
2809 QLIST_REMOVE(block, next);
2810 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2812 if (xen_enabled()) {
2813 /* We need to check if the requested address is in the RAM
2814 * because we don't want to map the entire memory in QEMU.
2815 * In that case just map until the end of the page.
2817 if (block->offset == 0) {
2818 return xen_map_cache(addr, 0, 0);
2819 } else if (block->host == NULL) {
2820 block->host =
2821 xen_map_cache(block->offset, block->length, 1);
2824 return block->host + (addr - block->offset);
2828 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2829 abort();
2831 return NULL;
2834 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2835 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2837 void *qemu_safe_ram_ptr(ram_addr_t addr)
2839 RAMBlock *block;
2841 QLIST_FOREACH(block, &ram_list.blocks, next) {
2842 if (addr - block->offset < block->length) {
2843 if (xen_enabled()) {
2844 /* We need to check if the requested address is in the RAM
2845 * because we don't want to map the entire memory in QEMU.
2846 * In that case just map until the end of the page.
2848 if (block->offset == 0) {
2849 return xen_map_cache(addr, 0, 0);
2850 } else if (block->host == NULL) {
2851 block->host =
2852 xen_map_cache(block->offset, block->length, 1);
2855 return block->host + (addr - block->offset);
2859 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2860 abort();
2862 return NULL;
2865 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2866 * but takes a size argument */
2867 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
2869 if (*size == 0) {
2870 return NULL;
2872 if (xen_enabled()) {
2873 return xen_map_cache(addr, *size, 1);
2874 } else {
2875 RAMBlock *block;
2877 QLIST_FOREACH(block, &ram_list.blocks, next) {
2878 if (addr - block->offset < block->length) {
2879 if (addr - block->offset + *size > block->length)
2880 *size = block->length - addr + block->offset;
2881 return block->host + (addr - block->offset);
2885 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2886 abort();
2890 void qemu_put_ram_ptr(void *addr)
2892 trace_qemu_put_ram_ptr(addr);
2895 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2897 RAMBlock *block;
2898 uint8_t *host = ptr;
2900 if (xen_enabled()) {
2901 *ram_addr = xen_ram_addr_from_mapcache(ptr);
2902 return 0;
2905 QLIST_FOREACH(block, &ram_list.blocks, next) {
2906 /* This case appears when the block is not mapped. */
2907 if (block->host == NULL) {
2908 continue;
2910 if (host - block->host < block->length) {
2911 *ram_addr = block->offset + (host - block->host);
2912 return 0;
2916 return -1;
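#if 0   /* Illustrative sketch -- not compiled. */
/*
 * The two translations are inverses for addresses inside a RAM block: a
 * hypothetical round trip through qemu_get_ram_ptr() and
 * qemu_ram_addr_from_host() gets the original offset back.
 */
static void example_ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == addr);
    }
    qemu_put_ram_ptr(host);
}
#endif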
2919 /* Some of the softmmu routines need to translate from a host pointer
2920 (typically a TLB entry) back to a ram offset. */
2921 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2923 ram_addr_t ram_addr;
2925 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2926 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2927 abort();
2929 return ram_addr;
2932 static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2933 unsigned size)
2935 #ifdef DEBUG_UNASSIGNED
2936 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2937 #endif
2938 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2939 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
2940 #endif
2941 return 0;
2944 static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2945 uint64_t val, unsigned size)
2947 #ifdef DEBUG_UNASSIGNED
2948 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
2949 #endif
2950 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2951 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
2952 #endif
2955 static const MemoryRegionOps unassigned_mem_ops = {
2956 .read = unassigned_mem_read,
2957 .write = unassigned_mem_write,
2958 .endianness = DEVICE_NATIVE_ENDIAN,
2961 static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2962 unsigned size)
2964 abort();
2967 static void error_mem_write(void *opaque, target_phys_addr_t addr,
2968 uint64_t value, unsigned size)
2970 abort();
2973 static const MemoryRegionOps error_mem_ops = {
2974 .read = error_mem_read,
2975 .write = error_mem_write,
2976 .endianness = DEVICE_NATIVE_ENDIAN,
2979 static const MemoryRegionOps rom_mem_ops = {
2980 .read = error_mem_read,
2981 .write = unassigned_mem_write,
2982 .endianness = DEVICE_NATIVE_ENDIAN,
2985 static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2986 uint64_t val, unsigned size)
2988 int dirty_flags;
2989 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2990 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2991 #if !defined(CONFIG_USER_ONLY)
2992 tb_invalidate_phys_page_fast(ram_addr, size);
2993 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2994 #endif
2996 switch (size) {
2997 case 1:
2998 stb_p(qemu_get_ram_ptr(ram_addr), val);
2999 break;
3000 case 2:
3001 stw_p(qemu_get_ram_ptr(ram_addr), val);
3002 break;
3003 case 4:
3004 stl_p(qemu_get_ram_ptr(ram_addr), val);
3005 break;
3006 default:
3007 abort();
3009 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3010 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3011 /* we remove the notdirty callback only if the code has been
3012 flushed */
3013 if (dirty_flags == 0xff)
3014 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3017 static const MemoryRegionOps notdirty_mem_ops = {
3018 .read = error_mem_read,
3019 .write = notdirty_mem_write,
3020 .endianness = DEVICE_NATIVE_ENDIAN,
3023 /* Generate a debug exception if a watchpoint has been hit. */
3024 static void check_watchpoint(int offset, int len_mask, int flags)
3026 CPUArchState *env = cpu_single_env;
3027 target_ulong pc, cs_base;
3028 TranslationBlock *tb;
3029 target_ulong vaddr;
3030 CPUWatchpoint *wp;
3031 int cpu_flags;
3033 if (env->watchpoint_hit) {
3034 /* We re-entered the check after replacing the TB. Now raise
3035 * the debug interrupt so that it will trigger after the
3036 * current instruction. */
3037 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3038 return;
3040 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3041 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3042 if ((vaddr == (wp->vaddr & len_mask) ||
3043 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3044 wp->flags |= BP_WATCHPOINT_HIT;
3045 if (!env->watchpoint_hit) {
3046 env->watchpoint_hit = wp;
3047 tb = tb_find_pc(env->mem_io_pc);
3048 if (!tb) {
3049 cpu_abort(env, "check_watchpoint: could not find TB for "
3050 "pc=%p", (void *)env->mem_io_pc);
3052 cpu_restore_state(tb, env, env->mem_io_pc);
3053 tb_phys_invalidate(tb, -1);
3054 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3055 env->exception_index = EXCP_DEBUG;
3056 cpu_loop_exit(env);
3057 } else {
3058 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3059 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3060 cpu_resume_from_signal(env, NULL);
3063 } else {
3064 wp->flags &= ~BP_WATCHPOINT_HIT;
3069 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3070 so these check for a hit then pass through to the normal out-of-line
3071 phys routines. */
3072 static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3073 unsigned size)
3075 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3076 switch (size) {
3077 case 1: return ldub_phys(addr);
3078 case 2: return lduw_phys(addr);
3079 case 4: return ldl_phys(addr);
3080 default: abort();
3084 static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3085 uint64_t val, unsigned size)
3087 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3088 switch (size) {
3089 case 1:
3090 stb_phys(addr, val);
3091 break;
3092 case 2:
3093 stw_phys(addr, val);
3094 break;
3095 case 4:
3096 stl_phys(addr, val);
3097 break;
3098 default: abort();
3102 static const MemoryRegionOps watch_mem_ops = {
3103 .read = watch_mem_read,
3104 .write = watch_mem_write,
3105 .endianness = DEVICE_NATIVE_ENDIAN,
3108 static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3109 unsigned len)
3111 subpage_t *mmio = opaque;
3112 unsigned int idx = SUBPAGE_IDX(addr);
3113 MemoryRegionSection *section;
3114 #if defined(DEBUG_SUBPAGE)
3115 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3116 mmio, len, addr, idx);
3117 #endif
3119 section = &phys_sections[mmio->sub_section[idx]];
3120 addr += mmio->base;
3121 addr -= section->offset_within_address_space;
3122 addr += section->offset_within_region;
3123 return io_mem_read(section->mr, addr, len);
3126 static void subpage_write(void *opaque, target_phys_addr_t addr,
3127 uint64_t value, unsigned len)
3129 subpage_t *mmio = opaque;
3130 unsigned int idx = SUBPAGE_IDX(addr);
3131 MemoryRegionSection *section;
3132 #if defined(DEBUG_SUBPAGE)
3133 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3134 " idx %d value %"PRIx64"\n",
3135 __func__, mmio, len, addr, idx, value);
3136 #endif
3138 section = &phys_sections[mmio->sub_section[idx]];
3139 addr += mmio->base;
3140 addr -= section->offset_within_address_space;
3141 addr += section->offset_within_region;
3142 io_mem_write(section->mr, addr, value, len);
3145 static const MemoryRegionOps subpage_ops = {
3146 .read = subpage_read,
3147 .write = subpage_write,
3148 .endianness = DEVICE_NATIVE_ENDIAN,
3151 static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3152 unsigned size)
3154 ram_addr_t raddr = addr;
3155 void *ptr = qemu_get_ram_ptr(raddr);
3156 switch (size) {
3157 case 1: return ldub_p(ptr);
3158 case 2: return lduw_p(ptr);
3159 case 4: return ldl_p(ptr);
3160 default: abort();
3164 static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3165 uint64_t value, unsigned size)
3167 ram_addr_t raddr = addr;
3168 void *ptr = qemu_get_ram_ptr(raddr);
3169 switch (size) {
3170 case 1: return stb_p(ptr, value);
3171 case 2: return stw_p(ptr, value);
3172 case 4: return stl_p(ptr, value);
3173 default: abort();
3177 static const MemoryRegionOps subpage_ram_ops = {
3178 .read = subpage_ram_read,
3179 .write = subpage_ram_write,
3180 .endianness = DEVICE_NATIVE_ENDIAN,
3183 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3184 uint16_t section)
3186 int idx, eidx;
3188 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3189 return -1;
3190 idx = SUBPAGE_IDX(start);
3191 eidx = SUBPAGE_IDX(end);
3192 #if defined(DEBUG_SUBPAGE)
3193 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
3194 mmio, start, end, idx, eidx, section);
3195 #endif
3196 if (memory_region_is_ram(phys_sections[section].mr)) {
3197 MemoryRegionSection new_section = phys_sections[section];
3198 new_section.mr = &io_mem_subpage_ram;
3199 section = phys_section_add(&new_section);
3201 for (; idx <= eidx; idx++) {
3202 mmio->sub_section[idx] = section;
3205 return 0;
3208 static subpage_t *subpage_init(target_phys_addr_t base)
3210 subpage_t *mmio;
3212 mmio = g_malloc0(sizeof(subpage_t));
3214 mmio->base = base;
3215 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3216 "subpage", TARGET_PAGE_SIZE);
3217 mmio->iomem.subpage = true;
3218 #if defined(DEBUG_SUBPAGE)
3219 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3220 mmio, base, TARGET_PAGE_SIZE);
3221 #endif
3222 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
3224 return mmio;
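#if 0   /* Illustrative sketch -- not compiled. */
/*
 * How the two helpers above fit together: a freshly initialized subpage
 * routes the whole page to phys_section_unassigned, and sub-ranges are
 * then pointed at real sections.  The 64-byte window and the
 * 'device_section' index are hypothetical.
 */
static void example_subpage_usage(target_phys_addr_t page_base,
                                  uint16_t device_section)
{
    subpage_t *sp = subpage_init(page_base);

    /* route bytes 0x100..0x13f of the page to the device's section */
    subpage_register(sp, 0x100, 0x13f, device_section);
}
#endif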
3227 static uint16_t dummy_section(MemoryRegion *mr)
3229 MemoryRegionSection section = {
3230 .mr = mr,
3231 .offset_within_address_space = 0,
3232 .offset_within_region = 0,
3233 .size = UINT64_MAX,
3236 return phys_section_add(&section);
3239 MemoryRegion *iotlb_to_region(target_phys_addr_t index)
3241 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
3244 static void io_mem_init(void)
3246 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3247 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3248 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3249 "unassigned", UINT64_MAX);
3250 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3251 "notdirty", UINT64_MAX);
3252 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3253 "subpage-ram", UINT64_MAX);
3254 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3255 "watch", UINT64_MAX);
3258 static void core_begin(MemoryListener *listener)
3260 destroy_all_mappings();
3261 phys_sections_clear();
3262 phys_map.ptr = PHYS_MAP_NODE_NIL;
3263 phys_section_unassigned = dummy_section(&io_mem_unassigned);
3264 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3265 phys_section_rom = dummy_section(&io_mem_rom);
3266 phys_section_watch = dummy_section(&io_mem_watch);
3269 static void core_commit(MemoryListener *listener)
3271 CPUArchState *env;
3273 /* since each CPU stores ram addresses in its TLB cache, we must
3274 reset the modified entries */
3275 /* XXX: slow ! */
3276 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3277 tlb_flush(env, 1);
3281 static void core_region_add(MemoryListener *listener,
3282 MemoryRegionSection *section)
3284 cpu_register_physical_memory_log(section, section->readonly);
3287 static void core_region_del(MemoryListener *listener,
3288 MemoryRegionSection *section)
3292 static void core_region_nop(MemoryListener *listener,
3293 MemoryRegionSection *section)
3295 cpu_register_physical_memory_log(section, section->readonly);
3298 static void core_log_start(MemoryListener *listener,
3299 MemoryRegionSection *section)
3303 static void core_log_stop(MemoryListener *listener,
3304 MemoryRegionSection *section)
3308 static void core_log_sync(MemoryListener *listener,
3309 MemoryRegionSection *section)
3313 static void core_log_global_start(MemoryListener *listener)
3315 cpu_physical_memory_set_dirty_tracking(1);
3318 static void core_log_global_stop(MemoryListener *listener)
3320 cpu_physical_memory_set_dirty_tracking(0);
3323 static void core_eventfd_add(MemoryListener *listener,
3324 MemoryRegionSection *section,
3325 bool match_data, uint64_t data, int fd)
3329 static void core_eventfd_del(MemoryListener *listener,
3330 MemoryRegionSection *section,
3331 bool match_data, uint64_t data, int fd)
3335 static void io_begin(MemoryListener *listener)
3339 static void io_commit(MemoryListener *listener)
3343 static void io_region_add(MemoryListener *listener,
3344 MemoryRegionSection *section)
3346 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3348 mrio->mr = section->mr;
3349 mrio->offset = section->offset_within_region;
3350 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
3351 section->offset_within_address_space, section->size);
3352 ioport_register(&mrio->iorange);
3355 static void io_region_del(MemoryListener *listener,
3356 MemoryRegionSection *section)
3358 isa_unassign_ioport(section->offset_within_address_space, section->size);
3361 static void io_region_nop(MemoryListener *listener,
3362 MemoryRegionSection *section)
3366 static void io_log_start(MemoryListener *listener,
3367 MemoryRegionSection *section)
3371 static void io_log_stop(MemoryListener *listener,
3372 MemoryRegionSection *section)
3376 static void io_log_sync(MemoryListener *listener,
3377 MemoryRegionSection *section)
3381 static void io_log_global_start(MemoryListener *listener)
3385 static void io_log_global_stop(MemoryListener *listener)
3389 static void io_eventfd_add(MemoryListener *listener,
3390 MemoryRegionSection *section,
3391 bool match_data, uint64_t data, int fd)
3395 static void io_eventfd_del(MemoryListener *listener,
3396 MemoryRegionSection *section,
3397 bool match_data, uint64_t data, int fd)
3401 static MemoryListener core_memory_listener = {
3402 .begin = core_begin,
3403 .commit = core_commit,
3404 .region_add = core_region_add,
3405 .region_del = core_region_del,
3406 .region_nop = core_region_nop,
3407 .log_start = core_log_start,
3408 .log_stop = core_log_stop,
3409 .log_sync = core_log_sync,
3410 .log_global_start = core_log_global_start,
3411 .log_global_stop = core_log_global_stop,
3412 .eventfd_add = core_eventfd_add,
3413 .eventfd_del = core_eventfd_del,
3414 .priority = 0,
3417 static MemoryListener io_memory_listener = {
3418 .begin = io_begin,
3419 .commit = io_commit,
3420 .region_add = io_region_add,
3421 .region_del = io_region_del,
3422 .region_nop = io_region_nop,
3423 .log_start = io_log_start,
3424 .log_stop = io_log_stop,
3425 .log_sync = io_log_sync,
3426 .log_global_start = io_log_global_start,
3427 .log_global_stop = io_log_global_stop,
3428 .eventfd_add = io_eventfd_add,
3429 .eventfd_del = io_eventfd_del,
3430 .priority = 0,
3433 static void memory_map_init(void)
3435 system_memory = g_malloc(sizeof(*system_memory));
3436 memory_region_init(system_memory, "system", INT64_MAX);
3437 set_system_memory_map(system_memory);
3439 system_io = g_malloc(sizeof(*system_io));
3440 memory_region_init(system_io, "io", 65536);
3441 set_system_io_map(system_io);
3443 memory_listener_register(&core_memory_listener, system_memory);
3444 memory_listener_register(&io_memory_listener, system_io);
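#if 0   /* Illustrative sketch -- not compiled. */
/*
 * With the "system" root created above, a hypothetical device maps its
 * MMIO region like this; the core_memory_listener then sees the change
 * as a region_add and feeds it to cpu_register_physical_memory_log().
 * 'dev_ops', 'dev_opaque', the name, the size and the 0xfe000000 base
 * address are all placeholders.
 */
static void example_map_device(MemoryRegion *dev_mr,
                               const MemoryRegionOps *dev_ops,
                               void *dev_opaque)
{
    memory_region_init_io(dev_mr, dev_ops, dev_opaque,
                          "example-device", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000, dev_mr);
}
#endif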
3447 MemoryRegion *get_system_memory(void)
3449 return system_memory;
3452 MemoryRegion *get_system_io(void)
3454 return system_io;
3457 #endif /* !defined(CONFIG_USER_ONLY) */
3459 /* physical memory access (slow version, mainly for debug) */
3460 #if defined(CONFIG_USER_ONLY)
3461 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
3462 uint8_t *buf, int len, int is_write)
3464 int l, flags;
3465 target_ulong page;
3466 void * p;
3468 while (len > 0) {
3469 page = addr & TARGET_PAGE_MASK;
3470 l = (page + TARGET_PAGE_SIZE) - addr;
3471 if (l > len)
3472 l = len;
3473 flags = page_get_flags(page);
3474 if (!(flags & PAGE_VALID))
3475 return -1;
3476 if (is_write) {
3477 if (!(flags & PAGE_WRITE))
3478 return -1;
3479 /* XXX: this code should not depend on lock_user */
3480 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3481 return -1;
3482 memcpy(p, buf, l);
3483 unlock_user(p, addr, l);
3484 } else {
3485 if (!(flags & PAGE_READ))
3486 return -1;
3487 /* XXX: this code should not depend on lock_user */
3488 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3489 return -1;
3490 memcpy(buf, p, l);
3491 unlock_user(p, addr, 0);
3493 len -= l;
3494 buf += l;
3495 addr += l;
3497 return 0;
3500 #else
3501 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3502 int len, int is_write)
3504 int l;
3505 uint8_t *ptr;
3506 uint32_t val;
3507 target_phys_addr_t page;
3508 MemoryRegionSection *section;
3510 while (len > 0) {
3511 page = addr & TARGET_PAGE_MASK;
3512 l = (page + TARGET_PAGE_SIZE) - addr;
3513 if (l > len)
3514 l = len;
3515 section = phys_page_find(page >> TARGET_PAGE_BITS);
3517 if (is_write) {
3518 if (!memory_region_is_ram(section->mr)) {
3519 target_phys_addr_t addr1;
3520 addr1 = memory_region_section_addr(section, addr);
3521 /* XXX: could force cpu_single_env to NULL to avoid
3522 potential bugs */
3523 if (l >= 4 && ((addr1 & 3) == 0)) {
3524 /* 32 bit write access */
3525 val = ldl_p(buf);
3526 io_mem_write(section->mr, addr1, val, 4);
3527 l = 4;
3528 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3529 /* 16 bit write access */
3530 val = lduw_p(buf);
3531 io_mem_write(section->mr, addr1, val, 2);
3532 l = 2;
3533 } else {
3534 /* 8 bit write access */
3535 val = ldub_p(buf);
3536 io_mem_write(section->mr, addr1, val, 1);
3537 l = 1;
3539 } else if (!section->readonly) {
3540 ram_addr_t addr1;
3541 addr1 = memory_region_get_ram_addr(section->mr)
3542 + memory_region_section_addr(section, addr);
3543 /* RAM case */
3544 ptr = qemu_get_ram_ptr(addr1);
3545 memcpy(ptr, buf, l);
3546 if (!cpu_physical_memory_is_dirty(addr1)) {
3547 /* invalidate code */
3548 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3549 /* set dirty bit */
3550 cpu_physical_memory_set_dirty_flags(
3551 addr1, (0xff & ~CODE_DIRTY_FLAG));
3553 qemu_put_ram_ptr(ptr);
3555 } else {
3556 if (!(memory_region_is_ram(section->mr) ||
3557 memory_region_is_romd(section->mr))) {
3558 target_phys_addr_t addr1;
3559 /* I/O case */
3560 addr1 = memory_region_section_addr(section, addr);
3561 if (l >= 4 && ((addr1 & 3) == 0)) {
3562 /* 32 bit read access */
3563 val = io_mem_read(section->mr, addr1, 4);
3564 stl_p(buf, val);
3565 l = 4;
3566 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3567 /* 16 bit read access */
3568 val = io_mem_read(section->mr, addr1, 2);
3569 stw_p(buf, val);
3570 l = 2;
3571 } else {
3572 /* 8 bit read access */
3573 val = io_mem_read(section->mr, addr1, 1);
3574 stb_p(buf, val);
3575 l = 1;
3577 } else {
3578 /* RAM case */
3579 ptr = qemu_get_ram_ptr(section->mr->ram_addr
3580 + memory_region_section_addr(section,
3581 addr));
3582 memcpy(buf, ptr, l);
3583 qemu_put_ram_ptr(ptr);
3586 len -= l;
3587 buf += l;
3588 addr += l;
3592 /* used for ROM loading : can write in RAM and ROM */
3593 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3594 const uint8_t *buf, int len)
3596 int l;
3597 uint8_t *ptr;
3598 target_phys_addr_t page;
3599 MemoryRegionSection *section;
3601 while (len > 0) {
3602 page = addr & TARGET_PAGE_MASK;
3603 l = (page + TARGET_PAGE_SIZE) - addr;
3604 if (l > len)
3605 l = len;
3606 section = phys_page_find(page >> TARGET_PAGE_BITS);
3608 if (!(memory_region_is_ram(section->mr) ||
3609 memory_region_is_romd(section->mr))) {
3610 /* do nothing */
3611 } else {
3612 unsigned long addr1;
3613 addr1 = memory_region_get_ram_addr(section->mr)
3614 + memory_region_section_addr(section, addr);
3615 /* ROM/RAM case */
3616 ptr = qemu_get_ram_ptr(addr1);
3617 memcpy(ptr, buf, l);
3618 qemu_put_ram_ptr(ptr);
3620 len -= l;
3621 buf += l;
3622 addr += l;
3626 typedef struct {
3627 void *buffer;
3628 target_phys_addr_t addr;
3629 target_phys_addr_t len;
3630 } BounceBuffer;
3632 static BounceBuffer bounce;
3634 typedef struct MapClient {
3635 void *opaque;
3636 void (*callback)(void *opaque);
3637 QLIST_ENTRY(MapClient) link;
3638 } MapClient;
3640 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3641 = QLIST_HEAD_INITIALIZER(map_client_list);
3643 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3645 MapClient *client = g_malloc(sizeof(*client));
3647 client->opaque = opaque;
3648 client->callback = callback;
3649 QLIST_INSERT_HEAD(&map_client_list, client, link);
3650 return client;
3653 void cpu_unregister_map_client(void *_client)
3655 MapClient *client = (MapClient *)_client;
3657 QLIST_REMOVE(client, link);
3658 g_free(client);
3661 static void cpu_notify_map_clients(void)
3663 MapClient *client;
3665 while (!QLIST_EMPTY(&map_client_list)) {
3666 client = QLIST_FIRST(&map_client_list);
3667 client->callback(client->opaque);
3668 cpu_unregister_map_client(client);
3672 /* Map a physical memory region into a host virtual address.
3673 * May map a subset of the requested range, given by and returned in *plen.
3674 * May return NULL if resources needed to perform the mapping are exhausted.
3675 * Use only for reads OR writes - not for read-modify-write operations.
3676 * Use cpu_register_map_client() to know when retrying the map operation is
3677 * likely to succeed.
3679 void *cpu_physical_memory_map(target_phys_addr_t addr,
3680 target_phys_addr_t *plen,
3681 int is_write)
3683 target_phys_addr_t len = *plen;
3684 target_phys_addr_t todo = 0;
3685 int l;
3686 target_phys_addr_t page;
3687 MemoryRegionSection *section;
3688 ram_addr_t raddr = RAM_ADDR_MAX;
3689 ram_addr_t rlen;
3690 void *ret;
3692 while (len > 0) {
3693 page = addr & TARGET_PAGE_MASK;
3694 l = (page + TARGET_PAGE_SIZE) - addr;
3695 if (l > len)
3696 l = len;
3697 section = phys_page_find(page >> TARGET_PAGE_BITS);
3699 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
3700 if (todo || bounce.buffer) {
3701 break;
3703 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3704 bounce.addr = addr;
3705 bounce.len = l;
3706 if (!is_write) {
3707 cpu_physical_memory_read(addr, bounce.buffer, l);
3710 *plen = l;
3711 return bounce.buffer;
3713 if (!todo) {
3714 raddr = memory_region_get_ram_addr(section->mr)
3715 + memory_region_section_addr(section, addr);
3718 len -= l;
3719 addr += l;
3720 todo += l;
3722 rlen = todo;
3723 ret = qemu_ram_ptr_length(raddr, &rlen);
3724 *plen = rlen;
3725 return ret;
3728 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3729 * Will also mark the memory as dirty if is_write == 1. access_len gives
3730 * the amount of memory that was actually read or written by the caller.
3732 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3733 int is_write, target_phys_addr_t access_len)
3735 if (buffer != bounce.buffer) {
3736 if (is_write) {
3737 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3738 while (access_len) {
3739 unsigned l;
3740 l = TARGET_PAGE_SIZE;
3741 if (l > access_len)
3742 l = access_len;
3743 if (!cpu_physical_memory_is_dirty(addr1)) {
3744 /* invalidate code */
3745 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3746 /* set dirty bit */
3747 cpu_physical_memory_set_dirty_flags(
3748 addr1, (0xff & ~CODE_DIRTY_FLAG));
3750 addr1 += l;
3751 access_len -= l;
3754 if (xen_enabled()) {
3755 xen_invalidate_map_cache_entry(buffer);
3757 return;
3759 if (is_write) {
3760 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3762 qemu_vfree(bounce.buffer);
3763 bounce.buffer = NULL;
3764 cpu_notify_map_clients();
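#if 0   /* Illustrative sketch -- not compiled. */
/*
 * The intended map/use/unmap pattern for DMA-style access.  'addr',
 * 'data' and 'want' are hypothetical; a real caller that gets NULL back
 * would register a map client and retry from its callback.
 */
static void example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t want)
{
    while (want > 0) {
        target_phys_addr_t len = want;
        void *host = cpu_physical_memory_map(addr, &len, 1 /* is_write */);

        if (!host) {
            break;
        }
        memcpy(host, data, len);
        cpu_physical_memory_unmap(host, len, 1, len);
        addr += len;
        data += len;
        want -= len;
    }
}
#endif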
3767 /* warning: addr must be aligned */
3768 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3769 enum device_endian endian)
3771 uint8_t *ptr;
3772 uint32_t val;
3773 MemoryRegionSection *section;
3775 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3777 if (!(memory_region_is_ram(section->mr) ||
3778 memory_region_is_romd(section->mr))) {
3779 /* I/O case */
3780 addr = memory_region_section_addr(section, addr);
3781 val = io_mem_read(section->mr, addr, 4);
3782 #if defined(TARGET_WORDS_BIGENDIAN)
3783 if (endian == DEVICE_LITTLE_ENDIAN) {
3784 val = bswap32(val);
3786 #else
3787 if (endian == DEVICE_BIG_ENDIAN) {
3788 val = bswap32(val);
3790 #endif
3791 } else {
3792 /* RAM case */
3793 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3794 & TARGET_PAGE_MASK)
3795 + memory_region_section_addr(section, addr));
3796 switch (endian) {
3797 case DEVICE_LITTLE_ENDIAN:
3798 val = ldl_le_p(ptr);
3799 break;
3800 case DEVICE_BIG_ENDIAN:
3801 val = ldl_be_p(ptr);
3802 break;
3803 default:
3804 val = ldl_p(ptr);
3805 break;
3808 return val;
3811 uint32_t ldl_phys(target_phys_addr_t addr)
3813 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3816 uint32_t ldl_le_phys(target_phys_addr_t addr)
3818 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3821 uint32_t ldl_be_phys(target_phys_addr_t addr)
3823 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3826 /* warning: addr must be aligned */
3827 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3828 enum device_endian endian)
3830 uint8_t *ptr;
3831 uint64_t val;
3832 MemoryRegionSection *section;
3834 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3836 if (!(memory_region_is_ram(section->mr) ||
3837 memory_region_is_romd(section->mr))) {
3838 /* I/O case */
3839 addr = memory_region_section_addr(section, addr);
3841 /* XXX This is broken when device endian != cpu endian.
3842 Fix this and add an "endian" variable check */
3843 #ifdef TARGET_WORDS_BIGENDIAN
3844 val = io_mem_read(section->mr, addr, 4) << 32;
3845 val |= io_mem_read(section->mr, addr + 4, 4);
3846 #else
3847 val = io_mem_read(section->mr, addr, 4);
3848 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
3849 #endif
3850 } else {
3851 /* RAM case */
3852 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3853 & TARGET_PAGE_MASK)
3854 + memory_region_section_addr(section, addr));
3855 switch (endian) {
3856 case DEVICE_LITTLE_ENDIAN:
3857 val = ldq_le_p(ptr);
3858 break;
3859 case DEVICE_BIG_ENDIAN:
3860 val = ldq_be_p(ptr);
3861 break;
3862 default:
3863 val = ldq_p(ptr);
3864 break;
3867 return val;
3870 uint64_t ldq_phys(target_phys_addr_t addr)
3872 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3875 uint64_t ldq_le_phys(target_phys_addr_t addr)
3877 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3880 uint64_t ldq_be_phys(target_phys_addr_t addr)
3882 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3885 /* XXX: optimize */
3886 uint32_t ldub_phys(target_phys_addr_t addr)
3888 uint8_t val;
3889 cpu_physical_memory_read(addr, &val, 1);
3890 return val;
3893 /* warning: addr must be aligned */
3894 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3895 enum device_endian endian)
3897 uint8_t *ptr;
3898 uint64_t val;
3899 MemoryRegionSection *section;
3901 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3903 if (!(memory_region_is_ram(section->mr) ||
3904 memory_region_is_romd(section->mr))) {
3905 /* I/O case */
3906 addr = memory_region_section_addr(section, addr);
3907 val = io_mem_read(section->mr, addr, 2);
3908 #if defined(TARGET_WORDS_BIGENDIAN)
3909 if (endian == DEVICE_LITTLE_ENDIAN) {
3910 val = bswap16(val);
3912 #else
3913 if (endian == DEVICE_BIG_ENDIAN) {
3914 val = bswap16(val);
3916 #endif
3917 } else {
3918 /* RAM case */
3919 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3920 & TARGET_PAGE_MASK)
3921 + memory_region_section_addr(section, addr));
3922 switch (endian) {
3923 case DEVICE_LITTLE_ENDIAN:
3924 val = lduw_le_p(ptr);
3925 break;
3926 case DEVICE_BIG_ENDIAN:
3927 val = lduw_be_p(ptr);
3928 break;
3929 default:
3930 val = lduw_p(ptr);
3931 break;
3934 return val;
3937 uint32_t lduw_phys(target_phys_addr_t addr)
3939 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3942 uint32_t lduw_le_phys(target_phys_addr_t addr)
3944 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3947 uint32_t lduw_be_phys(target_phys_addr_t addr)
3949 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3952 /* warning: addr must be aligned. The ram page is not marked as dirty
3953 and the code inside is not invalidated. It is useful if the dirty
3954 bits are used to track modified PTEs */
3955 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3957 uint8_t *ptr;
3958 MemoryRegionSection *section;
3960 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3962 if (!memory_region_is_ram(section->mr) || section->readonly) {
3963 addr = memory_region_section_addr(section, addr);
3964 if (memory_region_is_ram(section->mr)) {
3965 section = &phys_sections[phys_section_rom];
3967 io_mem_write(section->mr, addr, val, 4);
3968 } else {
3969 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
3970 & TARGET_PAGE_MASK)
3971 + memory_region_section_addr(section, addr);
3972 ptr = qemu_get_ram_ptr(addr1);
3973 stl_p(ptr, val);
3975 if (unlikely(in_migration)) {
3976 if (!cpu_physical_memory_is_dirty(addr1)) {
3977 /* invalidate code */
3978 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3979 /* set dirty bit */
3980 cpu_physical_memory_set_dirty_flags(
3981 addr1, (0xff & ~CODE_DIRTY_FLAG));
3987 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3989 uint8_t *ptr;
3990 MemoryRegionSection *section;
3992 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3994 if (!memory_region_is_ram(section->mr) || section->readonly) {
3995 addr = memory_region_section_addr(section, addr);
3996 if (memory_region_is_ram(section->mr)) {
3997 section = &phys_sections[phys_section_rom];
3999 #ifdef TARGET_WORDS_BIGENDIAN
4000 io_mem_write(section->mr, addr, val >> 32, 4);
4001 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
4002 #else
4003 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4004 io_mem_write(section->mr, addr + 4, val >> 32, 4);
4005 #endif
4006 } else {
4007 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
4008 & TARGET_PAGE_MASK)
4009 + memory_region_section_addr(section, addr));
4010 stq_p(ptr, val);
4014 /* warning: addr must be aligned */
4015 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4016 enum device_endian endian)
4018 uint8_t *ptr;
4019 MemoryRegionSection *section;
4021 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4023 if (!memory_region_is_ram(section->mr) || section->readonly) {
4024 addr = memory_region_section_addr(section, addr);
4025 if (memory_region_is_ram(section->mr)) {
4026 section = &phys_sections[phys_section_rom];
4028 #if defined(TARGET_WORDS_BIGENDIAN)
4029 if (endian == DEVICE_LITTLE_ENDIAN) {
4030 val = bswap32(val);
4032 #else
4033 if (endian == DEVICE_BIG_ENDIAN) {
4034 val = bswap32(val);
4036 #endif
4037 io_mem_write(section->mr, addr, val, 4);
4038 } else {
4039 unsigned long addr1;
4040 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4041 + memory_region_section_addr(section, addr);
4042 /* RAM case */
4043 ptr = qemu_get_ram_ptr(addr1);
4044 switch (endian) {
4045 case DEVICE_LITTLE_ENDIAN:
4046 stl_le_p(ptr, val);
4047 break;
4048 case DEVICE_BIG_ENDIAN:
4049 stl_be_p(ptr, val);
4050 break;
4051 default:
4052 stl_p(ptr, val);
4053 break;
4055 if (!cpu_physical_memory_is_dirty(addr1)) {
4056 /* invalidate code */
4057 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4058 /* set dirty bit */
4059 cpu_physical_memory_set_dirty_flags(addr1,
4060 (0xff & ~CODE_DIRTY_FLAG));
4065 void stl_phys(target_phys_addr_t addr, uint32_t val)
4067 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4070 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4072 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4075 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4077 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4080 /* XXX: optimize */
4081 void stb_phys(target_phys_addr_t addr, uint32_t val)
4083 uint8_t v = val;
4084 cpu_physical_memory_write(addr, &v, 1);
4087 /* warning: addr must be aligned */
4088 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4089 enum device_endian endian)
4091 uint8_t *ptr;
4092 MemoryRegionSection *section;
4094 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4096 if (!memory_region_is_ram(section->mr) || section->readonly) {
4097 addr = memory_region_section_addr(section, addr);
4098 if (memory_region_is_ram(section->mr)) {
4099 section = &phys_sections[phys_section_rom];
4101 #if defined(TARGET_WORDS_BIGENDIAN)
4102 if (endian == DEVICE_LITTLE_ENDIAN) {
4103 val = bswap16(val);
4105 #else
4106 if (endian == DEVICE_BIG_ENDIAN) {
4107 val = bswap16(val);
4109 #endif
4110 io_mem_write(section->mr, addr, val, 2);
4111 } else {
4112 unsigned long addr1;
4113 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4114 + memory_region_section_addr(section, addr);
4115 /* RAM case */
4116 ptr = qemu_get_ram_ptr(addr1);
4117 switch (endian) {
4118 case DEVICE_LITTLE_ENDIAN:
4119 stw_le_p(ptr, val);
4120 break;
4121 case DEVICE_BIG_ENDIAN:
4122 stw_be_p(ptr, val);
4123 break;
4124 default:
4125 stw_p(ptr, val);
4126 break;
4128 if (!cpu_physical_memory_is_dirty(addr1)) {
4129 /* invalidate code */
4130 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4131 /* set dirty bit */
4132 cpu_physical_memory_set_dirty_flags(addr1,
4133 (0xff & ~CODE_DIRTY_FLAG));
4138 void stw_phys(target_phys_addr_t addr, uint32_t val)
4140 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4143 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4145 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4148 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4150 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4153 /* XXX: optimize */
4154 void stq_phys(target_phys_addr_t addr, uint64_t val)
4156 val = tswap64(val);
4157 cpu_physical_memory_write(addr, &val, 8);
4160 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4162 val = cpu_to_le64(val);
4163 cpu_physical_memory_write(addr, &val, 8);
4166 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4168 val = cpu_to_be64(val);
4169 cpu_physical_memory_write(addr, &val, 8);
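#if 0   /* Illustrative sketch -- not compiled. */
/*
 * A device model reading a hypothetical little-endian descriptor from
 * guest memory with the fixed-endian helpers above: a 64-bit buffer
 * address, a 32-bit length, and a 16-bit status word written back.
 * The layout is made up for this sketch.
 */
static void example_read_descriptor(target_phys_addr_t desc_pa,
                                    uint64_t *buf_pa, uint32_t *buf_len)
{
    *buf_pa  = ldq_le_phys(desc_pa);
    *buf_len = ldl_le_phys(desc_pa + 8);
    stw_le_phys(desc_pa + 12, 1);       /* mark the descriptor done */
}
#endif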
4172 /* virtual memory access for debug (includes writing to ROM) */
4173 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
4174 uint8_t *buf, int len, int is_write)
4176 int l;
4177 target_phys_addr_t phys_addr;
4178 target_ulong page;
4180 while (len > 0) {
4181 page = addr & TARGET_PAGE_MASK;
4182 phys_addr = cpu_get_phys_page_debug(env, page);
4183 /* if no physical page mapped, return an error */
4184 if (phys_addr == -1)
4185 return -1;
4186 l = (page + TARGET_PAGE_SIZE) - addr;
4187 if (l > len)
4188 l = len;
4189 phys_addr += (addr & ~TARGET_PAGE_MASK);
4190 if (is_write)
4191 cpu_physical_memory_write_rom(phys_addr, buf, l);
4192 else
4193 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4194 len -= l;
4195 buf += l;
4196 addr += l;
4198 return 0;
4200 #endif
4202 /* in deterministic execution mode, instructions doing device I/Os
4203 must be at the end of the TB */
4204 void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
4206 TranslationBlock *tb;
4207 uint32_t n, cflags;
4208 target_ulong pc, cs_base;
4209 uint64_t flags;
4211 tb = tb_find_pc(retaddr);
4212 if (!tb) {
4213 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4214 (void *)retaddr);
4216 n = env->icount_decr.u16.low + tb->icount;
4217 cpu_restore_state(tb, env, retaddr);
4218 /* Calculate how many instructions had been executed before the fault
4219 occurred. */
4220 n = n - env->icount_decr.u16.low;
4221 /* Generate a new TB ending on the I/O insn. */
4222 n++;
4223 /* On MIPS and SH, delay slot instructions can only be restarted if
4224 they were already the first instruction in the TB. If this is not
4225 the first instruction in a TB then re-execute the preceding
4226 branch. */
4227 #if defined(TARGET_MIPS)
4228 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4229 env->active_tc.PC -= 4;
4230 env->icount_decr.u16.low++;
4231 env->hflags &= ~MIPS_HFLAG_BMASK;
4233 #elif defined(TARGET_SH4)
4234 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4235 && n > 1) {
4236 env->pc -= 2;
4237 env->icount_decr.u16.low++;
4238 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4240 #endif
4241 /* This should never happen. */
4242 if (n > CF_COUNT_MASK)
4243 cpu_abort(env, "TB too big during recompile");
4245 cflags = n | CF_LAST_IO;
4246 pc = tb->pc;
4247 cs_base = tb->cs_base;
4248 flags = tb->flags;
4249 tb_phys_invalidate(tb, -1);
4250 /* FIXME: In theory this could raise an exception. In practice
4251 we have already translated the block once so it's probably ok. */
4252 tb_gen_code(env, pc, cs_base, flags, cflags);
4253 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4254 the first in the TB) then we end up generating a whole new TB and
4255 repeating the fault, which is horribly inefficient.
4256 Better would be to execute just this insn uncached, or generate a
4257 second new TB. */
4258 cpu_resume_from_signal(env, NULL);
4261 #if !defined(CONFIG_USER_ONLY)
4263 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4265 int i, target_code_size, max_target_code_size;
4266 int direct_jmp_count, direct_jmp2_count, cross_page;
4267 TranslationBlock *tb;
4269 target_code_size = 0;
4270 max_target_code_size = 0;
4271 cross_page = 0;
4272 direct_jmp_count = 0;
4273 direct_jmp2_count = 0;
4274 for(i = 0; i < nb_tbs; i++) {
4275 tb = &tbs[i];
4276 target_code_size += tb->size;
4277 if (tb->size > max_target_code_size)
4278 max_target_code_size = tb->size;
4279 if (tb->page_addr[1] != -1)
4280 cross_page++;
4281 if (tb->tb_next_offset[0] != 0xffff) {
4282 direct_jmp_count++;
4283 if (tb->tb_next_offset[1] != 0xffff) {
4284 direct_jmp2_count++;
4288 /* XXX: avoid using doubles ? */
4289 cpu_fprintf(f, "Translation buffer state:\n");
4290 cpu_fprintf(f, "gen code size %td/%ld\n",
4291 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4292 cpu_fprintf(f, "TB count %d/%d\n",
4293 nb_tbs, code_gen_max_blocks);
4294 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4295 nb_tbs ? target_code_size / nb_tbs : 0,
4296 max_target_code_size);
4297 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4298 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4299 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4300 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4301 cross_page,
4302 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4303 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4304 direct_jmp_count,
4305 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4306 direct_jmp2_count,
4307 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4308 cpu_fprintf(f, "\nStatistics:\n");
4309 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4310 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4311 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4312 tcg_dump_info(f, cpu_fprintf);
4316 * A helper function for the _utterly broken_ virtio device model to find out if
4317 * it's running on a big endian machine. Don't do this at home kids!
4319 bool virtio_is_big_endian(void);
4320 bool virtio_is_big_endian(void)
4322 #if defined(TARGET_WORDS_BIGENDIAN)
4323 return true;
4324 #else
4325 return false;
4326 #endif
4329 #endif