Merge remote branch 'upstream' into next
[qemu/qemu-dev-zwu.git] / exec.c
blob 7b0e1c50af7d61b4304797706aaa94dafa8eda18
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "cache-utils.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "osdep.h"
46 #include "kvm.h"
47 #include "qemu-timer.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #include <signal.h>
51 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
52 #include <sys/param.h>
53 #if __FreeBSD_version >= 700104
54 #define HAVE_KINFO_GETVMMAP
55 #define sigqueue sigqueue_freebsd /* avoid redefinition */
56 #include <sys/time.h>
57 #include <sys/proc.h>
58 #include <machine/profile.h>
59 #define _KERNEL
60 #include <sys/user.h>
61 #undef _KERNEL
62 #undef sigqueue
63 #include <libutil.h>
64 #endif
65 #endif
66 #endif
68 //#define DEBUG_TB_INVALIDATE
69 //#define DEBUG_FLUSH
70 //#define DEBUG_TLB
71 //#define DEBUG_UNASSIGNED
73 /* make various TB consistency checks */
74 //#define DEBUG_TB_CHECK
75 //#define DEBUG_TLB_CHECK
77 //#define DEBUG_IOPORT
78 //#define DEBUG_SUBPAGE
80 #if !defined(CONFIG_USER_ONLY)
81 /* TB consistency checks only implemented for usermode emulation. */
82 #undef DEBUG_TB_CHECK
83 #endif
85 #define SMC_BITMAP_USE_THRESHOLD 10
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to the code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #elif defined(_WIN32)
102 /* Maximum alignment for Win32 is 16. */
103 #define code_gen_section \
104 __attribute__((aligned (16)))
105 #else
106 #define code_gen_section \
107 __attribute__((aligned (32)))
108 #endif
110 uint8_t code_gen_prologue[1024] code_gen_section;
111 static uint8_t *code_gen_buffer;
112 static unsigned long code_gen_buffer_size;
113 /* threshold to flush the translated code buffer */
114 static unsigned long code_gen_buffer_max_size;
115 uint8_t *code_gen_ptr;
117 #if !defined(CONFIG_USER_ONLY)
118 int phys_ram_fd;
119 uint8_t *phys_ram_dirty;
120 static int in_migration;
122 typedef struct RAMBlock {
123 uint8_t *host;
124 ram_addr_t offset;
125 ram_addr_t length;
126 struct RAMBlock *next;
127 } RAMBlock;
129 static RAMBlock *ram_blocks;
130 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
131 then we can no longer assume contiguous ram offsets, and external uses
132 of this variable will break. */
133 ram_addr_t last_ram_offset;
134 #endif
136 CPUState *first_cpu;
137 /* current CPU in the current thread. It is only valid inside
138 cpu_exec() */
139 CPUState *cpu_single_env;
140 /* 0 = Do not count executed instructions.
141 1 = Precise instruction counting.
142 2 = Adaptive rate instruction counting. */
143 int use_icount = 0;
144 /* Current instruction counter. While executing translated code this may
145 include some instructions that have not yet been executed. */
146 int64_t qemu_icount;
148 typedef struct PageDesc {
149 /* list of TBs intersecting this ram page */
150 TranslationBlock *first_tb;
151 /* in order to optimize self modifying code, we count the number
152 of lookups we do to a given page to use a bitmap */
153 unsigned int code_write_count;
154 uint8_t *code_bitmap;
155 #if defined(CONFIG_USER_ONLY)
156 unsigned long flags;
157 #endif
158 } PageDesc;
160 /* In system mode we want L1_MAP to be based on ram offsets,
161 while in user mode we want it to be based on virtual addresses. */
162 #if !defined(CONFIG_USER_ONLY)
163 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
164 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
165 #else
166 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
167 #endif
168 #else
169 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
170 #endif
172 /* Size of the L2 (and L3, etc) page tables. */
173 #define L2_BITS 10
174 #define L2_SIZE (1 << L2_BITS)
176 /* The bits remaining after N lower levels of page tables. */
177 #define P_L1_BITS_REM \
178 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
179 #define V_L1_BITS_REM \
180 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
182 /* Size of the L1 page table. Avoid silly small sizes. */
183 #if P_L1_BITS_REM < 4
184 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
185 #else
186 #define P_L1_BITS P_L1_BITS_REM
187 #endif
189 #if V_L1_BITS_REM < 4
190 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
191 #else
192 #define V_L1_BITS V_L1_BITS_REM
193 #endif
195 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
196 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
198 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
199 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
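/* Worked example (editor's illustration, not part of the original file):
   assuming TARGET_PHYS_ADDR_SPACE_BITS == 36, TARGET_PAGE_BITS == 12 and
   L2_BITS == 10, a physical page index has 36 - 12 = 24 significant bits.
   P_L1_BITS_REM = 24 % 10 = 4, which is not below 4, so P_L1_BITS = 4,
   P_L1_SIZE = 16 and P_L1_SHIFT = 36 - 12 - 4 = 20.  The lookup in
   phys_page_find_alloc() then consumes the top 4 bits in the L1 array and
   the remaining 20 bits in P_L1_SHIFT / L2_BITS = 2 levels of 10 bits each:

       l1_index = (page_index >> 20) & 15;
       l2_index = (page_index >> 10) & (L2_SIZE - 1);
       l3_index =  page_index        & (L2_SIZE - 1);
*/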
201 unsigned long qemu_real_host_page_size;
202 unsigned long qemu_host_page_bits;
203 unsigned long qemu_host_page_size;
204 unsigned long qemu_host_page_mask;
206 /* This is a multi-level map on the virtual address space.
207 The bottom level has pointers to PageDesc. */
208 static void *l1_map[V_L1_SIZE];
210 #if !defined(CONFIG_USER_ONLY)
211 typedef struct PhysPageDesc {
212 /* offset in host memory of the page + io_index in the low bits */
213 ram_addr_t phys_offset;
214 ram_addr_t region_offset;
215 } PhysPageDesc;
217 /* This is a multi-level map on the physical address space.
218 The bottom level has pointers to PhysPageDesc. */
219 static void *l1_phys_map[P_L1_SIZE];
221 static void io_mem_init(void);
223 /* io memory support */
224 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
225 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
226 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
227 static char io_mem_used[IO_MEM_NB_ENTRIES];
228 static int io_mem_watch;
229 #endif
231 /* log support */
232 #ifdef WIN32
233 static const char *logfilename = "qemu.log";
234 #else
235 static const char *logfilename = "/tmp/qemu.log";
236 #endif
237 FILE *logfile;
238 int loglevel;
239 static int log_append = 0;
241 /* statistics */
242 #if !defined(CONFIG_USER_ONLY)
243 static int tlb_flush_count;
244 #endif
245 static int tb_flush_count;
246 static int tb_phys_invalidate_count;
248 #ifdef _WIN32
249 static void map_exec(void *addr, long size)
251 DWORD old_protect;
252 VirtualProtect(addr, size,
253 PAGE_EXECUTE_READWRITE, &old_protect);
256 #else
257 static void map_exec(void *addr, long size)
259 unsigned long start, end, page_size;
261 page_size = getpagesize();
262 start = (unsigned long)addr;
263 start &= ~(page_size - 1);
265 end = (unsigned long)addr + size;
266 end += page_size - 1;
267 end &= ~(page_size - 1);
269 mprotect((void *)start, end - start,
270 PROT_READ | PROT_WRITE | PROT_EXEC);
272 #endif
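/* Editor's illustration (not in the original source): the POSIX variant of
   map_exec() widens the requested range to whole host pages before calling
   mprotect().  For example, with a 4 KiB page size, addr = 0x40123456 and
   size = 0x100:

       start = 0x40123456 & ~0xfff           = 0x40123000
       end   = (0x40123556 + 0xfff) & ~0xfff = 0x40124000

   so exactly one 4 KiB page is made read/write/executable. */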
274 static void page_init(void)
276 /* NOTE: we can always suppose that qemu_host_page_size >=
277 TARGET_PAGE_SIZE */
278 #ifdef _WIN32
280 SYSTEM_INFO system_info;
282 GetSystemInfo(&system_info);
283 qemu_real_host_page_size = system_info.dwPageSize;
285 #else
286 qemu_real_host_page_size = getpagesize();
287 #endif
288 if (qemu_host_page_size == 0)
289 qemu_host_page_size = qemu_real_host_page_size;
290 if (qemu_host_page_size < TARGET_PAGE_SIZE)
291 qemu_host_page_size = TARGET_PAGE_SIZE;
292 qemu_host_page_bits = 0;
293 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
294 qemu_host_page_bits++;
295 qemu_host_page_mask = ~(qemu_host_page_size - 1);
297 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
299 #ifdef HAVE_KINFO_GETVMMAP
300 struct kinfo_vmentry *freep;
301 int i, cnt;
303 freep = kinfo_getvmmap(getpid(), &cnt);
304 if (freep) {
305 mmap_lock();
306 for (i = 0; i < cnt; i++) {
307 unsigned long startaddr, endaddr;
309 startaddr = freep[i].kve_start;
310 endaddr = freep[i].kve_end;
311 if (h2g_valid(startaddr)) {
312 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
314 if (h2g_valid(endaddr)) {
315 endaddr = h2g(endaddr);
316 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
317 } else {
318 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
319 endaddr = ~0ul;
320 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
321 #endif
325 free(freep);
326 mmap_unlock();
328 #else
329 FILE *f;
331 last_brk = (unsigned long)sbrk(0);
333 f = fopen("/compat/linux/proc/self/maps", "r");
334 if (f) {
335 mmap_lock();
337 do {
338 unsigned long startaddr, endaddr;
339 int n;
341 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
343 if (n == 2 && h2g_valid(startaddr)) {
344 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
346 if (h2g_valid(endaddr)) {
347 endaddr = h2g(endaddr);
348 } else {
349 endaddr = ~0ul;
351 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
353 } while (!feof(f));
355 fclose(f);
356 mmap_unlock();
358 #endif
360 #endif
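/* Editor's illustration (not in the original source): with a host page size
   of 4096 bytes (and TARGET_PAGE_SIZE <= 4096), the computation at the top
   of page_init() yields qemu_host_page_size == 4096,
   qemu_host_page_bits == 12 and qemu_host_page_mask == ~0xfffUL, so host
   pages are obtained with "addr & qemu_host_page_mask". */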
363 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
365 PageDesc *pd;
366 void **lp;
367 int i;
369 #if defined(CONFIG_USER_ONLY)
370 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
371 # define ALLOC(P, SIZE) \
372 do { \
373 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
374 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
375 } while (0)
376 #else
377 # define ALLOC(P, SIZE) \
378 do { P = qemu_mallocz(SIZE); } while (0)
379 #endif
381 /* Level 1. Always allocated. */
382 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
384 /* Level 2..N-1. */
385 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
386 void **p = *lp;
388 if (p == NULL) {
389 if (!alloc) {
390 return NULL;
392 ALLOC(p, sizeof(void *) * L2_SIZE);
393 *lp = p;
396 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
399 pd = *lp;
400 if (pd == NULL) {
401 if (!alloc) {
402 return NULL;
404 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
405 *lp = pd;
408 #undef ALLOC
410 return pd + (index & (L2_SIZE - 1));
413 static inline PageDesc *page_find(tb_page_addr_t index)
415 return page_find_alloc(index, 0);
418 #if !defined(CONFIG_USER_ONLY)
419 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
421 PhysPageDesc *pd;
422 void **lp;
423 int i;
425 /* Level 1. Always allocated. */
426 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
428 /* Level 2..N-1. */
429 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
430 void **p = *lp;
431 if (p == NULL) {
432 if (!alloc) {
433 return NULL;
435 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
437 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
440 pd = *lp;
441 if (pd == NULL) {
442 int i;
444 if (!alloc) {
445 return NULL;
448 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
450 for (i = 0; i < L2_SIZE; i++) {
451 pd[i].phys_offset = IO_MEM_UNASSIGNED;
452 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
456 return pd + (index & (L2_SIZE - 1));
459 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
461 return phys_page_find_alloc(index, 0);
464 static void tlb_protect_code(ram_addr_t ram_addr);
465 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
466 target_ulong vaddr);
467 #define mmap_lock() do { } while(0)
468 #define mmap_unlock() do { } while(0)
469 #endif
471 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
473 #if defined(CONFIG_USER_ONLY)
474 /* Currently it is not recommended to allocate big chunks of data in
475 user mode. This will change once a dedicated libc is used. */
476 #define USE_STATIC_CODE_GEN_BUFFER
477 #endif
479 #ifdef USE_STATIC_CODE_GEN_BUFFER
480 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
481 __attribute__((aligned (CODE_GEN_ALIGN)));
482 #endif
484 static void code_gen_alloc(unsigned long tb_size)
486 if (kvm_enabled())
487 return;
489 #ifdef USE_STATIC_CODE_GEN_BUFFER
490 code_gen_buffer = static_code_gen_buffer;
491 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
492 map_exec(code_gen_buffer, code_gen_buffer_size);
493 #else
494 code_gen_buffer_size = tb_size;
495 if (code_gen_buffer_size == 0) {
496 #if defined(CONFIG_USER_ONLY)
497 /* in user mode, phys_ram_size is not meaningful */
498 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
499 #else
500 /* XXX: needs adjustments */
501 code_gen_buffer_size = (unsigned long)(ram_size / 4);
502 #endif
504 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
505 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
506 /* The code gen buffer location may have constraints depending on
507 the host cpu and OS */
508 #if defined(__linux__)
510 int flags;
511 void *start = NULL;
513 flags = MAP_PRIVATE | MAP_ANONYMOUS;
514 #if defined(__x86_64__)
515 flags |= MAP_32BIT;
516 /* Cannot map more than that */
517 if (code_gen_buffer_size > (800 * 1024 * 1024))
518 code_gen_buffer_size = (800 * 1024 * 1024);
519 #elif defined(__sparc_v9__)
520 // Map the buffer below 2G, so we can use direct calls and branches
521 flags |= MAP_FIXED;
522 start = (void *) 0x60000000UL;
523 if (code_gen_buffer_size > (512 * 1024 * 1024))
524 code_gen_buffer_size = (512 * 1024 * 1024);
525 #elif defined(__arm__)
526 /* Map the buffer below 32M, so we can use direct calls and branches */
527 flags |= MAP_FIXED;
528 start = (void *) 0x01000000UL;
529 if (code_gen_buffer_size > 16 * 1024 * 1024)
530 code_gen_buffer_size = 16 * 1024 * 1024;
531 #endif
532 code_gen_buffer = mmap(start, code_gen_buffer_size,
533 PROT_WRITE | PROT_READ | PROT_EXEC,
534 flags, -1, 0);
535 if (code_gen_buffer == MAP_FAILED) {
536 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
537 exit(1);
540 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
542 int flags;
543 void *addr = NULL;
544 flags = MAP_PRIVATE | MAP_ANONYMOUS;
545 #if defined(__x86_64__)
546 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
547 * 0x40000000 is free */
548 flags |= MAP_FIXED;
549 addr = (void *)0x40000000;
550 /* Cannot map more than that */
551 if (code_gen_buffer_size > (800 * 1024 * 1024))
552 code_gen_buffer_size = (800 * 1024 * 1024);
553 #endif
554 code_gen_buffer = mmap(addr, code_gen_buffer_size,
555 PROT_WRITE | PROT_READ | PROT_EXEC,
556 flags, -1, 0);
557 if (code_gen_buffer == MAP_FAILED) {
558 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
559 exit(1);
562 #else
563 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
564 map_exec(code_gen_buffer, code_gen_buffer_size);
565 #endif
566 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
567 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
568 code_gen_buffer_max_size = code_gen_buffer_size -
569 code_gen_max_block_size();
570 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
571 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
574 /* Must be called before using the QEMU cpus. 'tb_size' is the size
575 (in bytes) allocated to the translation buffer. Zero means default
576 size. */
577 void cpu_exec_init_all(unsigned long tb_size)
579 cpu_gen_init();
580 code_gen_alloc(tb_size);
581 code_gen_ptr = code_gen_buffer;
582 page_init();
583 #if !defined(CONFIG_USER_ONLY)
584 io_mem_init();
585 #endif
586 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
587 /* There's no guest base to take into account, so go ahead and
588 initialize the prologue now. */
589 tcg_prologue_init(&tcg_ctx);
590 #endif
593 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
595 static int cpu_common_post_load(void *opaque, int version_id)
597 CPUState *env = opaque;
599 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
600 version_id is increased. */
601 env->interrupt_request &= ~0x01;
602 tlb_flush(env, 1);
604 return 0;
607 static const VMStateDescription vmstate_cpu_common = {
608 .name = "cpu_common",
609 .version_id = 1,
610 .minimum_version_id = 1,
611 .minimum_version_id_old = 1,
612 .post_load = cpu_common_post_load,
613 .fields = (VMStateField []) {
614 VMSTATE_UINT32(halted, CPUState),
615 VMSTATE_UINT32(interrupt_request, CPUState),
616 VMSTATE_END_OF_LIST()
619 #endif
621 CPUState *qemu_get_cpu(int cpu)
623 CPUState *env = first_cpu;
625 while (env) {
626 if (env->cpu_index == cpu)
627 break;
628 env = env->next_cpu;
631 return env;
634 void cpu_exec_init(CPUState *env)
636 CPUState **penv;
637 int cpu_index;
639 #if defined(CONFIG_USER_ONLY)
640 cpu_list_lock();
641 #endif
642 env->next_cpu = NULL;
643 penv = &first_cpu;
644 cpu_index = 0;
645 while (*penv != NULL) {
646 penv = &(*penv)->next_cpu;
647 cpu_index++;
649 env->cpu_index = cpu_index;
650 env->numa_node = 0;
651 QTAILQ_INIT(&env->breakpoints);
652 QTAILQ_INIT(&env->watchpoints);
653 #ifdef _WIN32
654 env->thread_id = GetCurrentProcessId();
655 #else
656 env->thread_id = getpid();
657 #endif
658 *penv = env;
659 #if defined(CONFIG_USER_ONLY)
660 cpu_list_unlock();
661 #endif
662 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
663 vmstate_register(cpu_index, &vmstate_cpu_common, env);
664 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
665 cpu_save, cpu_load, env);
666 #endif
669 static inline void invalidate_page_bitmap(PageDesc *p)
671 if (p->code_bitmap) {
672 qemu_free(p->code_bitmap);
673 p->code_bitmap = NULL;
675 p->code_write_count = 0;
678 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
680 static void page_flush_tb_1 (int level, void **lp)
682 int i;
684 if (*lp == NULL) {
685 return;
687 if (level == 0) {
688 PageDesc *pd = *lp;
689 for (i = 0; i < L2_SIZE; ++i) {
690 pd[i].first_tb = NULL;
691 invalidate_page_bitmap(pd + i);
693 } else {
694 void **pp = *lp;
695 for (i = 0; i < L2_SIZE; ++i) {
696 page_flush_tb_1 (level - 1, pp + i);
701 static void page_flush_tb(void)
703 int i;
704 for (i = 0; i < V_L1_SIZE; i++) {
705 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
709 /* flush all the translation blocks */
710 /* XXX: tb_flush is currently not thread safe */
711 void tb_flush(CPUState *env1)
713 CPUState *env;
714 #if defined(DEBUG_FLUSH)
715 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
716 (unsigned long)(code_gen_ptr - code_gen_buffer),
717 nb_tbs, nb_tbs > 0 ?
718 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
719 #endif
720 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
721 cpu_abort(env1, "Internal error: code buffer overflow\n");
723 nb_tbs = 0;
725 for(env = first_cpu; env != NULL; env = env->next_cpu) {
726 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
729 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
730 page_flush_tb();
732 code_gen_ptr = code_gen_buffer;
733 /* XXX: flush processor icache at this point if cache flush is
734 expensive */
735 tb_flush_count++;
738 #ifdef DEBUG_TB_CHECK
740 static void tb_invalidate_check(target_ulong address)
742 TranslationBlock *tb;
743 int i;
744 address &= TARGET_PAGE_MASK;
745 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
746 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
747 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
748 address >= tb->pc + tb->size)) {
749 printf("ERROR invalidate: address=" TARGET_FMT_lx
750 " PC=%08lx size=%04x\n",
751 address, (long)tb->pc, tb->size);
757 /* verify that all the pages have correct rights for code */
758 static void tb_page_check(void)
760 TranslationBlock *tb;
761 int i, flags1, flags2;
763 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
764 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
765 flags1 = page_get_flags(tb->pc);
766 flags2 = page_get_flags(tb->pc + tb->size - 1);
767 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
768 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
769 (long)tb->pc, tb->size, flags1, flags2);
775 #endif
777 /* invalidate one TB */
778 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
779 int next_offset)
781 TranslationBlock *tb1;
782 for(;;) {
783 tb1 = *ptb;
784 if (tb1 == tb) {
785 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
786 break;
788 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
792 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
794 TranslationBlock *tb1;
795 unsigned int n1;
797 for(;;) {
798 tb1 = *ptb;
799 n1 = (long)tb1 & 3;
800 tb1 = (TranslationBlock *)((long)tb1 & ~3);
801 if (tb1 == tb) {
802 *ptb = tb1->page_next[n1];
803 break;
805 ptb = &tb1->page_next[n1];
809 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
811 TranslationBlock *tb1, **ptb;
812 unsigned int n1;
814 ptb = &tb->jmp_next[n];
815 tb1 = *ptb;
816 if (tb1) {
817 /* find tb(n) in circular list */
818 for(;;) {
819 tb1 = *ptb;
820 n1 = (long)tb1 & 3;
821 tb1 = (TranslationBlock *)((long)tb1 & ~3);
822 if (n1 == n && tb1 == tb)
823 break;
824 if (n1 == 2) {
825 ptb = &tb1->jmp_first;
826 } else {
827 ptb = &tb1->jmp_next[n1];
830 /* now we can remove tb(n) from the list */
831 *ptb = tb->jmp_next[n];
833 tb->jmp_next[n] = NULL;
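/* Editor's note (not in the original source): the jump lists store tagged
   pointers.  A sketch of the encoding used throughout this file:

       entry = (TranslationBlock *)((long)tb | n);   n = 0 or 1 selects which
                                                     of tb's two jumps this
                                                     entry refers to; its list
                                                     link is tb->jmp_next[n]
       entry = (TranslationBlock *)((long)tb | 2);   2 marks the list head,
                                                     stored in jmp_first

   and decoding is always the pair of operations seen above:

       n1  = (long)tb1 & 3;
       tb1 = (TranslationBlock *)((long)tb1 & ~3);
*/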
837 /* reset the jump entry 'n' of a TB so that it is not chained to
838 another TB */
839 static inline void tb_reset_jump(TranslationBlock *tb, int n)
841 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
844 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
846 CPUState *env;
847 PageDesc *p;
848 unsigned int h, n1;
849 tb_page_addr_t phys_pc;
850 TranslationBlock *tb1, *tb2;
852 /* remove the TB from the hash list */
853 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
854 h = tb_phys_hash_func(phys_pc);
855 tb_remove(&tb_phys_hash[h], tb,
856 offsetof(TranslationBlock, phys_hash_next));
858 /* remove the TB from the page list */
859 if (tb->page_addr[0] != page_addr) {
860 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
861 tb_page_remove(&p->first_tb, tb);
862 invalidate_page_bitmap(p);
864 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
865 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
866 tb_page_remove(&p->first_tb, tb);
867 invalidate_page_bitmap(p);
870 tb_invalidated_flag = 1;
872 /* remove the TB from the hash list */
873 h = tb_jmp_cache_hash_func(tb->pc);
874 for(env = first_cpu; env != NULL; env = env->next_cpu) {
875 if (env->tb_jmp_cache[h] == tb)
876 env->tb_jmp_cache[h] = NULL;
879 /* remove this TB from the two jump lists */
880 tb_jmp_remove(tb, 0);
881 tb_jmp_remove(tb, 1);
883 /* remove any remaining jumps to this TB */
884 tb1 = tb->jmp_first;
885 for(;;) {
886 n1 = (long)tb1 & 3;
887 if (n1 == 2)
888 break;
889 tb1 = (TranslationBlock *)((long)tb1 & ~3);
890 tb2 = tb1->jmp_next[n1];
891 tb_reset_jump(tb1, n1);
892 tb1->jmp_next[n1] = NULL;
893 tb1 = tb2;
895 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
897 tb_phys_invalidate_count++;
900 static inline void set_bits(uint8_t *tab, int start, int len)
902 int end, mask, end1;
904 end = start + len;
905 tab += start >> 3;
906 mask = 0xff << (start & 7);
907 if ((start & ~7) == (end & ~7)) {
908 if (start < end) {
909 mask &= ~(0xff << (end & 7));
910 *tab |= mask;
912 } else {
913 *tab++ |= mask;
914 start = (start + 8) & ~7;
915 end1 = end & ~7;
916 while (start < end1) {
917 *tab++ = 0xff;
918 start += 8;
920 if (start < end) {
921 mask = ~(0xff << (end & 7));
922 *tab |= mask;
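/* Editor's illustration (not in the original source): set_bits() marks a
   run of bits in a byte-array bitmap.  For example, set_bits(tab, 3, 7)
   (start bit 3, length 7, i.e. bits 3..9) takes the "else" branch above:

       tab[0] |= 0xf8;   bits 3..7 of the first byte
       tab[1] |= 0x03;   bits 8..9, i.e. bits 0..1 of the second byte
*/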
927 static void build_page_bitmap(PageDesc *p)
929 int n, tb_start, tb_end;
930 TranslationBlock *tb;
932 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
934 tb = p->first_tb;
935 while (tb != NULL) {
936 n = (long)tb & 3;
937 tb = (TranslationBlock *)((long)tb & ~3);
938 /* NOTE: this is subtle as a TB may span two physical pages */
939 if (n == 0) {
940 /* NOTE: tb_end may be after the end of the page, but
941 it is not a problem */
942 tb_start = tb->pc & ~TARGET_PAGE_MASK;
943 tb_end = tb_start + tb->size;
944 if (tb_end > TARGET_PAGE_SIZE)
945 tb_end = TARGET_PAGE_SIZE;
946 } else {
947 tb_start = 0;
948 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
950 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
951 tb = tb->page_next[n];
955 TranslationBlock *tb_gen_code(CPUState *env,
956 target_ulong pc, target_ulong cs_base,
957 int flags, int cflags)
959 TranslationBlock *tb;
960 uint8_t *tc_ptr;
961 tb_page_addr_t phys_pc, phys_page2;
962 target_ulong virt_page2;
963 int code_gen_size;
965 phys_pc = get_page_addr_code(env, pc);
966 tb = tb_alloc(pc);
967 if (!tb) {
968 /* flush must be done */
969 tb_flush(env);
970 /* cannot fail at this point */
971 tb = tb_alloc(pc);
972 /* Don't forget to invalidate previous TB info. */
973 tb_invalidated_flag = 1;
975 tc_ptr = code_gen_ptr;
976 tb->tc_ptr = tc_ptr;
977 tb->cs_base = cs_base;
978 tb->flags = flags;
979 tb->cflags = cflags;
980 cpu_gen_code(env, tb, &code_gen_size);
981 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
983 /* check next page if needed */
984 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
985 phys_page2 = -1;
986 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
987 phys_page2 = get_page_addr_code(env, virt_page2);
989 tb_link_page(tb, phys_pc, phys_page2);
990 return tb;
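/* Editor's illustration (not in the original source), assuming
   TARGET_PAGE_SIZE == 4096: if a block starts at pc == 0xfff8 with
   tb->size == 0x20, then

       virt_page2 = (0xfff8 + 0x20 - 1) & ~0xfff = 0x10000
       pc & TARGET_PAGE_MASK                     = 0xf000

   so the block crosses a page boundary, phys_page2 is looked up, and
   tb_link_page() registers the TB on both physical pages. */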
993 /* invalidate all TBs which intersect with the target physical page
994 starting in the range [start, end). NOTE: start and end must refer to
995 the same physical page. 'is_cpu_write_access' should be true if called
996 from a real cpu write access: the virtual CPU will exit the current
997 TB if code is modified inside this TB. */
998 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
999 int is_cpu_write_access)
1001 TranslationBlock *tb, *tb_next, *saved_tb;
1002 CPUState *env = cpu_single_env;
1003 tb_page_addr_t tb_start, tb_end;
1004 PageDesc *p;
1005 int n;
1006 #ifdef TARGET_HAS_PRECISE_SMC
1007 int current_tb_not_found = is_cpu_write_access;
1008 TranslationBlock *current_tb = NULL;
1009 int current_tb_modified = 0;
1010 target_ulong current_pc = 0;
1011 target_ulong current_cs_base = 0;
1012 int current_flags = 0;
1013 #endif /* TARGET_HAS_PRECISE_SMC */
1015 p = page_find(start >> TARGET_PAGE_BITS);
1016 if (!p)
1017 return;
1018 if (!p->code_bitmap &&
1019 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1020 is_cpu_write_access) {
1021 /* build code bitmap */
1022 build_page_bitmap(p);
1025 /* we remove all the TBs in the range [start, end) */
1026 /* XXX: see if in some cases it could be faster to invalidate all the code */
1027 tb = p->first_tb;
1028 while (tb != NULL) {
1029 n = (long)tb & 3;
1030 tb = (TranslationBlock *)((long)tb & ~3);
1031 tb_next = tb->page_next[n];
1032 /* NOTE: this is subtle as a TB may span two physical pages */
1033 if (n == 0) {
1034 /* NOTE: tb_end may be after the end of the page, but
1035 it is not a problem */
1036 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1037 tb_end = tb_start + tb->size;
1038 } else {
1039 tb_start = tb->page_addr[1];
1040 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1042 if (!(tb_end <= start || tb_start >= end)) {
1043 #ifdef TARGET_HAS_PRECISE_SMC
1044 if (current_tb_not_found) {
1045 current_tb_not_found = 0;
1046 current_tb = NULL;
1047 if (env->mem_io_pc) {
1048 /* now we have a real cpu fault */
1049 current_tb = tb_find_pc(env->mem_io_pc);
1052 if (current_tb == tb &&
1053 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1054 /* If we are modifying the current TB, we must stop
1055 its execution. We could be more precise by checking
1056 that the modification is after the current PC, but it
1057 would require a specialized function to partially
1058 restore the CPU state */
1060 current_tb_modified = 1;
1061 cpu_restore_state(current_tb, env,
1062 env->mem_io_pc, NULL);
1063 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1064 &current_flags);
1066 #endif /* TARGET_HAS_PRECISE_SMC */
1067 /* we need to do that to handle the case where a signal
1068 occurs while doing tb_phys_invalidate() */
1069 saved_tb = NULL;
1070 if (env) {
1071 saved_tb = env->current_tb;
1072 env->current_tb = NULL;
1074 tb_phys_invalidate(tb, -1);
1075 if (env) {
1076 env->current_tb = saved_tb;
1077 if (env->interrupt_request && env->current_tb)
1078 cpu_interrupt(env, env->interrupt_request);
1081 tb = tb_next;
1083 #if !defined(CONFIG_USER_ONLY)
1084 /* if no code remaining, no need to continue to use slow writes */
1085 if (!p->first_tb) {
1086 invalidate_page_bitmap(p);
1087 if (is_cpu_write_access) {
1088 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1091 #endif
1092 #ifdef TARGET_HAS_PRECISE_SMC
1093 if (current_tb_modified) {
1094 /* we generate a block containing just the instruction
1095 modifying the memory. It will ensure that it cannot modify
1096 itself */
1097 env->current_tb = NULL;
1098 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1099 cpu_resume_from_signal(env, NULL);
1101 #endif
1104 /* len must be <= 8 and start must be a multiple of len */
1105 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1107 PageDesc *p;
1108 int offset, b;
1109 #if 0
1110 if (1) {
1111 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1112 cpu_single_env->mem_io_vaddr, len,
1113 cpu_single_env->eip,
1114 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1116 #endif
1117 p = page_find(start >> TARGET_PAGE_BITS);
1118 if (!p)
1119 return;
1120 if (p->code_bitmap) {
1121 offset = start & ~TARGET_PAGE_MASK;
1122 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1123 if (b & ((1 << len) - 1))
1124 goto do_invalidate;
1125 } else {
1126 do_invalidate:
1127 tb_invalidate_phys_page_range(start, start + len, 1);
1131 #if !defined(CONFIG_SOFTMMU)
1132 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1133 unsigned long pc, void *puc)
1135 TranslationBlock *tb;
1136 PageDesc *p;
1137 int n;
1138 #ifdef TARGET_HAS_PRECISE_SMC
1139 TranslationBlock *current_tb = NULL;
1140 CPUState *env = cpu_single_env;
1141 int current_tb_modified = 0;
1142 target_ulong current_pc = 0;
1143 target_ulong current_cs_base = 0;
1144 int current_flags = 0;
1145 #endif
1147 addr &= TARGET_PAGE_MASK;
1148 p = page_find(addr >> TARGET_PAGE_BITS);
1149 if (!p)
1150 return;
1151 tb = p->first_tb;
1152 #ifdef TARGET_HAS_PRECISE_SMC
1153 if (tb && pc != 0) {
1154 current_tb = tb_find_pc(pc);
1156 #endif
1157 while (tb != NULL) {
1158 n = (long)tb & 3;
1159 tb = (TranslationBlock *)((long)tb & ~3);
1160 #ifdef TARGET_HAS_PRECISE_SMC
1161 if (current_tb == tb &&
1162 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1163 /* If we are modifying the current TB, we must stop
1164 its execution. We could be more precise by checking
1165 that the modification is after the current PC, but it
1166 would require a specialized function to partially
1167 restore the CPU state */
1169 current_tb_modified = 1;
1170 cpu_restore_state(current_tb, env, pc, puc);
1171 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1172 &current_flags);
1174 #endif /* TARGET_HAS_PRECISE_SMC */
1175 tb_phys_invalidate(tb, addr);
1176 tb = tb->page_next[n];
1178 p->first_tb = NULL;
1179 #ifdef TARGET_HAS_PRECISE_SMC
1180 if (current_tb_modified) {
1181 /* we generate a block containing just the instruction
1182 modifying the memory. It will ensure that it cannot modify
1183 itself */
1184 env->current_tb = NULL;
1185 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1186 cpu_resume_from_signal(env, puc);
1188 #endif
1190 #endif
1192 /* add the tb in the target page and protect it if necessary */
1193 static inline void tb_alloc_page(TranslationBlock *tb,
1194 unsigned int n, tb_page_addr_t page_addr)
1196 PageDesc *p;
1197 TranslationBlock *last_first_tb;
1199 tb->page_addr[n] = page_addr;
1200 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1201 tb->page_next[n] = p->first_tb;
1202 last_first_tb = p->first_tb;
1203 p->first_tb = (TranslationBlock *)((long)tb | n);
1204 invalidate_page_bitmap(p);
1206 #if defined(TARGET_HAS_SMC) || 1
1208 #if defined(CONFIG_USER_ONLY)
1209 if (p->flags & PAGE_WRITE) {
1210 target_ulong addr;
1211 PageDesc *p2;
1212 int prot;
1214 /* force the host page as non writable (writes will have a
1215 page fault + mprotect overhead) */
1216 page_addr &= qemu_host_page_mask;
1217 prot = 0;
1218 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1219 addr += TARGET_PAGE_SIZE) {
1221 p2 = page_find (addr >> TARGET_PAGE_BITS);
1222 if (!p2)
1223 continue;
1224 prot |= p2->flags;
1225 p2->flags &= ~PAGE_WRITE;
1227 mprotect(g2h(page_addr), qemu_host_page_size,
1228 (prot & PAGE_BITS) & ~PAGE_WRITE);
1229 #ifdef DEBUG_TB_INVALIDATE
1230 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1231 page_addr);
1232 #endif
1234 #else
1235 /* if some code is already present, then the pages are already
1236 protected. So we handle the case where only the first TB is
1237 allocated in a physical page */
1238 if (!last_first_tb) {
1239 tlb_protect_code(page_addr);
1241 #endif
1243 #endif /* TARGET_HAS_SMC */
1246 /* Allocate a new translation block. Flush the translation buffer if
1247 too many translation blocks or too much generated code. */
1248 TranslationBlock *tb_alloc(target_ulong pc)
1250 TranslationBlock *tb;
1252 if (nb_tbs >= code_gen_max_blocks ||
1253 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1254 return NULL;
1255 tb = &tbs[nb_tbs++];
1256 tb->pc = pc;
1257 tb->cflags = 0;
1258 return tb;
1261 void tb_free(TranslationBlock *tb)
1263 /* In practice this is mostly used for single-use temporary TBs.
1264 Ignore the hard cases and just back up if this TB happens to
1265 be the last one generated. */
1266 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1267 code_gen_ptr = tb->tc_ptr;
1268 nb_tbs--;
1272 /* add a new TB and link it to the physical page tables. phys_page2 is
1273 (-1) to indicate that only one page contains the TB. */
1274 void tb_link_page(TranslationBlock *tb,
1275 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1277 unsigned int h;
1278 TranslationBlock **ptb;
1280 /* Grab the mmap lock to stop another thread invalidating this TB
1281 before we are done. */
1282 mmap_lock();
1283 /* add in the physical hash table */
1284 h = tb_phys_hash_func(phys_pc);
1285 ptb = &tb_phys_hash[h];
1286 tb->phys_hash_next = *ptb;
1287 *ptb = tb;
1289 /* add in the page list */
1290 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1291 if (phys_page2 != -1)
1292 tb_alloc_page(tb, 1, phys_page2);
1293 else
1294 tb->page_addr[1] = -1;
1296 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1297 tb->jmp_next[0] = NULL;
1298 tb->jmp_next[1] = NULL;
1300 /* init original jump addresses */
1301 if (tb->tb_next_offset[0] != 0xffff)
1302 tb_reset_jump(tb, 0);
1303 if (tb->tb_next_offset[1] != 0xffff)
1304 tb_reset_jump(tb, 1);
1306 #ifdef DEBUG_TB_CHECK
1307 tb_page_check();
1308 #endif
1309 mmap_unlock();
1312 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1313 tb[1].tc_ptr. Return NULL if not found */
1314 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1316 int m_min, m_max, m;
1317 unsigned long v;
1318 TranslationBlock *tb;
1320 if (nb_tbs <= 0)
1321 return NULL;
1322 if (tc_ptr < (unsigned long)code_gen_buffer ||
1323 tc_ptr >= (unsigned long)code_gen_ptr)
1324 return NULL;
1325 /* binary search (cf Knuth) */
1326 m_min = 0;
1327 m_max = nb_tbs - 1;
1328 while (m_min <= m_max) {
1329 m = (m_min + m_max) >> 1;
1330 tb = &tbs[m];
1331 v = (unsigned long)tb->tc_ptr;
1332 if (v == tc_ptr)
1333 return tb;
1334 else if (tc_ptr < v) {
1335 m_max = m - 1;
1336 } else {
1337 m_min = m + 1;
1340 return &tbs[m_max];
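/* Editor's illustration (not in the original source): tbs[] is filled in
   the order code is generated, so tc_ptr values are ascending and a binary
   search works.  E.g. with three TBs whose tc_ptr values are 0x100, 0x180
   and 0x220, a query for tc_ptr == 0x1a0 narrows m_min/m_max to 2/1 and
   returns &tbs[1], the block whose generated code contains that address. */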
1343 static void tb_reset_jump_recursive(TranslationBlock *tb);
1345 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1347 TranslationBlock *tb1, *tb_next, **ptb;
1348 unsigned int n1;
1350 tb1 = tb->jmp_next[n];
1351 if (tb1 != NULL) {
1352 /* find head of list */
1353 for(;;) {
1354 n1 = (long)tb1 & 3;
1355 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1356 if (n1 == 2)
1357 break;
1358 tb1 = tb1->jmp_next[n1];
1360 /* we are now sure that tb jumps to tb1 */
1361 tb_next = tb1;
1363 /* remove tb from the jmp_first list */
1364 ptb = &tb_next->jmp_first;
1365 for(;;) {
1366 tb1 = *ptb;
1367 n1 = (long)tb1 & 3;
1368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1369 if (n1 == n && tb1 == tb)
1370 break;
1371 ptb = &tb1->jmp_next[n1];
1373 *ptb = tb->jmp_next[n];
1374 tb->jmp_next[n] = NULL;
1376 /* remove the jump to the next tb in the generated code */
1377 tb_reset_jump(tb, n);
1379 /* remove jumps in the tb we could have jumped to */
1380 tb_reset_jump_recursive(tb_next);
1384 static void tb_reset_jump_recursive(TranslationBlock *tb)
1386 tb_reset_jump_recursive2(tb, 0);
1387 tb_reset_jump_recursive2(tb, 1);
1390 #if defined(TARGET_HAS_ICE)
1391 #if defined(CONFIG_USER_ONLY)
1392 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1394 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1396 #else
1397 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1399 target_phys_addr_t addr;
1400 target_ulong pd;
1401 ram_addr_t ram_addr;
1402 PhysPageDesc *p;
1404 addr = cpu_get_phys_page_debug(env, pc);
1405 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1406 if (!p) {
1407 pd = IO_MEM_UNASSIGNED;
1408 } else {
1409 pd = p->phys_offset;
1411 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1412 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1414 #endif
1415 #endif /* TARGET_HAS_ICE */
1417 #if defined(CONFIG_USER_ONLY)
1418 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1423 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1424 int flags, CPUWatchpoint **watchpoint)
1426 return -ENOSYS;
1428 #else
1429 /* Add a watchpoint. */
1430 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1431 int flags, CPUWatchpoint **watchpoint)
1433 target_ulong len_mask = ~(len - 1);
1434 CPUWatchpoint *wp;
1436 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1437 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1438 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1439 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1440 return -EINVAL;
1442 wp = qemu_malloc(sizeof(*wp));
1444 wp->vaddr = addr;
1445 wp->len_mask = len_mask;
1446 wp->flags = flags;
1448 /* keep all GDB-injected watchpoints in front */
1449 if (flags & BP_GDB)
1450 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1451 else
1452 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1454 tlb_flush_page(env, addr);
1456 if (watchpoint)
1457 *watchpoint = wp;
1458 return 0;
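/* Editor's sketch (not in the original source; the BP_MEM_WRITE flag name is
   an assumption based on its use by the gdbstub): a 4-byte write watchpoint
   must have len 1, 2, 4 or 8 and addr aligned to len, e.g.

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr & ~3, 4, BP_GDB | BP_MEM_WRITE, &wp);

   which returns 0 on success and -EINVAL for an invalid length/alignment. */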
1461 /* Remove a specific watchpoint. */
1462 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1463 int flags)
1465 target_ulong len_mask = ~(len - 1);
1466 CPUWatchpoint *wp;
1468 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1469 if (addr == wp->vaddr && len_mask == wp->len_mask
1470 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1471 cpu_watchpoint_remove_by_ref(env, wp);
1472 return 0;
1475 return -ENOENT;
1478 /* Remove a specific watchpoint by reference. */
1479 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1481 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1483 tlb_flush_page(env, watchpoint->vaddr);
1485 qemu_free(watchpoint);
1488 /* Remove all matching watchpoints. */
1489 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1491 CPUWatchpoint *wp, *next;
1493 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1494 if (wp->flags & mask)
1495 cpu_watchpoint_remove_by_ref(env, wp);
1498 #endif
1500 /* Add a breakpoint. */
1501 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1502 CPUBreakpoint **breakpoint)
1504 #if defined(TARGET_HAS_ICE)
1505 CPUBreakpoint *bp;
1507 bp = qemu_malloc(sizeof(*bp));
1509 bp->pc = pc;
1510 bp->flags = flags;
1512 /* keep all GDB-injected breakpoints in front */
1513 if (flags & BP_GDB)
1514 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1515 else
1516 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1518 breakpoint_invalidate(env, pc);
1520 if (breakpoint)
1521 *breakpoint = bp;
1522 return 0;
1523 #else
1524 return -ENOSYS;
1525 #endif
1528 /* Remove a specific breakpoint. */
1529 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1531 #if defined(TARGET_HAS_ICE)
1532 CPUBreakpoint *bp;
1534 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1535 if (bp->pc == pc && bp->flags == flags) {
1536 cpu_breakpoint_remove_by_ref(env, bp);
1537 return 0;
1540 return -ENOENT;
1541 #else
1542 return -ENOSYS;
1543 #endif
1546 /* Remove a specific breakpoint by reference. */
1547 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1549 #if defined(TARGET_HAS_ICE)
1550 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1552 breakpoint_invalidate(env, breakpoint->pc);
1554 qemu_free(breakpoint);
1555 #endif
1558 /* Remove all matching breakpoints. */
1559 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1561 #if defined(TARGET_HAS_ICE)
1562 CPUBreakpoint *bp, *next;
1564 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1565 if (bp->flags & mask)
1566 cpu_breakpoint_remove_by_ref(env, bp);
1568 #endif
1571 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1572 CPU loop after each instruction */
1573 void cpu_single_step(CPUState *env, int enabled)
1575 #if defined(TARGET_HAS_ICE)
1576 if (env->singlestep_enabled != enabled) {
1577 env->singlestep_enabled = enabled;
1578 if (kvm_enabled())
1579 kvm_update_guest_debug(env, 0);
1580 else {
1581 /* must flush all the translated code to avoid inconsistencies */
1582 /* XXX: only flush what is necessary */
1583 tb_flush(env);
1586 #endif
1589 /* enable or disable low-level logging */
1590 void cpu_set_log(int log_flags)
1592 loglevel = log_flags;
1593 if (loglevel && !logfile) {
1594 logfile = fopen(logfilename, log_append ? "a" : "w");
1595 if (!logfile) {
1596 perror(logfilename);
1597 _exit(1);
1599 #if !defined(CONFIG_SOFTMMU)
1600 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1602 static char logfile_buf[4096];
1603 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1605 #elif !defined(_WIN32)
1606 /* Win32 doesn't support line-buffering and requires size >= 2 */
1607 setvbuf(logfile, NULL, _IOLBF, 0);
1608 #endif
1609 log_append = 1;
1611 if (!loglevel && logfile) {
1612 fclose(logfile);
1613 logfile = NULL;
1617 void cpu_set_log_filename(const char *filename)
1619 logfilename = strdup(filename);
1620 if (logfile) {
1621 fclose(logfile);
1622 logfile = NULL;
1624 cpu_set_log(loglevel);
1627 static void cpu_unlink_tb(CPUState *env)
1629 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1630 problem and hope the cpu will stop of its own accord. For userspace
1631 emulation this often isn't actually as bad as it sounds. Often
1632 signals are used primarily to interrupt blocking syscalls. */
1633 TranslationBlock *tb;
1634 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1636 spin_lock(&interrupt_lock);
1637 tb = env->current_tb;
1638 /* if the cpu is currently executing code, we must unlink it and
1639 all the potentially executing TB */
1640 if (tb) {
1641 env->current_tb = NULL;
1642 tb_reset_jump_recursive(tb);
1644 spin_unlock(&interrupt_lock);
1647 /* mask must never be zero, except for A20 change call */
1648 void cpu_interrupt(CPUState *env, int mask)
1650 int old_mask;
1652 old_mask = env->interrupt_request;
1653 env->interrupt_request |= mask;
1654 if (kvm_enabled() && !kvm_irqchip_in_kernel())
1655 kvm_update_interrupt_request(env);
1657 #ifndef CONFIG_USER_ONLY
1659 * If called from iothread context, wake the target cpu in
1660 * case it's halted.
1662 if (!qemu_cpu_self(env)) {
1663 qemu_cpu_kick(env);
1664 return;
1666 #endif
1668 if (use_icount) {
1669 env->icount_decr.u16.high = 0xffff;
1670 #ifndef CONFIG_USER_ONLY
1671 if (!can_do_io(env)
1672 && (mask & ~old_mask) != 0) {
1673 cpu_abort(env, "Raised interrupt while not in I/O function");
1675 #endif
1676 } else {
1677 cpu_unlink_tb(env);
1681 void cpu_reset_interrupt(CPUState *env, int mask)
1683 env->interrupt_request &= ~mask;
1686 void cpu_exit(CPUState *env)
1688 env->exit_request = 1;
1689 cpu_unlink_tb(env);
1692 const CPULogItem cpu_log_items[] = {
1693 { CPU_LOG_TB_OUT_ASM, "out_asm",
1694 "show generated host assembly code for each compiled TB" },
1695 { CPU_LOG_TB_IN_ASM, "in_asm",
1696 "show target assembly code for each compiled TB" },
1697 { CPU_LOG_TB_OP, "op",
1698 "show micro ops for each compiled TB" },
1699 { CPU_LOG_TB_OP_OPT, "op_opt",
1700 "show micro ops "
1701 #ifdef TARGET_I386
1702 "before eflags optimization and "
1703 #endif
1704 "after liveness analysis" },
1705 { CPU_LOG_INT, "int",
1706 "show interrupts/exceptions in short format" },
1707 { CPU_LOG_EXEC, "exec",
1708 "show trace before each executed TB (lots of logs)" },
1709 { CPU_LOG_TB_CPU, "cpu",
1710 "show CPU state before block translation" },
1711 #ifdef TARGET_I386
1712 { CPU_LOG_PCALL, "pcall",
1713 "show protected mode far calls/returns/exceptions" },
1714 { CPU_LOG_RESET, "cpu_reset",
1715 "show CPU state before CPU resets" },
1716 #endif
1717 #ifdef DEBUG_IOPORT
1718 { CPU_LOG_IOPORT, "ioport",
1719 "show all i/o ports accesses" },
1720 #endif
1721 { 0, NULL, NULL },
1724 #ifndef CONFIG_USER_ONLY
1725 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1726 = QLIST_HEAD_INITIALIZER(memory_client_list);
1728 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1729 ram_addr_t size,
1730 ram_addr_t phys_offset)
1732 CPUPhysMemoryClient *client;
1733 QLIST_FOREACH(client, &memory_client_list, list) {
1734 client->set_memory(client, start_addr, size, phys_offset);
1738 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1739 target_phys_addr_t end)
1741 CPUPhysMemoryClient *client;
1742 QLIST_FOREACH(client, &memory_client_list, list) {
1743 int r = client->sync_dirty_bitmap(client, start, end);
1744 if (r < 0)
1745 return r;
1747 return 0;
1750 static int cpu_notify_migration_log(int enable)
1752 CPUPhysMemoryClient *client;
1753 QLIST_FOREACH(client, &memory_client_list, list) {
1754 int r = client->migration_log(client, enable);
1755 if (r < 0)
1756 return r;
1758 return 0;
1761 static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1762 int level, void **lp)
1764 int i;
1766 if (*lp == NULL) {
1767 return;
1769 if (level == 0) {
1770 PhysPageDesc *pd = *lp;
1771 for (i = 0; i < L2_SIZE; ++i) {
1772 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1773 client->set_memory(client, pd[i].region_offset,
1774 TARGET_PAGE_SIZE, pd[i].phys_offset);
1777 } else {
1778 void **pp = *lp;
1779 for (i = 0; i < L2_SIZE; ++i) {
1780 phys_page_for_each_1(client, level - 1, pp + i);
1785 static void phys_page_for_each(CPUPhysMemoryClient *client)
1787 int i;
1788 for (i = 0; i < P_L1_SIZE; ++i) {
1789 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1790 l1_phys_map + i);
1794 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1796 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1797 phys_page_for_each(client);
1800 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1802 QLIST_REMOVE(client, list);
1804 #endif
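/* Editor's sketch (not part of the original file): a minimal
   CPUPhysMemoryClient, with callback signatures inferred from the calls
   above; the authoritative struct definition lives in QEMU's headers. */
#if 0   /* illustration only */
static void my_set_memory(CPUPhysMemoryClient *client,
                          target_phys_addr_t start_addr,
                          ram_addr_t size, ram_addr_t phys_offset)
{
    /* record or forward the new mapping */
}

static int my_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                target_phys_addr_t start,
                                target_phys_addr_t end)
{
    return 0;   /* 0 on success, negative errno on failure */
}

static int my_migration_log(CPUPhysMemoryClient *client, int enable)
{
    return 0;
}

static CPUPhysMemoryClient my_client = {
    .set_memory = my_set_memory,
    .sync_dirty_bitmap = my_sync_dirty_bitmap,
    .migration_log = my_migration_log,
};

static void my_client_init(void)
{
    /* registration also replays all existing mappings through
       phys_page_for_each(), so the client starts fully synchronized */
    cpu_register_phys_memory_client(&my_client);
}
#endif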
1806 static int cmp1(const char *s1, int n, const char *s2)
1808 if (strlen(s2) != n)
1809 return 0;
1810 return memcmp(s1, s2, n) == 0;
1813 /* takes a comma separated list of log masks. Return 0 if error. */
1814 int cpu_str_to_log_mask(const char *str)
1816 const CPULogItem *item;
1817 int mask;
1818 const char *p, *p1;
1820 p = str;
1821 mask = 0;
1822 for(;;) {
1823 p1 = strchr(p, ',');
1824 if (!p1)
1825 p1 = p + strlen(p);
1826 if(cmp1(p,p1-p,"all")) {
1827 for(item = cpu_log_items; item->mask != 0; item++) {
1828 mask |= item->mask;
1830 } else {
1831 for(item = cpu_log_items; item->mask != 0; item++) {
1832 if (cmp1(p, p1 - p, item->name))
1833 goto found;
1835 return 0;
1837 found:
1838 mask |= item->mask;
1839 if (*p1 != ',')
1840 break;
1841 p = p1 + 1;
1843 return mask;
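/* Editor's illustration (not in the original source): given the
   cpu_log_items[] table above,

       cpu_str_to_log_mask("in_asm,op")  == CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP
       cpu_str_to_log_mask("all")        == the OR of every item's mask
       cpu_str_to_log_mask("nosuchitem") == 0   (error)
*/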
1846 void cpu_abort(CPUState *env, const char *fmt, ...)
1848 va_list ap;
1849 va_list ap2;
1851 va_start(ap, fmt);
1852 va_copy(ap2, ap);
1853 fprintf(stderr, "qemu: fatal: ");
1854 vfprintf(stderr, fmt, ap);
1855 fprintf(stderr, "\n");
1856 #ifdef TARGET_I386
1857 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1858 #else
1859 cpu_dump_state(env, stderr, fprintf, 0);
1860 #endif
1861 if (qemu_log_enabled()) {
1862 qemu_log("qemu: fatal: ");
1863 qemu_log_vprintf(fmt, ap2);
1864 qemu_log("\n");
1865 #ifdef TARGET_I386
1866 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1867 #else
1868 log_cpu_state(env, 0);
1869 #endif
1870 qemu_log_flush();
1871 qemu_log_close();
1873 va_end(ap2);
1874 va_end(ap);
1875 #if defined(CONFIG_USER_ONLY)
1877 struct sigaction act;
1878 sigfillset(&act.sa_mask);
1879 act.sa_handler = SIG_DFL;
1880 sigaction(SIGABRT, &act, NULL);
1882 #endif
1883 abort();
1886 CPUState *cpu_copy(CPUState *env)
1888 CPUState *new_env = cpu_init(env->cpu_model_str);
1889 CPUState *next_cpu = new_env->next_cpu;
1890 int cpu_index = new_env->cpu_index;
1891 #if defined(TARGET_HAS_ICE)
1892 CPUBreakpoint *bp;
1893 CPUWatchpoint *wp;
1894 #endif
1896 memcpy(new_env, env, sizeof(CPUState));
1898 /* Preserve chaining and index. */
1899 new_env->next_cpu = next_cpu;
1900 new_env->cpu_index = cpu_index;
1902 /* Clone all break/watchpoints.
1903 Note: Once we support ptrace with hw-debug register access, make sure
1904 BP_CPU break/watchpoints are handled correctly on clone. */
1905 QTAILQ_INIT(&env->breakpoints);
1906 QTAILQ_INIT(&env->watchpoints);
1907 #if defined(TARGET_HAS_ICE)
1908 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1909 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1911 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1912 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1913 wp->flags, NULL);
1915 #endif
1917 return new_env;
1920 #if !defined(CONFIG_USER_ONLY)
1922 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1924 unsigned int i;
1926 /* Discard jump cache entries for any tb which might potentially
1927 overlap the flushed page. */
1928 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1929 memset (&env->tb_jmp_cache[i], 0,
1930 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1932 i = tb_jmp_cache_hash_page(addr);
1933 memset (&env->tb_jmp_cache[i], 0,
1934 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1937 static CPUTLBEntry s_cputlb_empty_entry = {
1938 .addr_read = -1,
1939 .addr_write = -1,
1940 .addr_code = -1,
1941 .addend = -1,
1944 /* NOTE: if flush_global is true, also flush global entries (not
1945 implemented yet) */
1946 void tlb_flush(CPUState *env, int flush_global)
1948 int i;
1950 #if defined(DEBUG_TLB)
1951 printf("tlb_flush:\n");
1952 #endif
1953 /* must reset current TB so that interrupts cannot modify the
1954 links while we are modifying them */
1955 env->current_tb = NULL;
1957 for(i = 0; i < CPU_TLB_SIZE; i++) {
1958 int mmu_idx;
1959 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1960 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1964 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1966 env->tlb_flush_addr = -1;
1967 env->tlb_flush_mask = 0;
1968 tlb_flush_count++;
1971 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1973 if (addr == (tlb_entry->addr_read &
1974 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1975 addr == (tlb_entry->addr_write &
1976 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1977 addr == (tlb_entry->addr_code &
1978 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1979 *tlb_entry = s_cputlb_empty_entry;
1983 void tlb_flush_page(CPUState *env, target_ulong addr)
1985 int i;
1986 int mmu_idx;
1988 #if defined(DEBUG_TLB)
1989 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1990 #endif
1991 /* Check if we need to flush due to large pages. */
1992 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1993 #if defined(DEBUG_TLB)
1994 printf("tlb_flush_page: forced full flush ("
1995 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1996 env->tlb_flush_addr, env->tlb_flush_mask);
1997 #endif
1998 tlb_flush(env, 1);
1999 return;
2001 /* must reset current TB so that interrupts cannot modify the
2002 links while we are modifying them */
2003 env->current_tb = NULL;
2005 addr &= TARGET_PAGE_MASK;
2006 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2007 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2008 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2010 tlb_flush_jmp_cache(env, addr);
2013 /* update the TLBs so that writes to code in the virtual page 'addr'
2014 can be detected */
2015 static void tlb_protect_code(ram_addr_t ram_addr)
2017 cpu_physical_memory_reset_dirty(ram_addr,
2018 ram_addr + TARGET_PAGE_SIZE,
2019 CODE_DIRTY_FLAG);
2022 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2023 tested for self modifying code */
2024 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2025 target_ulong vaddr)
2027 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2030 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2031 unsigned long start, unsigned long length)
2033 unsigned long addr;
2034 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2035 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2036 if ((addr - start) < length) {
2037 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2042 /* Note: start and end must be within the same ram block. */
2043 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2044 int dirty_flags)
2046 CPUState *env;
2047 unsigned long length, start1;
2048 int i;
2050 start &= TARGET_PAGE_MASK;
2051 end = TARGET_PAGE_ALIGN(end);
2053 length = end - start;
2054 if (length == 0)
2055 return;
2056 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2058 /* we modify the TLB cache so that the dirty bit will be set again
2059 when accessing the range */
2060 start1 = (unsigned long)qemu_get_ram_ptr(start);
2061 /* Check that we don't span multiple blocks - this breaks the
2062 address comparisons below. */
2063 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2064 != (end - 1) - start) {
2065 abort();
2068 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2069 int mmu_idx;
2070 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2071 for(i = 0; i < CPU_TLB_SIZE; i++)
2072 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2073 start1, length);
2078 int cpu_physical_memory_set_dirty_tracking(int enable)
2080 int ret = 0;
2081 in_migration = enable;
2082 ret = cpu_notify_migration_log(!!enable);
2083 return ret;
2086 int cpu_physical_memory_get_dirty_tracking(void)
2088 return in_migration;
2091 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2092 target_phys_addr_t end_addr)
2094 int ret;
2096 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2097 return ret;
2100 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2102 ram_addr_t ram_addr;
2103 void *p;
2105 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2106 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2107 + tlb_entry->addend);
2108 ram_addr = qemu_ram_addr_from_host(p);
2109 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2110 tlb_entry->addr_write |= TLB_NOTDIRTY;
2115 /* update the TLB according to the current state of the dirty bits */
2116 void cpu_tlb_update_dirty(CPUState *env)
2118 int i;
2119 int mmu_idx;
2120 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2121 for(i = 0; i < CPU_TLB_SIZE; i++)
2122 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2126 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2128 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2129 tlb_entry->addr_write = vaddr;
2132 /* update the TLB corresponding to virtual page vaddr
2133 so that it is no longer dirty */
2134 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2136 int i;
2137 int mmu_idx;
2139 vaddr &= TARGET_PAGE_MASK;
2140 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2141 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2142 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2145 /* Our TLB does not support large pages, so remember the area covered by
2146 large pages and trigger a full TLB flush if these are invalidated. */
2147 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2148 target_ulong size)
2150 target_ulong mask = ~(size - 1);
2152 if (env->tlb_flush_addr == (target_ulong)-1) {
2153 env->tlb_flush_addr = vaddr & mask;
2154 env->tlb_flush_mask = mask;
2155 return;
2157 /* Extend the existing region to include the new page.
2158 This is a compromise between unnecessary flushes and the cost
2159 of maintaining a full variable size TLB. */
2160 mask &= env->tlb_flush_mask;
2161 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2162 mask <<= 1;
2164 env->tlb_flush_addr &= mask;
2165 env->tlb_flush_mask = mask;
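/* Worked example (added for clarity, not in the original source): if a 4MB
   page at 0x40000000 is already tracked (tlb_flush_addr = 0x40000000,
   tlb_flush_mask = 0xffc00000) and another 4MB page at 0x40400000 is added,
   the two addresses differ inside the current mask, so the loop widens the
   mask once to 0xff800000; the tracked region becomes the 8MB block at
   0x40000000, which covers both large pages. */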
2168 /* Add a new TLB entry. At most one entry for a given virtual address
2169 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2170 supplied size is only used by tlb_flush_page. */
2171 void tlb_set_page(CPUState *env, target_ulong vaddr,
2172 target_phys_addr_t paddr, int prot,
2173 int mmu_idx, target_ulong size)
2175 PhysPageDesc *p;
2176 unsigned long pd;
2177 unsigned int index;
2178 target_ulong address;
2179 target_ulong code_address;
2180 unsigned long addend;
2181 CPUTLBEntry *te;
2182 CPUWatchpoint *wp;
2183 target_phys_addr_t iotlb;
2185 assert(size >= TARGET_PAGE_SIZE);
2186 if (size != TARGET_PAGE_SIZE) {
2187 tlb_add_large_page(env, vaddr, size);
2189 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2190 if (!p) {
2191 pd = IO_MEM_UNASSIGNED;
2192 } else {
2193 pd = p->phys_offset;
2195 #if defined(DEBUG_TLB)
2196 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2197 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2198 #endif
2200 address = vaddr;
2201 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2202 /* IO memory case (romd handled later) */
2203 address |= TLB_MMIO;
2205 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2206 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2207 /* Normal RAM. */
2208 iotlb = pd & TARGET_PAGE_MASK;
2209 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2210 iotlb |= IO_MEM_NOTDIRTY;
2211 else
2212 iotlb |= IO_MEM_ROM;
2213 } else {
2214 /* IO handlers are currently passed a physical address.
2215 It would be nice to pass an offset from the base address
2216 of that region. This would avoid having to special case RAM,
2217 and avoid full address decoding in every device.
2218 We can't use the high bits of pd for this because
2219 IO_MEM_ROMD uses these as a ram address. */
2220 iotlb = (pd & ~TARGET_PAGE_MASK);
2221 if (p) {
2222 iotlb += p->region_offset;
2223 } else {
2224 iotlb += paddr;
2228 code_address = address;
2229 /* Make accesses to pages with watchpoints go via the
2230 watchpoint trap routines. */
2231 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2232 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2233 iotlb = io_mem_watch + paddr;
2234 /* TODO: The memory case can be optimized by not trapping
2235 reads of pages with a write breakpoint. */
2236 address |= TLB_MMIO;
2240 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2241 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2242 te = &env->tlb_table[mmu_idx][index];
2243 te->addend = addend - vaddr;
2244 if (prot & PAGE_READ) {
2245 te->addr_read = address;
2246 } else {
2247 te->addr_read = -1;
2250 if (prot & PAGE_EXEC) {
2251 te->addr_code = code_address;
2252 } else {
2253 te->addr_code = -1;
2255 if (prot & PAGE_WRITE) {
2256 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2257 (pd & IO_MEM_ROMD)) {
2258 /* Write access calls the I/O callback. */
2259 te->addr_write = address | TLB_MMIO;
2260 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2261 !cpu_physical_memory_is_dirty(pd)) {
2262 te->addr_write = address | TLB_NOTDIRTY;
2263 } else {
2264 te->addr_write = address;
2266 } else {
2267 te->addr_write = -1;
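/* Illustrative call site (sketch): the per-target tlb_fill()/MMU helpers are
   assumed to install an entry for the faulting page roughly like this once
   the guest page tables have been walked (vaddr/paddr/prot/mmu_idx come from
   that walk). */
#if 0
tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
             prot, mmu_idx, TARGET_PAGE_SIZE);
#endif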
2271 #else
2273 void tlb_flush(CPUState *env, int flush_global)
2277 void tlb_flush_page(CPUState *env, target_ulong addr)
2282 * Walks guest process memory "regions" one by one
2283 * and calls callback function 'fn' for each region.
2286 struct walk_memory_regions_data
2288 walk_memory_regions_fn fn;
2289 void *priv;
2290 unsigned long start;
2291 int prot;
2294 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2295 abi_ulong end, int new_prot)
2297 if (data->start != -1ul) {
2298 int rc = data->fn(data->priv, data->start, end, data->prot);
2299 if (rc != 0) {
2300 return rc;
2304 data->start = (new_prot ? end : -1ul);
2305 data->prot = new_prot;
2307 return 0;
2310 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2311 abi_ulong base, int level, void **lp)
2313 abi_ulong pa;
2314 int i, rc;
2316 if (*lp == NULL) {
2317 return walk_memory_regions_end(data, base, 0);
2320 if (level == 0) {
2321 PageDesc *pd = *lp;
2322 for (i = 0; i < L2_SIZE; ++i) {
2323 int prot = pd[i].flags;
2325 pa = base | (i << TARGET_PAGE_BITS);
2326 if (prot != data->prot) {
2327 rc = walk_memory_regions_end(data, pa, prot);
2328 if (rc != 0) {
2329 return rc;
2333 } else {
2334 void **pp = *lp;
2335 for (i = 0; i < L2_SIZE; ++i) {
2336 pa = base | ((abi_ulong)i <<
2337 (TARGET_PAGE_BITS + L2_BITS * level));
2338 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2339 if (rc != 0) {
2340 return rc;
2345 return 0;
2348 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2350 struct walk_memory_regions_data data;
2351 unsigned long i;
2353 data.fn = fn;
2354 data.priv = priv;
2355 data.start = -1ul;
2356 data.prot = 0;
2358 for (i = 0; i < V_L1_SIZE; i++) {
2359 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2360 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2361 if (rc != 0) {
2362 return rc;
2366 return walk_memory_regions_end(&data, 0, 0);
2369 static int dump_region(void *priv, abi_ulong start,
2370 abi_ulong end, unsigned long prot)
2372 FILE *f = (FILE *)priv;
2374 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2375 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2376 start, end, end - start,
2377 ((prot & PAGE_READ) ? 'r' : '-'),
2378 ((prot & PAGE_WRITE) ? 'w' : '-'),
2379 ((prot & PAGE_EXEC) ? 'x' : '-'));
2381 return (0);
2384 /* dump memory mappings */
2385 void page_dump(FILE *f)
2387 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2388 "start", "end", "size", "prot");
2389 walk_memory_regions(f, dump_region);
2392 int page_get_flags(target_ulong address)
2394 PageDesc *p;
2396 p = page_find(address >> TARGET_PAGE_BITS);
2397 if (!p)
2398 return 0;
2399 return p->flags;
2402 /* Modify the flags of a page and invalidate the code if necessary.
2403 The flag PAGE_WRITE_ORG is positioned automatically depending
2404 on PAGE_WRITE. The mmap_lock should already be held. */
2405 void page_set_flags(target_ulong start, target_ulong end, int flags)
2407 target_ulong addr, len;
2409 /* This function should never be called with addresses outside the
2410 guest address space. If this assert fires, it probably indicates
2411 a missing call to h2g_valid. */
2412 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2413 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2414 #endif
2415 assert(start < end);
2417 start = start & TARGET_PAGE_MASK;
2418 end = TARGET_PAGE_ALIGN(end);
2420 if (flags & PAGE_WRITE) {
2421 flags |= PAGE_WRITE_ORG;
2424 for (addr = start, len = end - start;
2425 len != 0;
2426 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2427 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2429 /* If the write protection bit is set, then we invalidate
2430 the code inside. */
2431 if (!(p->flags & PAGE_WRITE) &&
2432 (flags & PAGE_WRITE) &&
2433 p->first_tb) {
2434 tb_invalidate_phys_page(addr, 0, NULL);
2436 p->flags = flags;
2440 int page_check_range(target_ulong start, target_ulong len, int flags)
2442 PageDesc *p;
2443 target_ulong end;
2444 target_ulong addr;
2446 /* This function should never be called with addresses outside the
2447 guest address space. If this assert fires, it probably indicates
2448 a missing call to h2g_valid. */
2449 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2450 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2451 #endif
2453 if (len == 0) {
2454 return 0;
2456 if (start + len - 1 < start) {
2457 /* We've wrapped around. */
2458 return -1;
2461 end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2462 start = start & TARGET_PAGE_MASK;
2464 for (addr = start, len = end - start;
2465 len != 0;
2466 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2467 p = page_find(addr >> TARGET_PAGE_BITS);
2468 if (!p)
2469 return -1;
2470 if (!(p->flags & PAGE_VALID))
2471 return -1;
2473 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2474 return -1;
2475 if (flags & PAGE_WRITE) {
2476 if (!(p->flags & PAGE_WRITE_ORG))
2477 return -1;
2478 /* unprotect the page if it was put read-only because it
2479 contains translated code */
2480 if (!(p->flags & PAGE_WRITE)) {
2481 if (!page_unprotect(addr, 0, NULL))
2482 return -1;
2484 return 0;
2487 return 0;
2490 /* called from signal handler: invalidate the code and unprotect the
2491 page. Return TRUE if the fault was successfully handled. */
2492 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2494 unsigned int prot;
2495 PageDesc *p;
2496 target_ulong host_start, host_end, addr;
2498 /* Technically this isn't safe inside a signal handler. However we
2499 know this only ever happens in a synchronous SEGV handler, so in
2500 practice it seems to be ok. */
2501 mmap_lock();
2503 p = page_find(address >> TARGET_PAGE_BITS);
2504 if (!p) {
2505 mmap_unlock();
2506 return 0;
2509 /* if the page was really writable, then we change its
2510 protection back to writable */
2511 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2512 host_start = address & qemu_host_page_mask;
2513 host_end = host_start + qemu_host_page_size;
2515 prot = 0;
2516 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2517 p = page_find(addr >> TARGET_PAGE_BITS);
2518 p->flags |= PAGE_WRITE;
2519 prot |= p->flags;
2521 /* and since the content will be modified, we must invalidate
2522 the corresponding translated code. */
2523 tb_invalidate_phys_page(addr, pc, puc);
2524 #ifdef DEBUG_TB_CHECK
2525 tb_invalidate_check(addr);
2526 #endif
2528 mprotect((void *)g2h(host_start), qemu_host_page_size,
2529 prot & PAGE_BITS);
2531 mmap_unlock();
2532 return 1;
2534 mmap_unlock();
2535 return 0;
2538 static inline void tlb_set_dirty(CPUState *env,
2539 unsigned long addr, target_ulong vaddr)
2542 #endif /* defined(CONFIG_USER_ONLY) */
2544 #if !defined(CONFIG_USER_ONLY)
2546 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2547 typedef struct subpage_t {
2548 target_phys_addr_t base;
2549 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2550 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2551 } subpage_t;
2553 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2554 ram_addr_t memory, ram_addr_t region_offset);
2555 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2556 ram_addr_t orig_memory,
2557 ram_addr_t region_offset);
2558 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2559 need_subpage) \
2560 do { \
2561 if (addr > start_addr) \
2562 start_addr2 = 0; \
2563 else { \
2564 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2565 if (start_addr2 > 0) \
2566 need_subpage = 1; \
2569 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2570 end_addr2 = TARGET_PAGE_SIZE - 1; \
2571 else { \
2572 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2573 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2574 need_subpage = 1; \
2576 } while (0)
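/* Worked example (added for clarity): registering a 0x100-byte region at
   start_addr 0x10000100 while the loop below visits the page at addr
   0x10000000 (assuming 4KB target pages) gives start_addr2 = 0x100 and
   end_addr2 = 0x1ff, both checks set need_subpage, and only that slice of
   the page is routed through a subpage_t. */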
2578 /* register physical memory.
2579 For RAM, 'size' must be a multiple of the target page size.
2580 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2581 io memory page. The address used when calling the IO function is
2582 the offset from the start of the region, plus region_offset. Both
2583 start_addr and region_offset are rounded down to a page boundary
2584 before calculating this offset. This should not be a problem unless
2585 the low bits of start_addr and region_offset differ. */
2586 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2587 ram_addr_t size,
2588 ram_addr_t phys_offset,
2589 ram_addr_t region_offset)
2591 target_phys_addr_t addr, end_addr;
2592 PhysPageDesc *p;
2593 CPUState *env;
2594 ram_addr_t orig_size = size;
2595 subpage_t *subpage;
2597 cpu_notify_set_memory(start_addr, size, phys_offset);
2599 if (phys_offset == IO_MEM_UNASSIGNED) {
2600 region_offset = start_addr;
2602 region_offset &= TARGET_PAGE_MASK;
2603 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2604 end_addr = start_addr + (target_phys_addr_t)size;
2605 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2606 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2607 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2608 ram_addr_t orig_memory = p->phys_offset;
2609 target_phys_addr_t start_addr2, end_addr2;
2610 int need_subpage = 0;
2612 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2613 need_subpage);
2614 if (need_subpage) {
2615 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2616 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2617 &p->phys_offset, orig_memory,
2618 p->region_offset);
2619 } else {
2620 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2621 >> IO_MEM_SHIFT];
2623 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2624 region_offset);
2625 p->region_offset = 0;
2626 } else {
2627 p->phys_offset = phys_offset;
2628 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2629 (phys_offset & IO_MEM_ROMD))
2630 phys_offset += TARGET_PAGE_SIZE;
2632 } else {
2633 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2634 p->phys_offset = phys_offset;
2635 p->region_offset = region_offset;
2636 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2637 (phys_offset & IO_MEM_ROMD)) {
2638 phys_offset += TARGET_PAGE_SIZE;
2639 } else {
2640 target_phys_addr_t start_addr2, end_addr2;
2641 int need_subpage = 0;
2643 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2644 end_addr2, need_subpage);
2646 if (need_subpage) {
2647 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2648 &p->phys_offset, IO_MEM_UNASSIGNED,
2649 addr & TARGET_PAGE_MASK);
2650 subpage_register(subpage, start_addr2, end_addr2,
2651 phys_offset, region_offset);
2652 p->region_offset = 0;
2656 region_offset += TARGET_PAGE_SIZE;
2659 /* since each CPU stores ram addresses in its TLB cache, we must
2660 reset the modified entries */
2661 /* XXX: slow ! */
2662 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2663 tlb_flush(env, 1);
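/* Illustrative board-level usage (sketch): callers normally go through the
   cpu_register_physical_memory() wrapper, assumed to be the cpu-common.h
   helper that passes region_offset = 0 to the function above.  Device names
   below are hypothetical. */
#if 0
ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

int io = cpu_register_io_memory(mydev_read, mydev_write, s);
cpu_register_physical_memory(dev_base, TARGET_PAGE_SIZE, io);
#endif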
2667 /* XXX: temporary until new memory mapping API */
2668 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2670 PhysPageDesc *p;
2672 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2673 if (!p)
2674 return IO_MEM_UNASSIGNED;
2675 return p->phys_offset;
2678 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2680 if (kvm_enabled())
2681 kvm_coalesce_mmio_region(addr, size);
2684 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2686 if (kvm_enabled())
2687 kvm_uncoalesce_mmio_region(addr, size);
2690 void qemu_flush_coalesced_mmio_buffer(void)
2692 if (kvm_enabled())
2693 kvm_flush_coalesced_mmio_buffer();
2696 #if defined(__linux__) && !defined(TARGET_S390X)
2698 #include <sys/vfs.h>
2700 #define HUGETLBFS_MAGIC 0x958458f6
2702 static long gethugepagesize(const char *path)
2704 struct statfs fs;
2705 int ret;
2707 do {
2708 ret = statfs(path, &fs);
2709 } while (ret != 0 && errno == EINTR);
2711 if (ret != 0) {
2712 perror(path);
2713 return 0;
2716 if (fs.f_type != HUGETLBFS_MAGIC)
2717 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2719 return fs.f_bsize;
2722 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2724 char *filename;
2725 void *area;
2726 int fd;
2727 #ifdef MAP_POPULATE
2728 int flags;
2729 #endif
2730 unsigned long hpagesize;
2732 hpagesize = gethugepagesize(path);
2733 if (!hpagesize) {
2734 return NULL;
2737 if (memory < hpagesize) {
2738 return NULL;
2741 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2742 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2743 return NULL;
2746 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2747 return NULL;
2750 fd = mkstemp(filename);
2751 if (fd < 0) {
2752 perror("unable to create backing store for hugepages");
2753 free(filename);
2754 return NULL;
2756 unlink(filename);
2757 free(filename);
2759 memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
2762 * ftruncate is not supported by hugetlbfs in older
2763 * hosts, so don't bother bailing out on errors.
2764 * If anything goes wrong with it under other filesystems,
2765 * mmap will fail.
2767 if (ftruncate(fd, memory))
2768 perror("ftruncate");
2770 #ifdef MAP_POPULATE
2771 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2772 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2773 * to sidestep this quirk.
2775 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2776 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2777 #else
2778 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2779 #endif
2780 if (area == MAP_FAILED) {
2781 perror("file_ram_alloc: can't mmap RAM pages");
2782 close(fd);
2783 return (NULL);
2785 return area;
2787 #endif
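/* Usage note (added): this hugetlbfs path is taken when the user passes
   "-mem-path <hugetlbfs mount>", e.g. "qemu -m 1024 -mem-path /dev/hugepages"
   (optionally with -mem-prealloc); if file_ram_alloc() fails, qemu_ram_alloc()
   below falls back to qemu_vmalloc(). */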
2789 ram_addr_t qemu_ram_map(ram_addr_t size, void *host)
2791 RAMBlock *new_block;
2793 size = TARGET_PAGE_ALIGN(size);
2794 new_block = qemu_malloc(sizeof(*new_block));
2796 new_block->host = host;
2798 new_block->offset = last_ram_offset;
2799 new_block->length = size;
2801 new_block->next = ram_blocks;
2802 ram_blocks = new_block;
2804 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2805 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2806 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2807 0xff, size >> TARGET_PAGE_BITS);
2809 last_ram_offset += size;
2811 if (kvm_enabled())
2812 kvm_setup_guest_memory(new_block->host, size);
2814 return new_block->offset;
2817 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2819 RAMBlock *new_block;
2821 size = TARGET_PAGE_ALIGN(size);
2822 new_block = qemu_malloc(sizeof(*new_block));
2824 if (mem_path) {
2825 #if defined (__linux__) && !defined(TARGET_S390X)
2826 new_block->host = file_ram_alloc(size, mem_path);
2827 if (!new_block->host) {
2828 new_block->host = qemu_vmalloc(size);
2829 #ifdef MADV_MERGEABLE
2830 madvise(new_block->host, size, MADV_MERGEABLE);
2831 #endif
2833 #else
2834 fprintf(stderr, "-mem-path option unsupported\n");
2835 exit(1);
2836 #endif
2837 } else {
2838 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2839 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2840 new_block->host = mmap((void*)0x1000000, size,
2841 PROT_EXEC|PROT_READ|PROT_WRITE,
2842 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2843 #else
2844 new_block->host = qemu_vmalloc(size);
2845 #endif
2846 #ifdef MADV_MERGEABLE
2847 madvise(new_block->host, size, MADV_MERGEABLE);
2848 #endif
2850 new_block->offset = last_ram_offset;
2851 new_block->length = size;
2853 new_block->next = ram_blocks;
2854 ram_blocks = new_block;
2856 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2857 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2858 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2859 0xff, size >> TARGET_PAGE_BITS);
2861 last_ram_offset += size;
2863 if (kvm_enabled())
2864 kvm_setup_guest_memory(new_block->host, size);
2866 return new_block->offset;
2869 void qemu_ram_free(ram_addr_t addr)
2871 /* TODO: implement this. */
2874 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2875 With the exception of the softmmu code in this file, this should
2876 only be used for local memory (e.g. video ram) that the device owns,
2877 and knows it isn't going to access beyond the end of the block.
2879 It should not be used for general purpose DMA.
2880 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2882 void *qemu_get_ram_ptr(ram_addr_t addr)
2884 RAMBlock *prev;
2885 RAMBlock **prevp;
2886 RAMBlock *block;
2888 prev = NULL;
2889 prevp = &ram_blocks;
2890 block = ram_blocks;
2891 while (block && (block->offset > addr
2892 || block->offset + block->length <= addr)) {
2893 if (prev)
2894 prevp = &prev->next;
2895 prev = block;
2896 block = block->next;
2898 if (!block) {
2899 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2900 abort();
2902 /* Move this entry to the start of the list. */
2903 if (prev) {
2904 prev->next = block->next;
2905 block->next = *prevp;
2906 *prevp = block;
2908 return block->host + (addr - block->offset);
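/* Illustrative device-side usage (sketch): a display adapter that allocated
   its VRAM with qemu_ram_alloc() may legitimately keep a host pointer to the
   whole block, since it owns it (field names are hypothetical). */
#if 0
s->vram_offset = qemu_ram_alloc(vram_size);
s->vram_ptr    = qemu_get_ram_ptr(s->vram_offset);
#endif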
2911 int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2913 RAMBlock *block;
2914 uint8_t *host = ptr;
2916 block = ram_blocks;
2917 while (block && (block->host > host
2918 || block->host + block->length <= host)) {
2919 block = block->next;
2921 if (!block)
2922 return -1;
2923 *ram_addr = block->offset + (host - block->host);
2924 return 0;
2927 /* Some of the softmmu routines need to translate from a host pointer
2928 (typically a TLB entry) back to a ram offset. */
2929 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2931 ram_addr_t ram_addr;
2933 if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
2934 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2935 abort();
2937 return ram_addr;
2940 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2942 #ifdef DEBUG_UNASSIGNED
2943 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2944 #endif
2945 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2946 do_unassigned_access(addr, 0, 0, 0, 1);
2947 #endif
2948 return 0;
2951 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2953 #ifdef DEBUG_UNASSIGNED
2954 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2955 #endif
2956 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2957 do_unassigned_access(addr, 0, 0, 0, 2);
2958 #endif
2959 return 0;
2962 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2964 #ifdef DEBUG_UNASSIGNED
2965 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2966 #endif
2967 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2968 do_unassigned_access(addr, 0, 0, 0, 4);
2969 #endif
2970 return 0;
2973 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2975 #ifdef DEBUG_UNASSIGNED
2976 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2977 #endif
2978 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2979 do_unassigned_access(addr, 1, 0, 0, 1);
2980 #endif
2983 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2985 #ifdef DEBUG_UNASSIGNED
2986 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2987 #endif
2988 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2989 do_unassigned_access(addr, 1, 0, 0, 2);
2990 #endif
2993 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2995 #ifdef DEBUG_UNASSIGNED
2996 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2997 #endif
2998 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2999 do_unassigned_access(addr, 1, 0, 0, 4);
3000 #endif
3003 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3004 unassigned_mem_readb,
3005 unassigned_mem_readw,
3006 unassigned_mem_readl,
3009 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3010 unassigned_mem_writeb,
3011 unassigned_mem_writew,
3012 unassigned_mem_writel,
3015 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3016 uint32_t val)
3018 int dirty_flags;
3019 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3020 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3021 #if !defined(CONFIG_USER_ONLY)
3022 tb_invalidate_phys_page_fast(ram_addr, 1);
3023 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3024 #endif
3026 stb_p(qemu_get_ram_ptr(ram_addr), val);
3027 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3028 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3029 /* we remove the notdirty callback only if the code has been
3030 flushed */
3031 if (dirty_flags == 0xff)
3032 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3035 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3036 uint32_t val)
3038 int dirty_flags;
3039 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3040 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3041 #if !defined(CONFIG_USER_ONLY)
3042 tb_invalidate_phys_page_fast(ram_addr, 2);
3043 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3044 #endif
3046 stw_p(qemu_get_ram_ptr(ram_addr), val);
3047 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3048 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3049 /* we remove the notdirty callback only if the code has been
3050 flushed */
3051 if (dirty_flags == 0xff)
3052 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3055 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3056 uint32_t val)
3058 int dirty_flags;
3059 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3060 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3061 #if !defined(CONFIG_USER_ONLY)
3062 tb_invalidate_phys_page_fast(ram_addr, 4);
3063 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3064 #endif
3066 stl_p(qemu_get_ram_ptr(ram_addr), val);
3067 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3068 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3069 /* we remove the notdirty callback only if the code has been
3070 flushed */
3071 if (dirty_flags == 0xff)
3072 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3075 static CPUReadMemoryFunc * const error_mem_read[3] = {
3076 NULL, /* never used */
3077 NULL, /* never used */
3078 NULL, /* never used */
3081 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3082 notdirty_mem_writeb,
3083 notdirty_mem_writew,
3084 notdirty_mem_writel,
3087 /* Generate a debug exception if a watchpoint has been hit. */
3088 static void check_watchpoint(int offset, int len_mask, int flags)
3090 CPUState *env = cpu_single_env;
3091 target_ulong pc, cs_base;
3092 TranslationBlock *tb;
3093 target_ulong vaddr;
3094 CPUWatchpoint *wp;
3095 int cpu_flags;
3097 if (env->watchpoint_hit) {
3098 /* We re-entered the check after replacing the TB. Now raise
3099 * the debug interrupt so that it will trigger after the
3100 * current instruction. */
3101 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3102 return;
3104 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3105 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3106 if ((vaddr == (wp->vaddr & len_mask) ||
3107 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3108 wp->flags |= BP_WATCHPOINT_HIT;
3109 if (!env->watchpoint_hit) {
3110 env->watchpoint_hit = wp;
3111 tb = tb_find_pc(env->mem_io_pc);
3112 if (!tb) {
3113 cpu_abort(env, "check_watchpoint: could not find TB for "
3114 "pc=%p", (void *)env->mem_io_pc);
3116 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3117 tb_phys_invalidate(tb, -1);
3118 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3119 env->exception_index = EXCP_DEBUG;
3120 } else {
3121 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3122 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3124 cpu_resume_from_signal(env, NULL);
3126 } else {
3127 wp->flags &= ~BP_WATCHPOINT_HIT;
3132 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3133 so these check for a hit then pass through to the normal out-of-line
3134 phys routines. */
3135 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3137 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3138 return ldub_phys(addr);
3141 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3143 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3144 return lduw_phys(addr);
3147 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3149 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3150 return ldl_phys(addr);
3153 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3154 uint32_t val)
3156 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3157 stb_phys(addr, val);
3160 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3161 uint32_t val)
3163 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3164 stw_phys(addr, val);
3167 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3168 uint32_t val)
3170 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3171 stl_phys(addr, val);
3174 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3175 watch_mem_readb,
3176 watch_mem_readw,
3177 watch_mem_readl,
3180 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3181 watch_mem_writeb,
3182 watch_mem_writew,
3183 watch_mem_writel,
3186 static inline uint32_t subpage_readlen (subpage_t *mmio,
3187 target_phys_addr_t addr,
3188 unsigned int len)
3190 unsigned int idx = SUBPAGE_IDX(addr);
3191 #if defined(DEBUG_SUBPAGE)
3192 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3193 mmio, len, addr, idx);
3194 #endif
3196 addr += mmio->region_offset[idx];
3197 idx = mmio->sub_io_index[idx];
3198 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3201 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3202 uint32_t value, unsigned int len)
3204 unsigned int idx = SUBPAGE_IDX(addr);
3205 #if defined(DEBUG_SUBPAGE)
3206 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3207 __func__, mmio, len, addr, idx, value);
3208 #endif
3210 addr += mmio->region_offset[idx];
3211 idx = mmio->sub_io_index[idx];
3212 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3215 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3217 return subpage_readlen(opaque, addr, 0);
3220 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3221 uint32_t value)
3223 subpage_writelen(opaque, addr, value, 0);
3226 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3228 return subpage_readlen(opaque, addr, 1);
3231 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3232 uint32_t value)
3234 subpage_writelen(opaque, addr, value, 1);
3237 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3239 return subpage_readlen(opaque, addr, 2);
3242 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3243 uint32_t value)
3245 subpage_writelen(opaque, addr, value, 2);
3248 static CPUReadMemoryFunc * const subpage_read[] = {
3249 &subpage_readb,
3250 &subpage_readw,
3251 &subpage_readl,
3254 static CPUWriteMemoryFunc * const subpage_write[] = {
3255 &subpage_writeb,
3256 &subpage_writew,
3257 &subpage_writel,
3260 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3261 ram_addr_t memory, ram_addr_t region_offset)
3263 int idx, eidx;
3265 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3266 return -1;
3267 idx = SUBPAGE_IDX(start);
3268 eidx = SUBPAGE_IDX(end);
3269 #if defined(DEBUG_SUBPAGE)
3270 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3271 mmio, start, end, idx, eidx, memory);
3272 #endif
3273 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3274 for (; idx <= eidx; idx++) {
3275 mmio->sub_io_index[idx] = memory;
3276 mmio->region_offset[idx] = region_offset;
3279 return 0;
3282 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3283 ram_addr_t orig_memory,
3284 ram_addr_t region_offset)
3286 subpage_t *mmio;
3287 int subpage_memory;
3289 mmio = qemu_mallocz(sizeof(subpage_t));
3291 mmio->base = base;
3292 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3293 #if defined(DEBUG_SUBPAGE)
3294 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3295 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3296 #endif
3297 *phys = subpage_memory | IO_MEM_SUBPAGE;
3298 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3300 return mmio;
3303 static int get_free_io_mem_idx(void)
3305 int i;
3307 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3308 if (!io_mem_used[i]) {
3309 io_mem_used[i] = 1;
3310 return i;
3312 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3313 return -1;
3316 /* mem_read and mem_write are arrays of functions containing the
3317 function to access byte (index 0), word (index 1) and dword (index
3318 2). Functions can be omitted with a NULL function pointer.
3319 If io_index is non-zero, the corresponding io zone is
3320 modified. If it is zero, a new io zone is allocated. The return
3321 value can be used with cpu_register_physical_memory(). (-1) is
3322 returned on error. */
3323 static int cpu_register_io_memory_fixed(int io_index,
3324 CPUReadMemoryFunc * const *mem_read,
3325 CPUWriteMemoryFunc * const *mem_write,
3326 void *opaque)
3328 int i;
3330 if (io_index <= 0) {
3331 io_index = get_free_io_mem_idx();
3332 if (io_index == -1)
3333 return io_index;
3334 } else {
3335 io_index >>= IO_MEM_SHIFT;
3336 if (io_index >= IO_MEM_NB_ENTRIES)
3337 return -1;
3340 for (i = 0; i < 3; ++i) {
3341 io_mem_read[io_index][i]
3342 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3344 for (i = 0; i < 3; ++i) {
3345 io_mem_write[io_index][i]
3346 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3348 io_mem_opaque[io_index] = opaque;
3350 return (io_index << IO_MEM_SHIFT);
3353 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3354 CPUWriteMemoryFunc * const *mem_write,
3355 void *opaque)
3357 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
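/* Illustrative registration (sketch): a device supplies byte/word/long
   accessors; NULL slots fall back to the unassigned_mem_* handlers installed
   by cpu_register_io_memory_fixed() above.  Accessor and state names are
   hypothetical. */
#if 0
static CPUReadMemoryFunc * const mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    mydev_writeb, NULL, NULL,
};

int io = cpu_register_io_memory(mydev_read, mydev_write, s);
cpu_register_physical_memory(base, 0x1000, io);
#endif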
3360 void cpu_unregister_io_memory(int io_table_address)
3362 int i;
3363 int io_index = io_table_address >> IO_MEM_SHIFT;
3365 for (i = 0; i < 3; i++) {
3366 io_mem_read[io_index][i] = unassigned_mem_read[i];
3367 io_mem_write[io_index][i] = unassigned_mem_write[i];
3369 io_mem_opaque[io_index] = NULL;
3370 io_mem_used[io_index] = 0;
3373 static void io_mem_init(void)
3375 int i;
3377 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3378 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3379 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3380 for (i = 0; i < 5; i++)
3381 io_mem_used[i] = 1;
3383 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3384 watch_mem_write, NULL);
3387 #endif /* !defined(CONFIG_USER_ONLY) */
3389 /* physical memory access (slow version, mainly for debug) */
3390 #if defined(CONFIG_USER_ONLY)
3391 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3392 uint8_t *buf, int len, int is_write)
3394 int l, flags;
3395 target_ulong page;
3396 void * p;
3398 while (len > 0) {
3399 page = addr & TARGET_PAGE_MASK;
3400 l = (page + TARGET_PAGE_SIZE) - addr;
3401 if (l > len)
3402 l = len;
3403 flags = page_get_flags(page);
3404 if (!(flags & PAGE_VALID))
3405 return -1;
3406 if (is_write) {
3407 if (!(flags & PAGE_WRITE))
3408 return -1;
3409 /* XXX: this code should not depend on lock_user */
3410 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3411 return -1;
3412 memcpy(p, buf, l);
3413 unlock_user(p, addr, l);
3414 } else {
3415 if (!(flags & PAGE_READ))
3416 return -1;
3417 /* XXX: this code should not depend on lock_user */
3418 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3419 return -1;
3420 memcpy(buf, p, l);
3421 unlock_user(p, addr, 0);
3423 len -= l;
3424 buf += l;
3425 addr += l;
3427 return 0;
3430 #else
3431 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3432 int len, int is_write)
3434 int l, io_index;
3435 uint8_t *ptr;
3436 uint32_t val;
3437 target_phys_addr_t page;
3438 unsigned long pd;
3439 PhysPageDesc *p;
3441 while (len > 0) {
3442 page = addr & TARGET_PAGE_MASK;
3443 l = (page + TARGET_PAGE_SIZE) - addr;
3444 if (l > len)
3445 l = len;
3446 p = phys_page_find(page >> TARGET_PAGE_BITS);
3447 if (!p) {
3448 pd = IO_MEM_UNASSIGNED;
3449 } else {
3450 pd = p->phys_offset;
3453 if (is_write) {
3454 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3455 target_phys_addr_t addr1 = addr;
3456 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3457 if (p)
3458 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3459 /* XXX: could force cpu_single_env to NULL to avoid
3460 potential bugs */
3461 if (l >= 4 && ((addr1 & 3) == 0)) {
3462 /* 32 bit write access */
3463 val = ldl_p(buf);
3464 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3465 l = 4;
3466 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3467 /* 16 bit write access */
3468 val = lduw_p(buf);
3469 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3470 l = 2;
3471 } else {
3472 /* 8 bit write access */
3473 val = ldub_p(buf);
3474 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3475 l = 1;
3477 } else {
3478 unsigned long addr1;
3479 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3480 /* RAM case */
3481 ptr = qemu_get_ram_ptr(addr1);
3482 memcpy(ptr, buf, l);
3483 if (!cpu_physical_memory_is_dirty(addr1)) {
3484 /* invalidate code */
3485 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3486 /* set dirty bit */
3487 cpu_physical_memory_set_dirty_flags(
3488 addr1, (0xff & ~CODE_DIRTY_FLAG));
3490 /* qemu doesn't execute guest code directly, but kvm does,
3491 so flush the instruction caches */
3492 if (kvm_enabled())
3493 flush_icache_range((unsigned long)ptr,
3494 ((unsigned long)ptr)+l);
3496 } else {
3497 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3498 !(pd & IO_MEM_ROMD)) {
3499 target_phys_addr_t addr1 = addr;
3500 /* I/O case */
3501 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3502 if (p)
3503 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3504 if (l >= 4 && ((addr1 & 3) == 0)) {
3505 /* 32 bit read access */
3506 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3507 stl_p(buf, val);
3508 l = 4;
3509 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3510 /* 16 bit read access */
3511 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3512 stw_p(buf, val);
3513 l = 2;
3514 } else {
3515 /* 8 bit read access */
3516 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3517 stb_p(buf, val);
3518 l = 1;
3520 } else {
3521 /* RAM case */
3522 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3523 (addr & ~TARGET_PAGE_MASK);
3524 memcpy(buf, ptr, l);
3527 len -= l;
3528 buf += l;
3529 addr += l;
3533 /* used for ROM loading : can write in RAM and ROM */
3534 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3535 const uint8_t *buf, int len)
3537 int l;
3538 uint8_t *ptr;
3539 target_phys_addr_t page;
3540 unsigned long pd;
3541 PhysPageDesc *p;
3543 while (len > 0) {
3544 page = addr & TARGET_PAGE_MASK;
3545 l = (page + TARGET_PAGE_SIZE) - addr;
3546 if (l > len)
3547 l = len;
3548 p = phys_page_find(page >> TARGET_PAGE_BITS);
3549 if (!p) {
3550 pd = IO_MEM_UNASSIGNED;
3551 } else {
3552 pd = p->phys_offset;
3555 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3556 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3557 !(pd & IO_MEM_ROMD)) {
3558 /* do nothing */
3559 } else {
3560 unsigned long addr1;
3561 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3562 /* ROM/RAM case */
3563 ptr = qemu_get_ram_ptr(addr1);
3564 memcpy(ptr, buf, l);
3566 len -= l;
3567 buf += l;
3568 addr += l;
3572 typedef struct {
3573 void *buffer;
3574 target_phys_addr_t addr;
3575 target_phys_addr_t len;
3576 } BounceBuffer;
3578 static BounceBuffer bounce;
3580 typedef struct MapClient {
3581 void *opaque;
3582 void (*callback)(void *opaque);
3583 QLIST_ENTRY(MapClient) link;
3584 } MapClient;
3586 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3587 = QLIST_HEAD_INITIALIZER(map_client_list);
3589 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3591 MapClient *client = qemu_malloc(sizeof(*client));
3593 client->opaque = opaque;
3594 client->callback = callback;
3595 QLIST_INSERT_HEAD(&map_client_list, client, link);
3596 return client;
3599 void cpu_unregister_map_client(void *_client)
3601 MapClient *client = (MapClient *)_client;
3603 QLIST_REMOVE(client, link);
3604 qemu_free(client);
3607 static void cpu_notify_map_clients(void)
3609 MapClient *client;
3611 while (!QLIST_EMPTY(&map_client_list)) {
3612 client = QLIST_FIRST(&map_client_list);
3613 client->callback(client->opaque);
3614 cpu_unregister_map_client(client);
3618 /* Map a physical memory region into a host virtual address.
3619 * May map a subset of the requested range, given by and returned in *plen.
3620 * May return NULL if resources needed to perform the mapping are exhausted.
3621 * Use only for reads OR writes - not for read-modify-write operations.
3622 * Use cpu_register_map_client() to know when retrying the map operation is
3623 * likely to succeed.
3625 void *cpu_physical_memory_map(target_phys_addr_t addr,
3626 target_phys_addr_t *plen,
3627 int is_write)
3629 target_phys_addr_t len = *plen;
3630 target_phys_addr_t done = 0;
3631 int l;
3632 uint8_t *ret = NULL;
3633 uint8_t *ptr;
3634 target_phys_addr_t page;
3635 unsigned long pd;
3636 PhysPageDesc *p;
3637 unsigned long addr1;
3639 while (len > 0) {
3640 page = addr & TARGET_PAGE_MASK;
3641 l = (page + TARGET_PAGE_SIZE) - addr;
3642 if (l > len)
3643 l = len;
3644 p = phys_page_find(page >> TARGET_PAGE_BITS);
3645 if (!p) {
3646 pd = IO_MEM_UNASSIGNED;
3647 } else {
3648 pd = p->phys_offset;
3651 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3652 if (done || bounce.buffer) {
3653 break;
3655 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3656 bounce.addr = addr;
3657 bounce.len = l;
3658 if (!is_write) {
3659 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3661 ptr = bounce.buffer;
3662 } else {
3663 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3664 ptr = qemu_get_ram_ptr(addr1);
3666 if (!done) {
3667 ret = ptr;
3668 } else if (ret + done != ptr) {
3669 break;
3672 len -= l;
3673 addr += l;
3674 done += l;
3676 *plen = done;
3677 return ret;
3680 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3681 * Will also mark the memory as dirty if is_write == 1. access_len gives
3682 * the amount of memory that was actually read or written by the caller.
3684 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3685 int is_write, target_phys_addr_t access_len)
3687 unsigned long flush_len = (unsigned long)access_len;
3689 if (buffer != bounce.buffer) {
3690 if (is_write) {
3691 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3692 while (access_len) {
3693 unsigned l;
3694 l = TARGET_PAGE_SIZE;
3695 if (l > access_len)
3696 l = access_len;
3697 if (!cpu_physical_memory_is_dirty(addr1)) {
3698 /* invalidate code */
3699 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3700 /* set dirty bit */
3701 cpu_physical_memory_set_dirty_flags(
3702 addr1, (0xff & ~CODE_DIRTY_FLAG));
3704 addr1 += l;
3705 access_len -= l;
3707 dma_flush_range((unsigned long)buffer,
3708 (unsigned long)buffer + flush_len);
3710 return;
3712 if (is_write) {
3713 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3715 qemu_vfree(bounce.buffer);
3716 bounce.buffer = NULL;
3717 cpu_notify_map_clients();
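/* Illustrative zero-copy DMA pattern (sketch): map as much as possible and
   fall back to retrying via cpu_register_map_client() when the single bounce
   buffer is already in use.  fill_buffer() and retry_dma() are hypothetical. */
#if 0
target_phys_addr_t maplen = size;
void *p = cpu_physical_memory_map(addr, &maplen, 1 /* is_write */);
if (p) {
    fill_buffer(p, maplen);
    cpu_physical_memory_unmap(p, maplen, 1, maplen);
} else {
    cpu_register_map_client(s, retry_dma); /* called when a retry may succeed */
}
#endif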
3720 /* warning: addr must be aligned */
3721 uint32_t ldl_phys(target_phys_addr_t addr)
3723 int io_index;
3724 uint8_t *ptr;
3725 uint32_t val;
3726 unsigned long pd;
3727 PhysPageDesc *p;
3729 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3730 if (!p) {
3731 pd = IO_MEM_UNASSIGNED;
3732 } else {
3733 pd = p->phys_offset;
3736 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3737 !(pd & IO_MEM_ROMD)) {
3738 /* I/O case */
3739 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3740 if (p)
3741 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3742 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3743 } else {
3744 /* RAM case */
3745 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3746 (addr & ~TARGET_PAGE_MASK);
3747 val = ldl_p(ptr);
3749 return val;
3752 /* warning: addr must be aligned */
3753 uint64_t ldq_phys(target_phys_addr_t addr)
3755 int io_index;
3756 uint8_t *ptr;
3757 uint64_t val;
3758 unsigned long pd;
3759 PhysPageDesc *p;
3761 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3762 if (!p) {
3763 pd = IO_MEM_UNASSIGNED;
3764 } else {
3765 pd = p->phys_offset;
3768 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3769 !(pd & IO_MEM_ROMD)) {
3770 /* I/O case */
3771 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3772 if (p)
3773 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3774 #ifdef TARGET_WORDS_BIGENDIAN
3775 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3776 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3777 #else
3778 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3779 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3780 #endif
3781 } else {
3782 /* RAM case */
3783 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3784 (addr & ~TARGET_PAGE_MASK);
3785 val = ldq_p(ptr);
3787 return val;
3790 /* XXX: optimize */
3791 uint32_t ldub_phys(target_phys_addr_t addr)
3793 uint8_t val;
3794 cpu_physical_memory_read(addr, &val, 1);
3795 return val;
3798 /* warning: addr must be aligned */
3799 uint32_t lduw_phys(target_phys_addr_t addr)
3801 int io_index;
3802 uint8_t *ptr;
3803 uint64_t val;
3804 unsigned long pd;
3805 PhysPageDesc *p;
3807 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3808 if (!p) {
3809 pd = IO_MEM_UNASSIGNED;
3810 } else {
3811 pd = p->phys_offset;
3814 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3815 !(pd & IO_MEM_ROMD)) {
3816 /* I/O case */
3817 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3818 if (p)
3819 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3820 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3821 } else {
3822 /* RAM case */
3823 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3824 (addr & ~TARGET_PAGE_MASK);
3825 val = lduw_p(ptr);
3827 return val;
3830 /* warning: addr must be aligned. The ram page is not masked as dirty
3831 and the code inside is not invalidated. It is useful if the dirty
3832 bits are used to track modified PTEs */
3833 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3835 int io_index;
3836 uint8_t *ptr;
3837 unsigned long pd;
3838 PhysPageDesc *p;
3840 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3841 if (!p) {
3842 pd = IO_MEM_UNASSIGNED;
3843 } else {
3844 pd = p->phys_offset;
3847 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3848 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3849 if (p)
3850 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3851 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3852 } else {
3853 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3854 ptr = qemu_get_ram_ptr(addr1);
3855 stl_p(ptr, val);
3857 if (unlikely(in_migration)) {
3858 if (!cpu_physical_memory_is_dirty(addr1)) {
3859 /* invalidate code */
3860 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3861 /* set dirty bit */
3862 cpu_physical_memory_set_dirty_flags(
3863 addr1, (0xff & ~CODE_DIRTY_FLAG));
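/* Illustrative use (sketch): a softmmu page-table walker that sets the
   accessed/dirty bits in a guest PTE writes the PTE back with the _notdirty
   variant so this bookkeeping store does not mark the page-table page as
   modified.  PG_ACCESSED_MASK is assumed from the x86 target headers. */
#if 0
pte |= PG_ACCESSED_MASK;
stl_phys_notdirty(pte_addr, pte);
#endif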
3869 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3871 int io_index;
3872 uint8_t *ptr;
3873 unsigned long pd;
3874 PhysPageDesc *p;
3876 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3877 if (!p) {
3878 pd = IO_MEM_UNASSIGNED;
3879 } else {
3880 pd = p->phys_offset;
3883 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3884 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3885 if (p)
3886 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3887 #ifdef TARGET_WORDS_BIGENDIAN
3888 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3889 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3890 #else
3891 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3892 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3893 #endif
3894 } else {
3895 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3896 (addr & ~TARGET_PAGE_MASK);
3897 stq_p(ptr, val);
3901 /* warning: addr must be aligned */
3902 void stl_phys(target_phys_addr_t addr, uint32_t val)
3904 int io_index;
3905 uint8_t *ptr;
3906 unsigned long pd;
3907 PhysPageDesc *p;
3909 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3910 if (!p) {
3911 pd = IO_MEM_UNASSIGNED;
3912 } else {
3913 pd = p->phys_offset;
3916 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3917 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3918 if (p)
3919 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3920 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3921 } else {
3922 unsigned long addr1;
3923 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3924 /* RAM case */
3925 ptr = qemu_get_ram_ptr(addr1);
3926 stl_p(ptr, val);
3927 if (!cpu_physical_memory_is_dirty(addr1)) {
3928 /* invalidate code */
3929 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3930 /* set dirty bit */
3931 cpu_physical_memory_set_dirty_flags(addr1,
3932 (0xff & ~CODE_DIRTY_FLAG));
3937 /* XXX: optimize */
3938 void stb_phys(target_phys_addr_t addr, uint32_t val)
3940 uint8_t v = val;
3941 cpu_physical_memory_write(addr, &v, 1);
3944 /* warning: addr must be aligned */
3945 void stw_phys(target_phys_addr_t addr, uint32_t val)
3947 int io_index;
3948 uint8_t *ptr;
3949 unsigned long pd;
3950 PhysPageDesc *p;
3952 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3953 if (!p) {
3954 pd = IO_MEM_UNASSIGNED;
3955 } else {
3956 pd = p->phys_offset;
3959 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3960 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3961 if (p)
3962 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3963 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3964 } else {
3965 unsigned long addr1;
3966 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3967 /* RAM case */
3968 ptr = qemu_get_ram_ptr(addr1);
3969 stw_p(ptr, val);
3970 if (!cpu_physical_memory_is_dirty(addr1)) {
3971 /* invalidate code */
3972 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3973 /* set dirty bit */
3974 cpu_physical_memory_set_dirty_flags(addr1,
3975 (0xff & ~CODE_DIRTY_FLAG));
3980 /* XXX: optimize */
3981 void stq_phys(target_phys_addr_t addr, uint64_t val)
3983 val = tswap64(val);
3984 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3987 /* virtual memory access for debug (includes writing to ROM) */
3988 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3989 uint8_t *buf, int len, int is_write)
3991 int l;
3992 target_phys_addr_t phys_addr;
3993 target_ulong page;
3995 while (len > 0) {
3996 page = addr & TARGET_PAGE_MASK;
3997 phys_addr = cpu_get_phys_page_debug(env, page);
3998 /* if no physical page mapped, return an error */
3999 if (phys_addr == -1)
4000 return -1;
4001 l = (page + TARGET_PAGE_SIZE) - addr;
4002 if (l > len)
4003 l = len;
4004 phys_addr += (addr & ~TARGET_PAGE_MASK);
4005 if (is_write)
4006 cpu_physical_memory_write_rom(phys_addr, buf, l);
4007 else
4008 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4009 len -= l;
4010 buf += l;
4011 addr += l;
4013 return 0;
4015 #endif
4017 /* in deterministic execution mode, instructions doing device I/Os
4018 must be at the end of the TB */
4019 void cpu_io_recompile(CPUState *env, void *retaddr)
4021 TranslationBlock *tb;
4022 uint32_t n, cflags;
4023 target_ulong pc, cs_base;
4024 uint64_t flags;
4026 tb = tb_find_pc((unsigned long)retaddr);
4027 if (!tb) {
4028 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4029 retaddr);
4031 n = env->icount_decr.u16.low + tb->icount;
4032 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4033 /* Calculate how many instructions had been executed before the fault
4034 occurred. */
4035 n = n - env->icount_decr.u16.low;
4036 /* Generate a new TB ending on the I/O insn. */
4037 n++;
4038 /* On MIPS and SH, delay slot instructions can only be restarted if
4039 they were already the first instruction in the TB. If this is not
4040 the first instruction in a TB then re-execute the preceding
4041 branch. */
4042 #if defined(TARGET_MIPS)
4043 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4044 env->active_tc.PC -= 4;
4045 env->icount_decr.u16.low++;
4046 env->hflags &= ~MIPS_HFLAG_BMASK;
4048 #elif defined(TARGET_SH4)
4049 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4050 && n > 1) {
4051 env->pc -= 2;
4052 env->icount_decr.u16.low++;
4053 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4055 #endif
4056 /* This should never happen. */
4057 if (n > CF_COUNT_MASK)
4058 cpu_abort(env, "TB too big during recompile");
4060 cflags = n | CF_LAST_IO;
4061 pc = tb->pc;
4062 cs_base = tb->cs_base;
4063 flags = tb->flags;
4064 tb_phys_invalidate(tb, -1);
4065 /* FIXME: In theory this could raise an exception. In practice
4066 we have already translated the block once so it's probably ok. */
4067 tb_gen_code(env, pc, cs_base, flags, cflags);
4068 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4069 the first in the TB) then we end up generating a whole new TB and
4070 repeating the fault, which is horribly inefficient.
4071 Better would be to execute just this insn uncached, or generate a
4072 second new TB. */
4073 cpu_resume_from_signal(env, NULL);
4076 #if !defined(CONFIG_USER_ONLY)
4078 void dump_exec_info(FILE *f,
4079 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4081 int i, target_code_size, max_target_code_size;
4082 int direct_jmp_count, direct_jmp2_count, cross_page;
4083 TranslationBlock *tb;
4085 target_code_size = 0;
4086 max_target_code_size = 0;
4087 cross_page = 0;
4088 direct_jmp_count = 0;
4089 direct_jmp2_count = 0;
4090 for(i = 0; i < nb_tbs; i++) {
4091 tb = &tbs[i];
4092 target_code_size += tb->size;
4093 if (tb->size > max_target_code_size)
4094 max_target_code_size = tb->size;
4095 if (tb->page_addr[1] != -1)
4096 cross_page++;
4097 if (tb->tb_next_offset[0] != 0xffff) {
4098 direct_jmp_count++;
4099 if (tb->tb_next_offset[1] != 0xffff) {
4100 direct_jmp2_count++;
4104 /* XXX: avoid using doubles ? */
4105 cpu_fprintf(f, "Translation buffer state:\n");
4106 cpu_fprintf(f, "gen code size %ld/%ld\n",
4107 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4108 cpu_fprintf(f, "TB count %d/%d\n",
4109 nb_tbs, code_gen_max_blocks);
4110 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4111 nb_tbs ? target_code_size / nb_tbs : 0,
4112 max_target_code_size);
4113 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4114 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4115 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4116 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4117 cross_page,
4118 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4119 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4120 direct_jmp_count,
4121 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4122 direct_jmp2_count,
4123 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4124 cpu_fprintf(f, "\nStatistics:\n");
4125 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4126 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4127 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4128 #ifdef CONFIG_PROFILER
4129 tcg_dump_info(f, cpu_fprintf);
4130 #endif
4133 #define MMUSUFFIX _cmmu
4134 #define GETPC() NULL
4135 #define env cpu_single_env
4136 #define SOFTMMU_CODE_ACCESS
4138 #define SHIFT 0
4139 #include "softmmu_template.h"
4141 #define SHIFT 1
4142 #include "softmmu_template.h"
4144 #define SHIFT 2
4145 #include "softmmu_template.h"
4147 #define SHIFT 3
4148 #include "softmmu_template.h"
4150 #undef env
4152 #endif