[qemu-kvm/stefanha.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "cache-utils.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "osdep.h"
46 #include "kvm.h"
47 #include "qemu-timer.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #include <signal.h>
51 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
52 #include <sys/param.h>
53 #if __FreeBSD_version >= 700104
54 #define HAVE_KINFO_GETVMMAP
55 #define sigqueue sigqueue_freebsd /* avoid redefinition */
56 #include <sys/time.h>
57 #include <sys/proc.h>
58 #include <machine/profile.h>
59 #define _KERNEL
60 #include <sys/user.h>
61 #undef _KERNEL
62 #undef sigqueue
63 #include <libutil.h>
64 #endif
65 #endif
66 #endif
68 //#define DEBUG_TB_INVALIDATE
69 //#define DEBUG_FLUSH
70 //#define DEBUG_TLB
71 //#define DEBUG_UNASSIGNED
73 /* make various TB consistency checks */
74 //#define DEBUG_TB_CHECK
75 //#define DEBUG_TLB_CHECK
77 //#define DEBUG_IOPORT
78 //#define DEBUG_SUBPAGE
80 #if !defined(CONFIG_USER_ONLY)
81 /* TB consistency checks only implemented for usermode emulation. */
82 #undef DEBUG_TB_CHECK
83 #endif
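/* Number of write accesses to a code page after which a per-page bitmap
   of translated-code bytes is built (see build_page_bitmap()), so that
   most guest writes can skip the slow SMC invalidation path. */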
85 #define SMC_BITMAP_USE_THRESHOLD 10
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #elif defined(_WIN32)
102 /* Maximum alignment for Win32 is 16. */
103 #define code_gen_section \
104 __attribute__((aligned (16)))
105 #else
106 #define code_gen_section \
107 __attribute__((aligned (32)))
108 #endif
110 uint8_t code_gen_prologue[1024] code_gen_section;
111 static uint8_t *code_gen_buffer;
112 static unsigned long code_gen_buffer_size;
113 /* threshold to flush the translated code buffer */
114 static unsigned long code_gen_buffer_max_size;
115 uint8_t *code_gen_ptr;
117 #if !defined(CONFIG_USER_ONLY)
118 int phys_ram_fd;
119 static int in_migration;
121 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
122 #endif
124 CPUState *first_cpu;
125 /* current CPU in the current thread. It is only valid inside
126 cpu_exec() */
127 CPUState *cpu_single_env;
128 /* 0 = Do not count executed instructions.
129 1 = Precise instruction counting.
130 2 = Adaptive rate instruction counting. */
131 int use_icount = 0;
132 /* Current instruction counter. While executing translated code this may
133 include some instructions that have not yet been executed. */
134 int64_t qemu_icount;
136 typedef struct PageDesc {
137 /* list of TBs intersecting this ram page */
138 TranslationBlock *first_tb;
139 /* in order to optimize self modifying code, we count the number
140 of lookups we do to a given page to use a bitmap */
141 unsigned int code_write_count;
142 uint8_t *code_bitmap;
143 #if defined(CONFIG_USER_ONLY)
144 unsigned long flags;
145 #endif
146 } PageDesc;
148 /* In system mode we want L1_MAP to be based on ram offsets,
149 while in user mode we want it to be based on virtual addresses. */
150 #if !defined(CONFIG_USER_ONLY)
151 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
152 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
153 #else
154 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
155 #endif
156 #else
157 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
158 #endif
160 /* Size of the L2 (and L3, etc) page tables. */
161 #define L2_BITS 10
162 #define L2_SIZE (1 << L2_BITS)
164 /* The bits remaining after N lower levels of page tables. */
165 #define P_L1_BITS_REM \
166 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
170 /* Size of the L1 page table. Avoid silly small sizes. */
171 #if P_L1_BITS_REM < 4
172 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
173 #else
174 #define P_L1_BITS P_L1_BITS_REM
175 #endif
177 #if V_L1_BITS_REM < 4
178 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
179 #else
180 #define V_L1_BITS V_L1_BITS_REM
181 #endif
183 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
184 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
186 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
187 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
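/* l1_map and l1_phys_map below are multi-level tables: each interior
   level holds L2_SIZE pointers covering L2_BITS of the page index, and
   the top (L1) level absorbs the remaining bits so that all lower
   levels have a uniform size. */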
189 unsigned long qemu_real_host_page_size;
190 unsigned long qemu_host_page_bits;
191 unsigned long qemu_host_page_size;
192 unsigned long qemu_host_page_mask;
194 /* This is a multi-level map on the virtual address space.
195 The bottom level has pointers to PageDesc. */
196 static void *l1_map[V_L1_SIZE];
198 #if !defined(CONFIG_USER_ONLY)
199 typedef struct PhysPageDesc {
200 /* offset in host memory of the page + io_index in the low bits */
201 ram_addr_t phys_offset;
202 ram_addr_t region_offset;
203 } PhysPageDesc;
205 /* This is a multi-level map on the physical address space.
206 The bottom level has pointers to PhysPageDesc. */
207 static void *l1_phys_map[P_L1_SIZE];
209 static void io_mem_init(void);
211 /* io memory support */
212 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
213 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
214 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
215 static char io_mem_used[IO_MEM_NB_ENTRIES];
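/* io_mem slot used to trap accesses to pages that contain watchpoints
   (allocated in io_mem_init()) */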
216 static int io_mem_watch;
217 #endif
219 /* log support */
220 #ifdef WIN32
221 static const char *logfilename = "qemu.log";
222 #else
223 static const char *logfilename = "/tmp/qemu.log";
224 #endif
225 FILE *logfile;
226 int loglevel;
227 static int log_append = 0;
229 /* statistics */
230 #if !defined(CONFIG_USER_ONLY)
231 static int tlb_flush_count;
232 #endif
233 static int tb_flush_count;
234 static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
406 #if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
452 static void tlb_protect_code(ram_addr_t ram_addr);
453 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
454 target_ulong vaddr);
455 #define mmap_lock() do { } while(0)
456 #define mmap_unlock() do { } while(0)
457 #endif
459 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
461 #if defined(CONFIG_USER_ONLY)
462 /* Currently it is not recommended to allocate big chunks of data in
463 user mode. It will change when a dedicated libc will be used */
464 #define USE_STATIC_CODE_GEN_BUFFER
465 #endif
467 #ifdef USE_STATIC_CODE_GEN_BUFFER
468 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
469 __attribute__((aligned (CODE_GEN_ALIGN)));
470 #endif
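/* Note: in this qemu-kvm tree code_gen_alloc() returns early when KVM is
   enabled, since the TCG translator (and hence the code generation buffer)
   is not used in that configuration. */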
472 static void code_gen_alloc(unsigned long tb_size)
474 if (kvm_enabled())
475 return;
477 #ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer = static_code_gen_buffer;
479 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
480 map_exec(code_gen_buffer, code_gen_buffer_size);
481 #else
482 code_gen_buffer_size = tb_size;
483 if (code_gen_buffer_size == 0) {
484 #if defined(CONFIG_USER_ONLY)
485 /* in user mode, phys_ram_size is not meaningful */
486 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
487 #else
488 /* XXX: needs adjustments */
489 code_gen_buffer_size = (unsigned long)(ram_size / 4);
490 #endif
492 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
493 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
494 /* The code gen buffer location may have constraints depending on
495 the host cpu and OS */
496 #if defined(__linux__)
498 int flags;
499 void *start = NULL;
501 flags = MAP_PRIVATE | MAP_ANONYMOUS;
502 #if defined(__x86_64__)
503 flags |= MAP_32BIT;
504 /* Cannot map more than that */
505 if (code_gen_buffer_size > (800 * 1024 * 1024))
506 code_gen_buffer_size = (800 * 1024 * 1024);
507 #elif defined(__sparc_v9__)
508 // Map the buffer below 2G, so we can use direct calls and branches
509 flags |= MAP_FIXED;
510 start = (void *) 0x60000000UL;
511 if (code_gen_buffer_size > (512 * 1024 * 1024))
512 code_gen_buffer_size = (512 * 1024 * 1024);
513 #elif defined(__arm__)
514 /* Map the buffer below 32M, so we can use direct calls and branches */
515 flags |= MAP_FIXED;
516 start = (void *) 0x01000000UL;
517 if (code_gen_buffer_size > 16 * 1024 * 1024)
518 code_gen_buffer_size = 16 * 1024 * 1024;
519 #elif defined(__s390x__)
520 /* Map the buffer so that we can use direct calls and branches. */
521 /* We have a +- 4GB range on the branches; leave some slop. */
522 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
523 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
525 start = (void *)0x90000000UL;
526 #endif
527 code_gen_buffer = mmap(start, code_gen_buffer_size,
528 PROT_WRITE | PROT_READ | PROT_EXEC,
529 flags, -1, 0);
530 if (code_gen_buffer == MAP_FAILED) {
531 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
532 exit(1);
535 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
537 int flags;
538 void *addr = NULL;
539 flags = MAP_PRIVATE | MAP_ANONYMOUS;
540 #if defined(__x86_64__)
541 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
542 * 0x40000000 is free */
543 flags |= MAP_FIXED;
544 addr = (void *)0x40000000;
545 /* Cannot map more than that */
546 if (code_gen_buffer_size > (800 * 1024 * 1024))
547 code_gen_buffer_size = (800 * 1024 * 1024);
548 #endif
549 code_gen_buffer = mmap(addr, code_gen_buffer_size,
550 PROT_WRITE | PROT_READ | PROT_EXEC,
551 flags, -1, 0);
552 if (code_gen_buffer == MAP_FAILED) {
553 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
554 exit(1);
557 #else
558 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
559 map_exec(code_gen_buffer, code_gen_buffer_size);
560 #endif
561 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
562 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
563 code_gen_buffer_max_size = code_gen_buffer_size -
564 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
565 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
566 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
569 /* Must be called before using the QEMU cpus. 'tb_size' is the size
570 (in bytes) allocated to the translation buffer. Zero means default
571 size. */
572 void cpu_exec_init_all(unsigned long tb_size)
574 cpu_gen_init();
575 code_gen_alloc(tb_size);
576 code_gen_ptr = code_gen_buffer;
577 page_init();
578 #if !defined(CONFIG_USER_ONLY)
579 io_mem_init();
580 #endif
581 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
582 /* There's no guest base to take into account, so go ahead and
583 initialize the prologue now. */
584 tcg_prologue_init(&tcg_ctx);
585 #endif
588 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
590 static int cpu_common_post_load(void *opaque, int version_id)
592 CPUState *env = opaque;
594 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
595 version_id is increased. */
596 env->interrupt_request &= ~0x01;
597 tlb_flush(env, 1);
599 return 0;
602 static const VMStateDescription vmstate_cpu_common = {
603 .name = "cpu_common",
604 .version_id = 1,
605 .minimum_version_id = 1,
606 .minimum_version_id_old = 1,
607 .post_load = cpu_common_post_load,
608 .fields = (VMStateField []) {
609 VMSTATE_UINT32(halted, CPUState),
610 VMSTATE_UINT32(interrupt_request, CPUState),
611 VMSTATE_END_OF_LIST()
614 #endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
629 void cpu_exec_init(CPUState *env)
631 CPUState **penv;
632 int cpu_index;
634 #if defined(CONFIG_USER_ONLY)
635 cpu_list_lock();
636 #endif
637 env->next_cpu = NULL;
638 penv = &first_cpu;
639 cpu_index = 0;
640 while (*penv != NULL) {
641 penv = &(*penv)->next_cpu;
642 cpu_index++;
644 env->cpu_index = cpu_index;
645 env->numa_node = 0;
646 QTAILQ_INIT(&env->breakpoints);
647 QTAILQ_INIT(&env->watchpoints);
648 #ifdef __WIN32
649 env->thread_id = GetCurrentProcessId();
650 #else
651 env->thread_id = getpid();
652 #endif
653 *penv = env;
654 #if defined(CONFIG_USER_ONLY)
655 cpu_list_unlock();
656 #endif
657 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
658 vmstate_register(cpu_index, &vmstate_cpu_common, env);
659 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
660 cpu_save, cpu_load, env);
661 #endif
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
673 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
675 static void page_flush_tb_1 (int level, void **lp)
677 int i;
679 if (*lp == NULL) {
680 return;
682 if (level == 0) {
683 PageDesc *pd = *lp;
684 for (i = 0; i < L2_SIZE; ++i) {
685 pd[i].first_tb = NULL;
686 invalidate_page_bitmap(pd + i);
688 } else {
689 void **pp = *lp;
690 for (i = 0; i < L2_SIZE; ++i) {
691 page_flush_tb_1 (level - 1, pp + i);
696 static void page_flush_tb(void)
698 int i;
699 for (i = 0; i < V_L1_SIZE; i++) {
700 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
704 /* flush all the translation blocks */
705 /* XXX: tb_flush is currently not thread safe */
706 void tb_flush(CPUState *env1)
708 CPUState *env;
709 #if defined(DEBUG_FLUSH)
710 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
711 (unsigned long)(code_gen_ptr - code_gen_buffer),
712 nb_tbs, nb_tbs > 0 ?
713 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
714 #endif
715 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
716 cpu_abort(env1, "Internal error: code buffer overflow\n");
718 nb_tbs = 0;
720 for(env = first_cpu; env != NULL; env = env->next_cpu) {
721 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
724 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
725 page_flush_tb();
727 code_gen_ptr = code_gen_buffer;
728 /* XXX: flush processor icache at this point if cache flush is
729 expensive */
730 tb_flush_count++;
733 #ifdef DEBUG_TB_CHECK
735 static void tb_invalidate_check(target_ulong address)
737 TranslationBlock *tb;
738 int i;
739 address &= TARGET_PAGE_MASK;
740 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
741 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
742 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
743 address >= tb->pc + tb->size)) {
744 printf("ERROR invalidate: address=" TARGET_FMT_lx
745 " PC=%08lx size=%04x\n",
746 address, (long)tb->pc, tb->size);
752 /* verify that all the pages have correct rights for code */
753 static void tb_page_check(void)
755 TranslationBlock *tb;
756 int i, flags1, flags2;
758 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
759 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
760 flags1 = page_get_flags(tb->pc);
761 flags2 = page_get_flags(tb->pc + tb->size - 1);
762 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
763 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
764 (long)tb->pc, tb->size, flags1, flags2);
770 #endif
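/* The TB lists walked by the helpers below store the slot index in the
   low two bits of each TranslationBlock pointer (0 or 1 for the page/jump
   slot, 2 marking the list head), hence the "& 3" / "& ~3" masking. */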
772 /* invalidate one TB */
773 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
774 int next_offset)
776 TranslationBlock *tb1;
777 for(;;) {
778 tb1 = *ptb;
779 if (tb1 == tb) {
780 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
781 break;
783 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
787 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
789 TranslationBlock *tb1;
790 unsigned int n1;
792 for(;;) {
793 tb1 = *ptb;
794 n1 = (long)tb1 & 3;
795 tb1 = (TranslationBlock *)((long)tb1 & ~3);
796 if (tb1 == tb) {
797 *ptb = tb1->page_next[n1];
798 break;
800 ptb = &tb1->page_next[n1];
804 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
806 TranslationBlock *tb1, **ptb;
807 unsigned int n1;
809 ptb = &tb->jmp_next[n];
810 tb1 = *ptb;
811 if (tb1) {
812 /* find tb(n) in circular list */
813 for(;;) {
814 tb1 = *ptb;
815 n1 = (long)tb1 & 3;
816 tb1 = (TranslationBlock *)((long)tb1 & ~3);
817 if (n1 == n && tb1 == tb)
818 break;
819 if (n1 == 2) {
820 ptb = &tb1->jmp_first;
821 } else {
822 ptb = &tb1->jmp_next[n1];
825 /* now we can suppress tb(n) from the list */
826 *ptb = tb->jmp_next[n];
828 tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
839 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
841 CPUState *env;
842 PageDesc *p;
843 unsigned int h, n1;
844 tb_page_addr_t phys_pc;
845 TranslationBlock *tb1, *tb2;
847 /* remove the TB from the hash list */
848 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
849 h = tb_phys_hash_func(phys_pc);
850 tb_remove(&tb_phys_hash[h], tb,
851 offsetof(TranslationBlock, phys_hash_next));
853 /* remove the TB from the page list */
854 if (tb->page_addr[0] != page_addr) {
855 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
856 tb_page_remove(&p->first_tb, tb);
857 invalidate_page_bitmap(p);
859 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
860 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
861 tb_page_remove(&p->first_tb, tb);
862 invalidate_page_bitmap(p);
865 tb_invalidated_flag = 1;
867 /* remove the TB from the hash list */
868 h = tb_jmp_cache_hash_func(tb->pc);
869 for(env = first_cpu; env != NULL; env = env->next_cpu) {
870 if (env->tb_jmp_cache[h] == tb)
871 env->tb_jmp_cache[h] = NULL;
874 /* suppress this TB from the two jump lists */
875 tb_jmp_remove(tb, 0);
876 tb_jmp_remove(tb, 1);
878 /* suppress any remaining jumps to this TB */
879 tb1 = tb->jmp_first;
880 for(;;) {
881 n1 = (long)tb1 & 3;
882 if (n1 == 2)
883 break;
884 tb1 = (TranslationBlock *)((long)tb1 & ~3);
885 tb2 = tb1->jmp_next[n1];
886 tb_reset_jump(tb1, n1);
887 tb1->jmp_next[n1] = NULL;
888 tb1 = tb2;
890 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
892 tb_phys_invalidate_count++;
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
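/* Build the bitmap of bytes in this page that are covered by translated
   code; tb_invalidate_phys_page_fast() consults it so that guest writes
   which cannot overlap any TB avoid a full invalidation. */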
922 static void build_page_bitmap(PageDesc *p)
924 int n, tb_start, tb_end;
925 TranslationBlock *tb;
927 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
929 tb = p->first_tb;
930 while (tb != NULL) {
931 n = (long)tb & 3;
932 tb = (TranslationBlock *)((long)tb & ~3);
933 /* NOTE: this is subtle as a TB may span two physical pages */
934 if (n == 0) {
935 /* NOTE: tb_end may be after the end of the page, but
936 it is not a problem */
937 tb_start = tb->pc & ~TARGET_PAGE_MASK;
938 tb_end = tb_start + tb->size;
939 if (tb_end > TARGET_PAGE_SIZE)
940 tb_end = TARGET_PAGE_SIZE;
941 } else {
942 tb_start = 0;
943 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
945 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
946 tb = tb->page_next[n];
950 TranslationBlock *tb_gen_code(CPUState *env,
951 target_ulong pc, target_ulong cs_base,
952 int flags, int cflags)
954 TranslationBlock *tb;
955 uint8_t *tc_ptr;
956 tb_page_addr_t phys_pc, phys_page2;
957 target_ulong virt_page2;
958 int code_gen_size;
960 phys_pc = get_page_addr_code(env, pc);
961 tb = tb_alloc(pc);
962 if (!tb) {
963 /* flush must be done */
964 tb_flush(env);
965 /* cannot fail at this point */
966 tb = tb_alloc(pc);
967 /* Don't forget to invalidate previous TB info. */
968 tb_invalidated_flag = 1;
970 tc_ptr = code_gen_ptr;
971 tb->tc_ptr = tc_ptr;
972 tb->cs_base = cs_base;
973 tb->flags = flags;
974 tb->cflags = cflags;
975 cpu_gen_code(env, tb, &code_gen_size);
976 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
978 /* check next page if needed */
979 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
980 phys_page2 = -1;
981 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
982 phys_page2 = get_page_addr_code(env, virt_page2);
984 tb_link_page(tb, phys_pc, phys_page2);
985 return tb;
988 /* invalidate all TBs which intersect with the target physical page
989 starting in range [start;end[. NOTE: start and end must refer to
990 the same physical page. 'is_cpu_write_access' should be true if called
991 from a real cpu write access: the virtual CPU will exit the current
992 TB if code is modified inside this TB. */
993 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
994 int is_cpu_write_access)
996 TranslationBlock *tb, *tb_next, *saved_tb;
997 CPUState *env = cpu_single_env;
998 tb_page_addr_t tb_start, tb_end;
999 PageDesc *p;
1000 int n;
1001 #ifdef TARGET_HAS_PRECISE_SMC
1002 int current_tb_not_found = is_cpu_write_access;
1003 TranslationBlock *current_tb = NULL;
1004 int current_tb_modified = 0;
1005 target_ulong current_pc = 0;
1006 target_ulong current_cs_base = 0;
1007 int current_flags = 0;
1008 #endif /* TARGET_HAS_PRECISE_SMC */
1010 p = page_find(start >> TARGET_PAGE_BITS);
1011 if (!p)
1012 return;
1013 if (!p->code_bitmap &&
1014 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1015 is_cpu_write_access) {
1016 /* build code bitmap */
1017 build_page_bitmap(p);
1020 /* we remove all the TBs in the range [start, end[ */
1021 /* XXX: see if in some cases it could be faster to invalidate all the code */
1022 tb = p->first_tb;
1023 while (tb != NULL) {
1024 n = (long)tb & 3;
1025 tb = (TranslationBlock *)((long)tb & ~3);
1026 tb_next = tb->page_next[n];
1027 /* NOTE: this is subtle as a TB may span two physical pages */
1028 if (n == 0) {
1029 /* NOTE: tb_end may be after the end of the page, but
1030 it is not a problem */
1031 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1032 tb_end = tb_start + tb->size;
1033 } else {
1034 tb_start = tb->page_addr[1];
1035 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1037 if (!(tb_end <= start || tb_start >= end)) {
1038 #ifdef TARGET_HAS_PRECISE_SMC
1039 if (current_tb_not_found) {
1040 current_tb_not_found = 0;
1041 current_tb = NULL;
1042 if (env->mem_io_pc) {
1043 /* now we have a real cpu fault */
1044 current_tb = tb_find_pc(env->mem_io_pc);
1047 if (current_tb == tb &&
1048 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1049 /* If we are modifying the current TB, we must stop
1050 its execution. We could be more precise by checking
1051 that the modification is after the current PC, but it
1052 would require a specialized function to partially
1053 restore the CPU state */
1055 current_tb_modified = 1;
1056 cpu_restore_state(current_tb, env,
1057 env->mem_io_pc, NULL);
1058 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1059 &current_flags);
1061 #endif /* TARGET_HAS_PRECISE_SMC */
1062 /* we need to do that to handle the case where a signal
1063 occurs while doing tb_phys_invalidate() */
1064 saved_tb = NULL;
1065 if (env) {
1066 saved_tb = env->current_tb;
1067 env->current_tb = NULL;
1069 tb_phys_invalidate(tb, -1);
1070 if (env) {
1071 env->current_tb = saved_tb;
1072 if (env->interrupt_request && env->current_tb)
1073 cpu_interrupt(env, env->interrupt_request);
1076 tb = tb_next;
1078 #if !defined(CONFIG_USER_ONLY)
1079 /* if no code remaining, no need to continue to use slow writes */
1080 if (!p->first_tb) {
1081 invalidate_page_bitmap(p);
1082 if (is_cpu_write_access) {
1083 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1086 #endif
1087 #ifdef TARGET_HAS_PRECISE_SMC
1088 if (current_tb_modified) {
1089 /* we generate a block containing just the instruction
1090 modifying the memory. It will ensure that it cannot modify
1091 itself */
1092 env->current_tb = NULL;
1093 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1094 cpu_resume_from_signal(env, NULL);
1096 #endif
1099 /* len must be <= 8 and start must be a multiple of len */
1100 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1102 PageDesc *p;
1103 int offset, b;
1104 #if 0
1105 if (1) {
1106 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1107 cpu_single_env->mem_io_vaddr, len,
1108 cpu_single_env->eip,
1109 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1111 #endif
1112 p = page_find(start >> TARGET_PAGE_BITS);
1113 if (!p)
1114 return;
1115 if (p->code_bitmap) {
1116 offset = start & ~TARGET_PAGE_MASK;
1117 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1118 if (b & ((1 << len) - 1))
1119 goto do_invalidate;
1120 } else {
1121 do_invalidate:
1122 tb_invalidate_phys_page_range(start, start + len, 1);
1126 #if !defined(CONFIG_SOFTMMU)
1127 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1128 unsigned long pc, void *puc)
1130 TranslationBlock *tb;
1131 PageDesc *p;
1132 int n;
1133 #ifdef TARGET_HAS_PRECISE_SMC
1134 TranslationBlock *current_tb = NULL;
1135 CPUState *env = cpu_single_env;
1136 int current_tb_modified = 0;
1137 target_ulong current_pc = 0;
1138 target_ulong current_cs_base = 0;
1139 int current_flags = 0;
1140 #endif
1142 addr &= TARGET_PAGE_MASK;
1143 p = page_find(addr >> TARGET_PAGE_BITS);
1144 if (!p)
1145 return;
1146 tb = p->first_tb;
1147 #ifdef TARGET_HAS_PRECISE_SMC
1148 if (tb && pc != 0) {
1149 current_tb = tb_find_pc(pc);
1151 #endif
1152 while (tb != NULL) {
1153 n = (long)tb & 3;
1154 tb = (TranslationBlock *)((long)tb & ~3);
1155 #ifdef TARGET_HAS_PRECISE_SMC
1156 if (current_tb == tb &&
1157 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1158 /* If we are modifying the current TB, we must stop
1159 its execution. We could be more precise by checking
1160 that the modification is after the current PC, but it
1161 would require a specialized function to partially
1162 restore the CPU state */
1164 current_tb_modified = 1;
1165 cpu_restore_state(current_tb, env, pc, puc);
1166 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1167 &current_flags);
1169 #endif /* TARGET_HAS_PRECISE_SMC */
1170 tb_phys_invalidate(tb, addr);
1171 tb = tb->page_next[n];
1173 p->first_tb = NULL;
1174 #ifdef TARGET_HAS_PRECISE_SMC
1175 if (current_tb_modified) {
1176 /* we generate a block containing just the instruction
1177 modifying the memory. It will ensure that it cannot modify
1178 itself */
1179 env->current_tb = NULL;
1180 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1181 cpu_resume_from_signal(env, puc);
1183 #endif
1185 #endif
1187 /* add the tb in the target page and protect it if necessary */
1188 static inline void tb_alloc_page(TranslationBlock *tb,
1189 unsigned int n, tb_page_addr_t page_addr)
1191 PageDesc *p;
1192 TranslationBlock *last_first_tb;
1194 tb->page_addr[n] = page_addr;
1195 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1196 tb->page_next[n] = p->first_tb;
1197 last_first_tb = p->first_tb;
1198 p->first_tb = (TranslationBlock *)((long)tb | n);
1199 invalidate_page_bitmap(p);
1201 #if defined(TARGET_HAS_SMC) || 1
1203 #if defined(CONFIG_USER_ONLY)
1204 if (p->flags & PAGE_WRITE) {
1205 target_ulong addr;
1206 PageDesc *p2;
1207 int prot;
1209 /* force the host page as non writable (writes will have a
1210 page fault + mprotect overhead) */
1211 page_addr &= qemu_host_page_mask;
1212 prot = 0;
1213 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1214 addr += TARGET_PAGE_SIZE) {
1216 p2 = page_find (addr >> TARGET_PAGE_BITS);
1217 if (!p2)
1218 continue;
1219 prot |= p2->flags;
1220 p2->flags &= ~PAGE_WRITE;
1222 mprotect(g2h(page_addr), qemu_host_page_size,
1223 (prot & PAGE_BITS) & ~PAGE_WRITE);
1224 #ifdef DEBUG_TB_INVALIDATE
1225 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1226 page_addr);
1227 #endif
1229 #else
1230 /* if some code is already present, then the pages are already
1231 protected. So we handle the case where only the first TB is
1232 allocated in a physical page */
1233 if (!last_first_tb) {
1234 tlb_protect_code(page_addr);
1236 #endif
1238 #endif /* TARGET_HAS_SMC */
1241 /* Allocate a new translation block. Flush the translation buffer if
1242 too many translation blocks or too much generated code. */
1243 TranslationBlock *tb_alloc(target_ulong pc)
1245 TranslationBlock *tb;
1247 if (nb_tbs >= code_gen_max_blocks ||
1248 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1249 return NULL;
1250 tb = &tbs[nb_tbs++];
1251 tb->pc = pc;
1252 tb->cflags = 0;
1253 return tb;
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
1267 /* add a new TB and link it to the physical page tables. phys_page2 is
1268 (-1) to indicate that only one page contains the TB. */
1269 void tb_link_page(TranslationBlock *tb,
1270 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1272 unsigned int h;
1273 TranslationBlock **ptb;
1275 /* Grab the mmap lock to stop another thread invalidating this TB
1276 before we are done. */
1277 mmap_lock();
1278 /* add in the physical hash table */
1279 h = tb_phys_hash_func(phys_pc);
1280 ptb = &tb_phys_hash[h];
1281 tb->phys_hash_next = *ptb;
1282 *ptb = tb;
1284 /* add in the page list */
1285 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1286 if (phys_page2 != -1)
1287 tb_alloc_page(tb, 1, phys_page2);
1288 else
1289 tb->page_addr[1] = -1;
1291 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1292 tb->jmp_next[0] = NULL;
1293 tb->jmp_next[1] = NULL;
1295 /* init original jump addresses */
1296 if (tb->tb_next_offset[0] != 0xffff)
1297 tb_reset_jump(tb, 0);
1298 if (tb->tb_next_offset[1] != 0xffff)
1299 tb_reset_jump(tb, 1);
1301 #ifdef DEBUG_TB_CHECK
1302 tb_page_check();
1303 #endif
1304 mmap_unlock();
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
1338 static void tb_reset_jump_recursive(TranslationBlock *tb);
1340 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1342 TranslationBlock *tb1, *tb_next, **ptb;
1343 unsigned int n1;
1345 tb1 = tb->jmp_next[n];
1346 if (tb1 != NULL) {
1347 /* find head of list */
1348 for(;;) {
1349 n1 = (long)tb1 & 3;
1350 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1351 if (n1 == 2)
1352 break;
1353 tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */
1356 tb_next = tb1;
1358 /* remove tb from the jmp_first list */
1359 ptb = &tb_next->jmp_first;
1360 for(;;) {
1361 tb1 = *ptb;
1362 n1 = (long)tb1 & 3;
1363 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1364 if (n1 == n && tb1 == tb)
1365 break;
1366 ptb = &tb1->jmp_next[n1];
1368 *ptb = tb->jmp_next[n];
1369 tb->jmp_next[n] = NULL;
1371 /* suppress the jump to next tb in generated code */
1372 tb_reset_jump(tb, n);
1374 /* suppress jumps in the tb on which we could have jumped */
1375 tb_reset_jump_recursive(tb_next);
1379 static void tb_reset_jump_recursive(TranslationBlock *tb)
1381 tb_reset_jump_recursive2(tb, 0);
1382 tb_reset_jump_recursive2(tb, 1);
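/* Force retranslation of the TB(s) covering 'pc' so that a newly
   inserted or removed breakpoint takes effect. */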
1385 #if defined(TARGET_HAS_ICE)
1386 #if defined(CONFIG_USER_ONLY)
1387 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1389 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1391 #else
1392 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1394 target_phys_addr_t addr;
1395 target_ulong pd;
1396 ram_addr_t ram_addr;
1397 PhysPageDesc *p;
1399 addr = cpu_get_phys_page_debug(env, pc);
1400 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1401 if (!p) {
1402 pd = IO_MEM_UNASSIGNED;
1403 } else {
1404 pd = p->phys_offset;
1406 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1407 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1409 #endif
1410 #endif /* TARGET_HAS_ICE */
1412 #if defined(CONFIG_USER_ONLY)
1413 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1418 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1419 int flags, CPUWatchpoint **watchpoint)
1421 return -ENOSYS;
1423 #else
1424 /* Add a watchpoint. */
1425 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1426 int flags, CPUWatchpoint **watchpoint)
1428 target_ulong len_mask = ~(len - 1);
1429 CPUWatchpoint *wp;
1431 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1432 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1433 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1434 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1435 return -EINVAL;
1437 wp = qemu_malloc(sizeof(*wp));
1439 wp->vaddr = addr;
1440 wp->len_mask = len_mask;
1441 wp->flags = flags;
1443 /* keep all GDB-injected watchpoints in front */
1444 if (flags & BP_GDB)
1445 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1446 else
1447 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1449 tlb_flush_page(env, addr);
1451 if (watchpoint)
1452 *watchpoint = wp;
1453 return 0;
1456 /* Remove a specific watchpoint. */
1457 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1458 int flags)
1460 target_ulong len_mask = ~(len - 1);
1461 CPUWatchpoint *wp;
1463 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1464 if (addr == wp->vaddr && len_mask == wp->len_mask
1465 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1466 cpu_watchpoint_remove_by_ref(env, wp);
1467 return 0;
1470 return -ENOENT;
1473 /* Remove a specific watchpoint by reference. */
1474 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1476 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1478 tlb_flush_page(env, watchpoint->vaddr);
1480 qemu_free(watchpoint);
1483 /* Remove all matching watchpoints. */
1484 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1486 CPUWatchpoint *wp, *next;
1488 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1489 if (wp->flags & mask)
1490 cpu_watchpoint_remove_by_ref(env, wp);
1493 #endif
1495 /* Add a breakpoint. */
1496 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1497 CPUBreakpoint **breakpoint)
1499 #if defined(TARGET_HAS_ICE)
1500 CPUBreakpoint *bp;
1502 bp = qemu_malloc(sizeof(*bp));
1504 bp->pc = pc;
1505 bp->flags = flags;
1507 /* keep all GDB-injected breakpoints in front */
1508 if (flags & BP_GDB)
1509 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1510 else
1511 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1513 breakpoint_invalidate(env, pc);
1515 if (breakpoint)
1516 *breakpoint = bp;
1517 return 0;
1518 #else
1519 return -ENOSYS;
1520 #endif
1523 /* Remove a specific breakpoint. */
1524 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1526 #if defined(TARGET_HAS_ICE)
1527 CPUBreakpoint *bp;
1529 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1530 if (bp->pc == pc && bp->flags == flags) {
1531 cpu_breakpoint_remove_by_ref(env, bp);
1532 return 0;
1535 return -ENOENT;
1536 #else
1537 return -ENOSYS;
1538 #endif
1541 /* Remove a specific breakpoint by reference. */
1542 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1544 #if defined(TARGET_HAS_ICE)
1545 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1547 breakpoint_invalidate(env, breakpoint->pc);
1549 qemu_free(breakpoint);
1550 #endif
1553 /* Remove all matching breakpoints. */
1554 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1556 #if defined(TARGET_HAS_ICE)
1557 CPUBreakpoint *bp, *next;
1559 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1560 if (bp->flags & mask)
1561 cpu_breakpoint_remove_by_ref(env, bp);
1563 #endif
1566 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1567 CPU loop after each instruction */
1568 void cpu_single_step(CPUState *env, int enabled)
1570 #if defined(TARGET_HAS_ICE)
1571 if (env->singlestep_enabled != enabled) {
1572 env->singlestep_enabled = enabled;
1573 if (kvm_enabled())
1574 kvm_update_guest_debug(env, 0);
1575 else {
1576 /* must flush all the translated code to avoid inconsistencies */
1577 /* XXX: only flush what is necessary */
1578 tb_flush(env);
1581 #endif
1584 /* enable or disable low levels log */
1585 void cpu_set_log(int log_flags)
1587 loglevel = log_flags;
1588 if (loglevel && !logfile) {
1589 logfile = fopen(logfilename, log_append ? "a" : "w");
1590 if (!logfile) {
1591 perror(logfilename);
1592 _exit(1);
1594 #if !defined(CONFIG_SOFTMMU)
1595 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1597 static char logfile_buf[4096];
1598 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1600 #elif !defined(_WIN32)
1601 /* Win32 doesn't support line-buffering and requires size >= 2 */
1602 setvbuf(logfile, NULL, _IOLBF, 0);
1603 #endif
1604 log_append = 1;
1606 if (!loglevel && logfile) {
1607 fclose(logfile);
1608 logfile = NULL;
1612 void cpu_set_log_filename(const char *filename)
1614 logfilename = strdup(filename);
1615 if (logfile) {
1616 fclose(logfile);
1617 logfile = NULL;
1619 cpu_set_log(loglevel);
1622 static void cpu_unlink_tb(CPUState *env)
1624 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1625 problem and hope the cpu will stop of its own accord. For userspace
1626 emulation this often isn't actually as bad as it sounds. Often
1627 signals are used primarily to interrupt blocking syscalls. */
1628 TranslationBlock *tb;
1629 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1631 spin_lock(&interrupt_lock);
1632 tb = env->current_tb;
1633 /* if the cpu is currently executing code, we must unlink it and
1634 all the potentially executing TB */
1635 if (tb) {
1636 env->current_tb = NULL;
1637 tb_reset_jump_recursive(tb);
1639 spin_unlock(&interrupt_lock);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    if (kvm_enabled() && !kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1676 void cpu_reset_interrupt(CPUState *env, int mask)
1678 env->interrupt_request &= ~mask;
1681 void cpu_exit(CPUState *env)
1683 env->exit_request = 1;
1684 cpu_unlink_tb(env);
1687 const CPULogItem cpu_log_items[] = {
1688 { CPU_LOG_TB_OUT_ASM, "out_asm",
1689 "show generated host assembly code for each compiled TB" },
1690 { CPU_LOG_TB_IN_ASM, "in_asm",
1691 "show target assembly code for each compiled TB" },
1692 { CPU_LOG_TB_OP, "op",
1693 "show micro ops for each compiled TB" },
1694 { CPU_LOG_TB_OP_OPT, "op_opt",
1695 "show micro ops "
1696 #ifdef TARGET_I386
1697 "before eflags optimization and "
1698 #endif
1699 "after liveness analysis" },
1700 { CPU_LOG_INT, "int",
1701 "show interrupts/exceptions in short format" },
1702 { CPU_LOG_EXEC, "exec",
1703 "show trace before each executed TB (lots of logs)" },
1704 { CPU_LOG_TB_CPU, "cpu",
1705 "show CPU state before block translation" },
1706 #ifdef TARGET_I386
1707 { CPU_LOG_PCALL, "pcall",
1708 "show protected mode far calls/returns/exceptions" },
1709 { CPU_LOG_RESET, "cpu_reset",
1710 "show CPU state before CPU resets" },
1711 #endif
1712 #ifdef DEBUG_IOPORT
1713 { CPU_LOG_IOPORT, "ioport",
1714 "show all i/o ports accesses" },
1715 #endif
1716 { 0, NULL, NULL },
1719 #ifndef CONFIG_USER_ONLY
1720 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1721 = QLIST_HEAD_INITIALIZER(memory_client_list);
1723 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1724 ram_addr_t size,
1725 ram_addr_t phys_offset)
1727 CPUPhysMemoryClient *client;
1728 QLIST_FOREACH(client, &memory_client_list, list) {
1729 client->set_memory(client, start_addr, size, phys_offset);
1733 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1734 target_phys_addr_t end)
1736 CPUPhysMemoryClient *client;
1737 QLIST_FOREACH(client, &memory_client_list, list) {
1738 int r = client->sync_dirty_bitmap(client, start, end);
1739 if (r < 0)
1740 return r;
1742 return 0;
1745 static int cpu_notify_migration_log(int enable)
1747 CPUPhysMemoryClient *client;
1748 QLIST_FOREACH(client, &memory_client_list, list) {
1749 int r = client->migration_log(client, enable);
1750 if (r < 0)
1751 return r;
1753 return 0;
1756 static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1757 int level, void **lp)
1759 int i;
1761 if (*lp == NULL) {
1762 return;
1764 if (level == 0) {
1765 PhysPageDesc *pd = *lp;
1766 for (i = 0; i < L2_SIZE; ++i) {
1767 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1768 client->set_memory(client, pd[i].region_offset,
1769 TARGET_PAGE_SIZE, pd[i].phys_offset);
1772 } else {
1773 void **pp = *lp;
1774 for (i = 0; i < L2_SIZE; ++i) {
1775 phys_page_for_each_1(client, level - 1, pp + i);
1780 static void phys_page_for_each(CPUPhysMemoryClient *client)
1782 int i;
1783 for (i = 0; i < P_L1_SIZE; ++i) {
1784 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1785 l1_phys_map + 1);
1789 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1791 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1792 phys_page_for_each(client);
1795 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1797 QLIST_REMOVE(client, list);
1799 #endif
1801 static int cmp1(const char *s1, int n, const char *s2)
1803 if (strlen(s2) != n)
1804 return 0;
1805 return memcmp(s1, s2, n) == 0;
1808 /* takes a comma separated list of log masks. Return 0 if error. */
1809 int cpu_str_to_log_mask(const char *str)
1811 const CPULogItem *item;
1812 int mask;
1813 const char *p, *p1;
1815 p = str;
1816 mask = 0;
1817 for(;;) {
1818 p1 = strchr(p, ',');
1819 if (!p1)
1820 p1 = p + strlen(p);
1821 if(cmp1(p,p1-p,"all")) {
1822 for(item = cpu_log_items; item->mask != 0; item++) {
1823 mask |= item->mask;
1825 } else {
1826 for(item = cpu_log_items; item->mask != 0; item++) {
1827 if (cmp1(p, p1 - p, item->name))
1828 goto found;
1830 return 0;
1832 found:
1833 mask |= item->mask;
1834 if (*p1 != ',')
1835 break;
1836 p = p1 + 1;
1838 return mask;
1841 void cpu_abort(CPUState *env, const char *fmt, ...)
1843 va_list ap;
1844 va_list ap2;
1846 va_start(ap, fmt);
1847 va_copy(ap2, ap);
1848 fprintf(stderr, "qemu: fatal: ");
1849 vfprintf(stderr, fmt, ap);
1850 fprintf(stderr, "\n");
1851 #ifdef TARGET_I386
1852 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1853 #else
1854 cpu_dump_state(env, stderr, fprintf, 0);
1855 #endif
1856 if (qemu_log_enabled()) {
1857 qemu_log("qemu: fatal: ");
1858 qemu_log_vprintf(fmt, ap2);
1859 qemu_log("\n");
1860 #ifdef TARGET_I386
1861 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1862 #else
1863 log_cpu_state(env, 0);
1864 #endif
1865 qemu_log_flush();
1866 qemu_log_close();
1868 va_end(ap2);
1869 va_end(ap);
1870 #if defined(CONFIG_USER_ONLY)
1872 struct sigaction act;
1873 sigfillset(&act.sa_mask);
1874 act.sa_handler = SIG_DFL;
1875 sigaction(SIGABRT, &act, NULL);
1877 #endif
1878 abort();
1881 CPUState *cpu_copy(CPUState *env)
1883 CPUState *new_env = cpu_init(env->cpu_model_str);
1884 CPUState *next_cpu = new_env->next_cpu;
1885 int cpu_index = new_env->cpu_index;
1886 #if defined(TARGET_HAS_ICE)
1887 CPUBreakpoint *bp;
1888 CPUWatchpoint *wp;
1889 #endif
1891 memcpy(new_env, env, sizeof(CPUState));
1893 /* Preserve chaining and index. */
1894 new_env->next_cpu = next_cpu;
1895 new_env->cpu_index = cpu_index;
1897 /* Clone all break/watchpoints.
1898 Note: Once we support ptrace with hw-debug register access, make sure
1899 BP_CPU break/watchpoints are handled correctly on clone. */
1900 QTAILQ_INIT(&env->breakpoints);
1901 QTAILQ_INIT(&env->watchpoints);
1902 #if defined(TARGET_HAS_ICE)
1903 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1904 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1906 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1907 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1908 wp->flags, NULL);
1910 #endif
1912 return new_env;
1915 #if !defined(CONFIG_USER_ONLY)
1917 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1919 unsigned int i;
1921 /* Discard jump cache entries for any tb which might potentially
1922 overlap the flushed page. */
1923 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1924 memset (&env->tb_jmp_cache[i], 0,
1925 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1927 i = tb_jmp_cache_hash_page(addr);
1928 memset (&env->tb_jmp_cache[i], 0,
1929 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1939 /* NOTE: if flush_global is true, also flush global entries (not
1940 implemented yet) */
1941 void tlb_flush(CPUState *env, int flush_global)
1943 int i;
1945 #if defined(DEBUG_TLB)
1946 printf("tlb_flush:\n");
1947 #endif
1948 /* must reset current TB so that interrupts cannot modify the
1949 links while we are modifying them */
1950 env->current_tb = NULL;
1952 for(i = 0; i < CPU_TLB_SIZE; i++) {
1953 int mmu_idx;
1954 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1955 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1959 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1961 env->tlb_flush_addr = -1;
1962 env->tlb_flush_mask = 0;
1963 tlb_flush_count++;
1966 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1968 if (addr == (tlb_entry->addr_read &
1969 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1970 addr == (tlb_entry->addr_write &
1971 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1972 addr == (tlb_entry->addr_code &
1973 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1974 *tlb_entry = s_cputlb_empty_entry;
1978 void tlb_flush_page(CPUState *env, target_ulong addr)
1980 int i;
1981 int mmu_idx;
1983 #if defined(DEBUG_TLB)
1984 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1985 #endif
1986 /* Check if we need to flush due to large pages. */
1987 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1988 #if defined(DEBUG_TLB)
1989 printf("tlb_flush_page: forced full flush ("
1990 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1991 env->tlb_flush_addr, env->tlb_flush_mask);
1992 #endif
1993 tlb_flush(env, 1);
1994 return;
1996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env->current_tb = NULL;
2000 addr &= TARGET_PAGE_MASK;
2001 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2002 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2003 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2005 tlb_flush_jmp_cache(env, addr);
2008 /* update the TLBs so that writes to code in the virtual page 'addr'
2009 can be detected */
2010 static void tlb_protect_code(ram_addr_t ram_addr)
2012 cpu_physical_memory_reset_dirty(ram_addr,
2013 ram_addr + TARGET_PAGE_SIZE,
2014 CODE_DIRTY_FLAG);
2017 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2018 tested for self modifying code */
2019 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2020 target_ulong vaddr)
2022 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
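/* Redirect RAM writes within [start, start+length) back through the slow
   path by tagging the TLB entry with TLB_NOTDIRTY, so that the dirty bits
   are set again on the next store. */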
2025 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2026 unsigned long start, unsigned long length)
2028 unsigned long addr;
2029 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2030 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2031 if ((addr - start) < length) {
2032 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2037 /* Note: start and end must be within the same ram block. */
2038 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2039 int dirty_flags)
2041 CPUState *env;
2042 unsigned long length, start1;
2043 int i;
2045 start &= TARGET_PAGE_MASK;
2046 end = TARGET_PAGE_ALIGN(end);
2048 length = end - start;
2049 if (length == 0)
2050 return;
2051 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2053 /* we modify the TLB cache so that the dirty bit will be set again
2054 when accessing the range */
2055 start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
2058 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2059 != (end - 1) - start) {
2060 abort();
2063 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2064 int mmu_idx;
2065 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2066 for(i = 0; i < CPU_TLB_SIZE; i++)
2067 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2068 start1, length);
2073 int cpu_physical_memory_set_dirty_tracking(int enable)
2075 int ret = 0;
2076 in_migration = enable;
2077 ret = cpu_notify_migration_log(!!enable);
2078 return ret;
2081 int cpu_physical_memory_get_dirty_tracking(void)
2083 return in_migration;
2086 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2087 target_phys_addr_t end_addr)
2089 int ret;
2091 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2092 return ret;
2095 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2097 ram_addr_t ram_addr;
2098 void *p;
2100 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2101 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2102 + tlb_entry->addend);
2103 ram_addr = qemu_ram_addr_from_host(p);
2104 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2105 tlb_entry->addr_write |= TLB_NOTDIRTY;
2110 /* update the TLB according to the current state of the dirty bits */
2111 void cpu_tlb_update_dirty(CPUState *env)
2113 int i;
2114 int mmu_idx;
2115 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2116 for(i = 0; i < CPU_TLB_SIZE; i++)
2117 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2121 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2123 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2124 tlb_entry->addr_write = vaddr;
2127 /* update the TLB corresponding to virtual page vaddr
2128 so that it is no longer dirty */
2129 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2131 int i;
2132 int mmu_idx;
2134 vaddr &= TARGET_PAGE_MASK;
2135 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2136 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2137 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2140 /* Our TLB does not support large pages, so remember the area covered by
2141 large pages and trigger a full TLB flush if these are invalidated. */
2142 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2143 target_ulong size)
2145 target_ulong mask = ~(size - 1);
2147 if (env->tlb_flush_addr == (target_ulong)-1) {
2148 env->tlb_flush_addr = vaddr & mask;
2149 env->tlb_flush_mask = mask;
2150 return;
2152 /* Extend the existing region to include the new page.
2153 This is a compromise between unnecessary flushes and the cost
2154 of maintaining a full variable size TLB. */
2155 mask &= env->tlb_flush_mask;
2156 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2157 mask <<= 1;
2159 env->tlb_flush_addr &= mask;
2160 env->tlb_flush_mask = mask;
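/* Editor's note: worked example added in this edit, not part of the original
 * file.  With 2 MB large pages on a 32-bit target, ~(size - 1) is 0xffe00000.
 * After tlb_add_large_page(env, 0x00200000, 2 MB) the tracked region is
 * [0x00200000, 0x00400000).  Adding 0x00a00000 widens the mask
 * 0xffe00000 -> 0xffc00000 -> 0xff800000 -> 0xff000000, so the region grows to
 * the 16 MB span starting at 0: a later tlb_flush_page() anywhere in that span
 * may over-flush, but a stale large-page mapping can never survive.
 * example_large_page_mask() below is an invented sanity check of the same
 * arithmetic. */
#if 0
static void example_large_page_mask(void)
{
    target_ulong mask = ~(target_ulong)(0x200000 - 1);  /* 2 MB large pages */
    target_ulong a = 0x00200000, b = 0x00a00000;        /* two large pages  */

    while (((a ^ b) & mask) != 0) {
        mask <<= 1;                /* widen until both pages share the prefix */
    }
    assert(mask == ~(target_ulong)0xffffff && (a & mask) == 0);
}
#endif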
2163 /* Add a new TLB entry. At most one entry for a given virtual address
2164 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2165 supplied size is only used by tlb_flush_page. */
2166 void tlb_set_page(CPUState *env, target_ulong vaddr,
2167 target_phys_addr_t paddr, int prot,
2168 int mmu_idx, target_ulong size)
2170 PhysPageDesc *p;
2171 unsigned long pd;
2172 unsigned int index;
2173 target_ulong address;
2174 target_ulong code_address;
2175 unsigned long addend;
2176 CPUTLBEntry *te;
2177 CPUWatchpoint *wp;
2178 target_phys_addr_t iotlb;
2180 assert(size >= TARGET_PAGE_SIZE);
2181 if (size != TARGET_PAGE_SIZE) {
2182 tlb_add_large_page(env, vaddr, size);
2184 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2185 if (!p) {
2186 pd = IO_MEM_UNASSIGNED;
2187 } else {
2188 pd = p->phys_offset;
2190 #if defined(DEBUG_TLB)
2191 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d pd=0x%08lx\n",
2192 vaddr, (int)paddr, prot, mmu_idx, pd);
2193 #endif
2195 address = vaddr;
2196 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2197 /* IO memory case (romd handled later) */
2198 address |= TLB_MMIO;
2200 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2201 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2202 /* Normal RAM. */
2203 iotlb = pd & TARGET_PAGE_MASK;
2204 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2205 iotlb |= IO_MEM_NOTDIRTY;
2206 else
2207 iotlb |= IO_MEM_ROM;
2208 } else {
2209 /* IO handlers are currently passed a physical address.
2210 It would be nice to pass an offset from the base address
2211 of that region. This would avoid having to special case RAM,
2212 and avoid full address decoding in every device.
2213 We can't use the high bits of pd for this because
2214 IO_MEM_ROMD uses these as a ram address. */
2215 iotlb = (pd & ~TARGET_PAGE_MASK);
2216 if (p) {
2217 iotlb += p->region_offset;
2218 } else {
2219 iotlb += paddr;
2223 code_address = address;
2224 /* Make accesses to pages with watchpoints go via the
2225 watchpoint trap routines. */
2226 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2227 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2228 /* Avoid trapping reads of pages with a write breakpoint. */
2229 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2230 iotlb = io_mem_watch + paddr;
2231 address |= TLB_MMIO;
2232 break;
2237 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2238 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2239 te = &env->tlb_table[mmu_idx][index];
2240 te->addend = addend - vaddr;
2241 if (prot & PAGE_READ) {
2242 te->addr_read = address;
2243 } else {
2244 te->addr_read = -1;
2247 if (prot & PAGE_EXEC) {
2248 te->addr_code = code_address;
2249 } else {
2250 te->addr_code = -1;
2252 if (prot & PAGE_WRITE) {
2253 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2254 (pd & IO_MEM_ROMD)) {
2255 /* Write access calls the I/O callback. */
2256 te->addr_write = address | TLB_MMIO;
2257 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2258 !cpu_physical_memory_is_dirty(pd)) {
2259 te->addr_write = address | TLB_NOTDIRTY;
2260 } else {
2261 te->addr_write = address;
2263 } else {
2264 te->addr_write = -1;
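/* Editor's note: illustrative sketch added in this edit, not part of the
 * original file.  It shows how the entry filled in above is consumed: the
 * softmmu fast path compares the page-aligned virtual address against
 * addr_read/addr_write, and only when no TLB_* flag bits are set does the
 * comparison succeed, in which case the host address is simply vaddr + addend.
 * Entries carrying TLB_MMIO or TLB_NOTDIRTY never match and fall back to the
 * slow path (tlb_fill / the io_mem_* handlers).  example_tlb_lookup_read() is
 * an invented name; the real lookups are generated by TCG and by
 * softmmu_template.h, included at the end of this file. */
#if 0
static void *example_tlb_lookup_read(CPUState *env1, int mmu_idx,
                                     target_ulong vaddr)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env1->tlb_table[mmu_idx][index];

    if (e->addr_read == (vaddr & TARGET_PAGE_MASK)) {   /* byte-sized access */
        return (void *)(unsigned long)(vaddr + e->addend);  /* plain RAM hit */
    }
    return NULL;    /* miss, or an I/O / not-dirty entry: take the slow path */
}
#endif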
2268 #else
2270 void tlb_flush(CPUState *env, int flush_global)
2274 void tlb_flush_page(CPUState *env, target_ulong addr)
2279 * Walks guest process memory "regions" one by one
2280 * and calls callback function 'fn' for each region.
2283 struct walk_memory_regions_data
2285 walk_memory_regions_fn fn;
2286 void *priv;
2287 unsigned long start;
2288 int prot;
2291 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2292 abi_ulong end, int new_prot)
2294 if (data->start != -1ul) {
2295 int rc = data->fn(data->priv, data->start, end, data->prot);
2296 if (rc != 0) {
2297 return rc;
2301 data->start = (new_prot ? end : -1ul);
2302 data->prot = new_prot;
2304 return 0;
2307 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2308 abi_ulong base, int level, void **lp)
2310 abi_ulong pa;
2311 int i, rc;
2313 if (*lp == NULL) {
2314 return walk_memory_regions_end(data, base, 0);
2317 if (level == 0) {
2318 PageDesc *pd = *lp;
2319 for (i = 0; i < L2_SIZE; ++i) {
2320 int prot = pd[i].flags;
2322 pa = base | (i << TARGET_PAGE_BITS);
2323 if (prot != data->prot) {
2324 rc = walk_memory_regions_end(data, pa, prot);
2325 if (rc != 0) {
2326 return rc;
2330 } else {
2331 void **pp = *lp;
2332 for (i = 0; i < L2_SIZE; ++i) {
2333 pa = base | ((abi_ulong)i <<
2334 (TARGET_PAGE_BITS + L2_BITS * level));
2335 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2336 if (rc != 0) {
2337 return rc;
2342 return 0;
2345 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2347 struct walk_memory_regions_data data;
2348 unsigned long i;
2350 data.fn = fn;
2351 data.priv = priv;
2352 data.start = -1ul;
2353 data.prot = 0;
2355 for (i = 0; i < V_L1_SIZE; i++) {
2356 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2357 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2358 if (rc != 0) {
2359 return rc;
2363 return walk_memory_regions_end(&data, 0, 0);
2366 static int dump_region(void *priv, abi_ulong start,
2367 abi_ulong end, unsigned long prot)
2369 FILE *f = (FILE *)priv;
2371 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2372 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2373 start, end, end - start,
2374 ((prot & PAGE_READ) ? 'r' : '-'),
2375 ((prot & PAGE_WRITE) ? 'w' : '-'),
2376 ((prot & PAGE_EXEC) ? 'x' : '-'));
2378 return (0);
2381 /* dump memory mappings */
2382 void page_dump(FILE *f)
2384 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2385 "start", "end", "size", "prot");
2386 walk_memory_regions(f, dump_region);
2389 int page_get_flags(target_ulong address)
2391 PageDesc *p;
2393 p = page_find(address >> TARGET_PAGE_BITS);
2394 if (!p)
2395 return 0;
2396 return p->flags;
2399 /* Modify the flags of a page and invalidate the code if necessary.
2400 The flag PAGE_WRITE_ORG is positioned automatically depending
2401 on PAGE_WRITE. The mmap_lock should already be held. */
2402 void page_set_flags(target_ulong start, target_ulong end, int flags)
2404 target_ulong addr, len;
2406 /* This function should never be called with addresses outside the
2407 guest address space. If this assert fires, it probably indicates
2408 a missing call to h2g_valid. */
2409 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2410 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2411 #endif
2412 assert(start < end);
2414 start = start & TARGET_PAGE_MASK;
2415 end = TARGET_PAGE_ALIGN(end);
2417 if (flags & PAGE_WRITE) {
2418 flags |= PAGE_WRITE_ORG;
2421 for (addr = start, len = end - start;
2422 len != 0;
2423 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2424 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2426 /* If the write protection bit is set, then we invalidate
2427 the code inside. */
2428 if (!(p->flags & PAGE_WRITE) &&
2429 (flags & PAGE_WRITE) &&
2430 p->first_tb) {
2431 tb_invalidate_phys_page(addr, 0, NULL);
2433 p->flags = flags;
2437 int page_check_range(target_ulong start, target_ulong len, int flags)
2439 PageDesc *p;
2440 target_ulong end;
2441 target_ulong addr;
2443 /* This function should never be called with addresses outside the
2444 guest address space. If this assert fires, it probably indicates
2445 a missing call to h2g_valid. */
2446 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2447 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2448 #endif
2450 if (len == 0) {
2451 return 0;
2453 if (start + len - 1 < start) {
2454 /* We've wrapped around. */
2455 return -1;
2458 end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2459 start = start & TARGET_PAGE_MASK;
2461 for (addr = start, len = end - start;
2462 len != 0;
2463 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2464 p = page_find(addr >> TARGET_PAGE_BITS);
2465 if (!p)
2466 return -1;
2467 if (!(p->flags & PAGE_VALID))
2468 return -1;
2470 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2471 return -1;
2472 if (flags & PAGE_WRITE) {
2473 if (!(p->flags & PAGE_WRITE_ORG))
2474 return -1;
2475 /* unprotect the page if it was put read-only because it
2476 contains translated code */
2477 if (!(p->flags & PAGE_WRITE)) {
2478 if (!page_unprotect(addr, 0, NULL))
2479 return -1;
2481 return 0;
2484 return 0;
2487 /* called from signal handler: invalidate the code and unprotect the
2488 page. Return TRUE if the fault was successfully handled. */
2489 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2491 unsigned int prot;
2492 PageDesc *p;
2493 target_ulong host_start, host_end, addr;
2495 /* Technically this isn't safe inside a signal handler. However we
2496 know this only ever happens in a synchronous SEGV handler, so in
2497 practice it seems to be ok. */
2498 mmap_lock();
2500 p = page_find(address >> TARGET_PAGE_BITS);
2501 if (!p) {
2502 mmap_unlock();
2503 return 0;
2506 /* if the page was really writable, then we change its
2507 protection back to writable */
2508 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2509 host_start = address & qemu_host_page_mask;
2510 host_end = host_start + qemu_host_page_size;
2512 prot = 0;
2513 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2514 p = page_find(addr >> TARGET_PAGE_BITS);
2515 p->flags |= PAGE_WRITE;
2516 prot |= p->flags;
2518 /* and since the content will be modified, we must invalidate
2519 the corresponding translated code. */
2520 tb_invalidate_phys_page(addr, pc, puc);
2521 #ifdef DEBUG_TB_CHECK
2522 tb_invalidate_check(addr);
2523 #endif
2525 mprotect((void *)g2h(host_start), qemu_host_page_size,
2526 prot & PAGE_BITS);
2528 mmap_unlock();
2529 return 1;
2531 mmap_unlock();
2532 return 0;
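/* Editor's note: illustrative sketch added in this edit, not part of the
 * original file.  page_unprotect() is meant to be called from the host SEGV
 * handler in user-mode emulation: if the faulting page was made read-only only
 * because it contains translated code, the TBs are invalidated, PROT_WRITE is
 * restored and the guest store can simply be retried.  The helper name
 * example_handle_write_fault() is invented; the real caller is the CPU signal
 * handler. */
#if 0
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *puc)
{
    if (h2g_valid(host_addr) && page_unprotect(h2g(host_addr), pc, puc)) {
        return 1;   /* handled: translated code was flushed, restart the insn */
    }
    return 0;       /* a genuine guest fault: deliver the signal to the guest */
}
#endif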
2535 static inline void tlb_set_dirty(CPUState *env,
2536 unsigned long addr, target_ulong vaddr)
2539 #endif /* defined(CONFIG_USER_ONLY) */
2541 #if !defined(CONFIG_USER_ONLY)
2543 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2544 typedef struct subpage_t {
2545 target_phys_addr_t base;
2546 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2547 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2548 } subpage_t;
2550 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2551 ram_addr_t memory, ram_addr_t region_offset);
2552 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2553 ram_addr_t orig_memory,
2554 ram_addr_t region_offset);
2555 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2556 need_subpage) \
2557 do { \
2558 if (addr > start_addr) \
2559 start_addr2 = 0; \
2560 else { \
2561 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2562 if (start_addr2 > 0) \
2563 need_subpage = 1; \
2566 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2567 end_addr2 = TARGET_PAGE_SIZE - 1; \
2568 else { \
2569 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2570 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2571 need_subpage = 1; \
2573 } while (0)
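/* Editor's note: worked example added in this edit, not part of the original
 * file.  Note that CHECK_SUBPAGE() also reads the caller's 'orig_size'
 * variable by name rather than through its parameter list.
 * example_check_subpage() is an invented helper and assumes 4 KiB target
 * pages. */
#if 0
static void example_check_subpage(void)
{
    target_phys_addr_t start_addr2, end_addr2;
    ram_addr_t orig_size = 0x400;          /* picked up by the macro by name */
    int need_subpage = 0;

    /* registering 0x400 bytes at guest-physical 0x1800; examine the page at 0x1000 */
    CHECK_SUBPAGE(0x1000, 0x1800, start_addr2, 0x1c00, end_addr2, need_subpage);

    /* result: start_addr2 == 0x800, end_addr2 == 0xbff, need_subpage == 1,
       i.e. only bytes 0x800..0xbff of that page belong to the new region */
}
#endif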
2575 /* register physical memory.
2576 For RAM, 'size' must be a multiple of the target page size.
2577 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2578 io memory page. The address used when calling the IO function is
2579 the offset from the start of the region, plus region_offset. Both
2580 start_addr and region_offset are rounded down to a page boundary
2581 before calculating this offset. This should not be a problem unless
2582 the low bits of start_addr and region_offset differ. */
2583 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2584 ram_addr_t size,
2585 ram_addr_t phys_offset,
2586 ram_addr_t region_offset)
2588 target_phys_addr_t addr, end_addr;
2589 PhysPageDesc *p;
2590 CPUState *env;
2591 ram_addr_t orig_size = size;
2592 subpage_t *subpage;
2594 cpu_notify_set_memory(start_addr, size, phys_offset);
2596 if (phys_offset == IO_MEM_UNASSIGNED) {
2597 region_offset = start_addr;
2599 region_offset &= TARGET_PAGE_MASK;
2600 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2601 end_addr = start_addr + (target_phys_addr_t)size;
2602 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2603 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2604 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2605 ram_addr_t orig_memory = p->phys_offset;
2606 target_phys_addr_t start_addr2, end_addr2;
2607 int need_subpage = 0;
2609 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2610 need_subpage);
2611 if (need_subpage) {
2612 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2613 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2614 &p->phys_offset, orig_memory,
2615 p->region_offset);
2616 } else {
2617 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2618 >> IO_MEM_SHIFT];
2620 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2621 region_offset);
2622 p->region_offset = 0;
2623 } else {
2624 p->phys_offset = phys_offset;
2625 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2626 (phys_offset & IO_MEM_ROMD))
2627 phys_offset += TARGET_PAGE_SIZE;
2629 } else {
2630 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2631 p->phys_offset = phys_offset;
2632 p->region_offset = region_offset;
2633 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2634 (phys_offset & IO_MEM_ROMD)) {
2635 phys_offset += TARGET_PAGE_SIZE;
2636 } else {
2637 target_phys_addr_t start_addr2, end_addr2;
2638 int need_subpage = 0;
2640 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2641 end_addr2, need_subpage);
2643 if (need_subpage) {
2644 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2645 &p->phys_offset, IO_MEM_UNASSIGNED,
2646 addr & TARGET_PAGE_MASK);
2647 subpage_register(subpage, start_addr2, end_addr2,
2648 phys_offset, region_offset);
2649 p->region_offset = 0;
2653 region_offset += TARGET_PAGE_SIZE;
2656 /* since each CPU stores ram addresses in its TLB cache, we must
2657 reset the modified entries */
2658 /* XXX: slow ! */
2659 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2660 tlb_flush(env, 1);
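/* Editor's note: illustrative sketch added in this edit, not part of the
 * original file.  Board code usually reaches this function with one of two
 * kinds of phys_offset: a RAM offset returned by qemu_ram_alloc() (low bits ==
 * IO_MEM_RAM), or an I/O index returned by cpu_register_io_memory().  The
 * helper name example_map_board_memory() and the addresses are invented. */
#if 0
static void example_map_board_memory(int example_io_index)
{
    ram_addr_t ram = qemu_ram_alloc(0x2000000);              /* 32 MB of RAM */

    /* guest-physical [0x00000000, 0x02000000) backed by that RAM */
    cpu_register_physical_memory_offset(0x00000000, 0x2000000,
                                        ram | IO_MEM_RAM, 0);

    /* one page of MMIO at 0xfe000000; the callbacks see offsets from 0 */
    cpu_register_physical_memory_offset(0xfe000000, TARGET_PAGE_SIZE,
                                        example_io_index, 0);
}
#endif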
2664 /* XXX: temporary until new memory mapping API */
2665 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2667 PhysPageDesc *p;
2669 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2670 if (!p)
2671 return IO_MEM_UNASSIGNED;
2672 return p->phys_offset;
2675 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2677 if (kvm_enabled())
2678 kvm_coalesce_mmio_region(addr, size);
2681 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2683 if (kvm_enabled())
2684 kvm_uncoalesce_mmio_region(addr, size);
2687 void qemu_flush_coalesced_mmio_buffer(void)
2689 if (kvm_enabled())
2690 kvm_flush_coalesced_mmio_buffer();
2693 #if defined(__linux__) && !defined(TARGET_S390X)
2695 #include <sys/vfs.h>
2697 #define HUGETLBFS_MAGIC 0x958458f6
2699 static long gethugepagesize(const char *path)
2701 struct statfs fs;
2702 int ret;
2704 do {
2705 ret = statfs(path, &fs);
2706 } while (ret != 0 && errno == EINTR);
2708 if (ret != 0) {
2709 perror(path);
2710 return 0;
2713 if (fs.f_type != HUGETLBFS_MAGIC)
2714 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2716 return fs.f_bsize;
2719 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2721 char *filename;
2722 void *area;
2723 int fd;
2724 #ifdef MAP_POPULATE
2725 int flags;
2726 #endif
2727 unsigned long hpagesize;
2729 hpagesize = gethugepagesize(path);
2730 if (!hpagesize) {
2731 return NULL;
2734 if (memory < hpagesize) {
2735 return NULL;
2738 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2739 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2740 return NULL;
2743 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2744 return NULL;
2747 fd = mkstemp(filename);
2748 if (fd < 0) {
2749 perror("unable to create backing store for hugepages");
2750 free(filename);
2751 return NULL;
2753 unlink(filename);
2754 free(filename);
2756 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2759 * ftruncate is not supported by hugetlbfs in older
2760 * hosts, so don't bother bailing out on errors.
2761 * If anything goes wrong with it under other filesystems,
2762 * mmap will fail.
2764 if (ftruncate(fd, memory))
2765 perror("ftruncate");
2767 #ifdef MAP_POPULATE
2768 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2769 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2770 * to sidestep this quirk.
2772 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2773 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2774 #else
2775 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2776 #endif
2777 if (area == MAP_FAILED) {
2778 perror("file_ram_alloc: can't mmap RAM pages");
2779 close(fd);
2780 return (NULL);
2782 return area;
2784 #endif
2786 ram_addr_t qemu_ram_map(ram_addr_t size, void *host)
2788 RAMBlock *new_block;
2790 size = TARGET_PAGE_ALIGN(size);
2791 new_block = qemu_malloc(sizeof(*new_block));
2793 new_block->host = host;
2795 new_block->offset = ram_list.last_offset;
2796 new_block->length = size;
2798 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2800 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2801 (ram_list.last_offset + size) >> TARGET_PAGE_BITS);
2802 memset(ram_list.phys_dirty + (ram_list.last_offset >> TARGET_PAGE_BITS),
2803 0xff, size >> TARGET_PAGE_BITS);
2805 ram_list.last_offset += size;
2807 if (kvm_enabled())
2808 kvm_setup_guest_memory(new_block->host, size);
2810 return new_block->offset;
2813 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2815 RAMBlock *new_block;
2817 size = TARGET_PAGE_ALIGN(size);
2818 new_block = qemu_malloc(sizeof(*new_block));
2820 if (mem_path) {
2821 #if defined (__linux__) && !defined(TARGET_S390X)
2822 new_block->host = file_ram_alloc(size, mem_path);
2823 if (!new_block->host) {
2824 new_block->host = qemu_vmalloc(size);
2825 #ifdef MADV_MERGEABLE
2826 madvise(new_block->host, size, MADV_MERGEABLE);
2827 #endif
2829 #else
2830 fprintf(stderr, "-mem-path option unsupported\n");
2831 exit(1);
2832 #endif
2833 } else {
2834 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2835 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2836 new_block->host = mmap((void*)0x1000000, size,
2837 PROT_EXEC|PROT_READ|PROT_WRITE,
2838 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2839 #else
2840 new_block->host = qemu_vmalloc(size);
2841 #endif
2842 #ifdef MADV_MERGEABLE
2843 madvise(new_block->host, size, MADV_MERGEABLE);
2844 #endif
2846 new_block->offset = ram_list.last_offset;
2847 new_block->length = size;
2849 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2851 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2852 (ram_list.last_offset + size) >> TARGET_PAGE_BITS);
2853 memset(ram_list.phys_dirty + (ram_list.last_offset >> TARGET_PAGE_BITS),
2854 0xff, size >> TARGET_PAGE_BITS);
2856 ram_list.last_offset += size;
2858 if (kvm_enabled())
2859 kvm_setup_guest_memory(new_block->host, size);
2861 return new_block->offset;
2864 void qemu_ram_free(ram_addr_t addr)
2866 /* TODO: implement this. */
2869 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2870 With the exception of the softmmu code in this file, this should
2871 only be used for local memory (e.g. video ram) that the device owns,
2872 and knows it isn't going to access beyond the end of the block.
2874 It should not be used for general purpose DMA.
2875 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2877 void *qemu_get_ram_ptr(ram_addr_t addr)
2879 RAMBlock *block;
2881 QLIST_FOREACH(block, &ram_list.blocks, next) {
2882 if (addr - block->offset < block->length) {
2883 QLIST_REMOVE(block, next);
2884 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2885 return block->host + (addr - block->offset);
2889 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2890 abort();
2892 return NULL;
2895 int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2897 RAMBlock *block;
2898 uint8_t *host = ptr;
2900 QLIST_FOREACH(block, &ram_list.blocks, next) {
2901 if (host - block->host < block->length) {
2902 *ram_addr = block->offset + (host - block->host);
2903 return 0;
2906 return -1;
2909 /* Some of the softmmu routines need to translate from a host pointer
2910 (typically a TLB entry) back to a ram offset. */
2911 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2913 ram_addr_t ram_addr;
2915 if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
2916 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2917 abort();
2919 return ram_addr;
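/* Editor's note: illustrative sketch added in this edit, not part of the
 * original file.  qemu_get_ram_ptr() and qemu_ram_addr_from_host() are
 * inverses, which is what the TLB dirty-tracking code above relies on when it
 * maps the host pointer held in a TLB entry back to a ram offset.
 * example_round_trip() is an invented name. */
#if 0
static void example_round_trip(ram_addr_t ram_addr)
{
    void *host = qemu_get_ram_ptr(ram_addr);        /* ram offset -> host ptr */

    assert(qemu_ram_addr_from_host(host) == ram_addr);  /* and back again */
}
#endif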
2922 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2924 #ifdef DEBUG_UNASSIGNED
2925 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2926 #endif
2927 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2928 do_unassigned_access(addr, 0, 0, 0, 1);
2929 #endif
2930 return 0;
2933 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2935 #ifdef DEBUG_UNASSIGNED
2936 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2937 #endif
2938 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2939 do_unassigned_access(addr, 0, 0, 0, 2);
2940 #endif
2941 return 0;
2944 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2946 #ifdef DEBUG_UNASSIGNED
2947 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2948 #endif
2949 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2950 do_unassigned_access(addr, 0, 0, 0, 4);
2951 #endif
2952 return 0;
2955 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2957 #ifdef DEBUG_UNASSIGNED
2958 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2959 #endif
2960 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2961 do_unassigned_access(addr, 1, 0, 0, 1);
2962 #endif
2965 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2967 #ifdef DEBUG_UNASSIGNED
2968 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2969 #endif
2970 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2971 do_unassigned_access(addr, 1, 0, 0, 2);
2972 #endif
2975 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2977 #ifdef DEBUG_UNASSIGNED
2978 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2979 #endif
2980 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2981 do_unassigned_access(addr, 1, 0, 0, 4);
2982 #endif
2985 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2986 unassigned_mem_readb,
2987 unassigned_mem_readw,
2988 unassigned_mem_readl,
2991 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2992 unassigned_mem_writeb,
2993 unassigned_mem_writew,
2994 unassigned_mem_writel,
2997 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2998 uint32_t val)
3000 int dirty_flags;
3001 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3002 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3003 #if !defined(CONFIG_USER_ONLY)
3004 tb_invalidate_phys_page_fast(ram_addr, 1);
3005 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3006 #endif
3008 stb_p(qemu_get_ram_ptr(ram_addr), val);
3009 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3010 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3011 /* we remove the notdirty callback only if the code has been
3012 flushed */
3013 if (dirty_flags == 0xff)
3014 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3017 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3018 uint32_t val)
3020 int dirty_flags;
3021 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3022 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3023 #if !defined(CONFIG_USER_ONLY)
3024 tb_invalidate_phys_page_fast(ram_addr, 2);
3025 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3026 #endif
3028 stw_p(qemu_get_ram_ptr(ram_addr), val);
3029 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3030 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3031 /* we remove the notdirty callback only if the code has been
3032 flushed */
3033 if (dirty_flags == 0xff)
3034 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3037 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3038 uint32_t val)
3040 int dirty_flags;
3041 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3042 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3043 #if !defined(CONFIG_USER_ONLY)
3044 tb_invalidate_phys_page_fast(ram_addr, 4);
3045 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3046 #endif
3048 stl_p(qemu_get_ram_ptr(ram_addr), val);
3049 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3050 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3051 /* we remove the notdirty callback only if the code has been
3052 flushed */
3053 if (dirty_flags == 0xff)
3054 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3057 static CPUReadMemoryFunc * const error_mem_read[3] = {
3058 NULL, /* never used */
3059 NULL, /* never used */
3060 NULL, /* never used */
3063 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3064 notdirty_mem_writeb,
3065 notdirty_mem_writew,
3066 notdirty_mem_writel,
3069 /* Generate a debug exception if a watchpoint has been hit. */
3070 static void check_watchpoint(int offset, int len_mask, int flags)
3072 CPUState *env = cpu_single_env;
3073 target_ulong pc, cs_base;
3074 TranslationBlock *tb;
3075 target_ulong vaddr;
3076 CPUWatchpoint *wp;
3077 int cpu_flags;
3079 if (env->watchpoint_hit) {
3080 /* We re-entered the check after replacing the TB. Now raise
3081 * the debug interrupt so that it will trigger after the
3082 * current instruction. */
3083 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3084 return;
3086 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3087 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3088 if ((vaddr == (wp->vaddr & len_mask) ||
3089 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3090 wp->flags |= BP_WATCHPOINT_HIT;
3091 if (!env->watchpoint_hit) {
3092 env->watchpoint_hit = wp;
3093 tb = tb_find_pc(env->mem_io_pc);
3094 if (!tb) {
3095 cpu_abort(env, "check_watchpoint: could not find TB for "
3096 "pc=%p", (void *)env->mem_io_pc);
3098 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3099 tb_phys_invalidate(tb, -1);
3100 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3101 env->exception_index = EXCP_DEBUG;
3102 } else {
3103 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3104 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3106 cpu_resume_from_signal(env, NULL);
3108 } else {
3109 wp->flags &= ~BP_WATCHPOINT_HIT;
3114 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3115 so these check for a hit then pass through to the normal out-of-line
3116 phys routines. */
3117 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3119 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3120 return ldub_phys(addr);
3123 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3125 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3126 return lduw_phys(addr);
3129 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3131 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3132 return ldl_phys(addr);
3135 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3136 uint32_t val)
3138 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3139 stb_phys(addr, val);
3142 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3143 uint32_t val)
3145 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3146 stw_phys(addr, val);
3149 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3150 uint32_t val)
3152 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3153 stl_phys(addr, val);
3156 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3157 watch_mem_readb,
3158 watch_mem_readw,
3159 watch_mem_readl,
3162 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3163 watch_mem_writeb,
3164 watch_mem_writew,
3165 watch_mem_writel,
3168 static inline uint32_t subpage_readlen (subpage_t *mmio,
3169 target_phys_addr_t addr,
3170 unsigned int len)
3172 unsigned int idx = SUBPAGE_IDX(addr);
3173 #if defined(DEBUG_SUBPAGE)
3174 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3175 mmio, len, addr, idx);
3176 #endif
3178 addr += mmio->region_offset[idx];
3179 idx = mmio->sub_io_index[idx];
3180 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3183 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3184 uint32_t value, unsigned int len)
3186 unsigned int idx = SUBPAGE_IDX(addr);
3187 #if defined(DEBUG_SUBPAGE)
3188 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3189 __func__, mmio, len, addr, idx, value);
3190 #endif
3192 addr += mmio->region_offset[idx];
3193 idx = mmio->sub_io_index[idx];
3194 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3197 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3199 return subpage_readlen(opaque, addr, 0);
3202 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3203 uint32_t value)
3205 subpage_writelen(opaque, addr, value, 0);
3208 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3210 return subpage_readlen(opaque, addr, 1);
3213 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3214 uint32_t value)
3216 subpage_writelen(opaque, addr, value, 1);
3219 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3221 return subpage_readlen(opaque, addr, 2);
3224 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3225 uint32_t value)
3227 subpage_writelen(opaque, addr, value, 2);
3230 static CPUReadMemoryFunc * const subpage_read[] = {
3231 &subpage_readb,
3232 &subpage_readw,
3233 &subpage_readl,
3236 static CPUWriteMemoryFunc * const subpage_write[] = {
3237 &subpage_writeb,
3238 &subpage_writew,
3239 &subpage_writel,
3242 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3243 ram_addr_t memory, ram_addr_t region_offset)
3245 int idx, eidx;
3247 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3248 return -1;
3249 idx = SUBPAGE_IDX(start);
3250 eidx = SUBPAGE_IDX(end);
3251 #if defined(DEBUG_SUBPAGE)
3252 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3253 mmio, start, end, idx, eidx, memory);
3254 #endif
3255 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3256 for (; idx <= eidx; idx++) {
3257 mmio->sub_io_index[idx] = memory;
3258 mmio->region_offset[idx] = region_offset;
3261 return 0;
3264 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3265 ram_addr_t orig_memory,
3266 ram_addr_t region_offset)
3268 subpage_t *mmio;
3269 int subpage_memory;
3271 mmio = qemu_mallocz(sizeof(subpage_t));
3273 mmio->base = base;
3274 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3275 #if defined(DEBUG_SUBPAGE)
3276 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3277 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3278 #endif
3279 *phys = subpage_memory | IO_MEM_SUBPAGE;
3280 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3282 return mmio;
3285 static int get_free_io_mem_idx(void)
3287 int i;
3289 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3290 if (!io_mem_used[i]) {
3291 io_mem_used[i] = 1;
3292 return i;
3294 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3295 return -1;
3298 /* mem_read and mem_write are arrays of functions containing the
3299 function to access byte (index 0), word (index 1) and dword (index
3300 2). Functions can be omitted with a NULL function pointer.
3301 If io_index is non zero, the corresponding io zone is
3302 modified. If it is zero, a new io zone is allocated. The return
3303 value can be used with cpu_register_physical_memory(). (-1) is
3304 returned if error. */
3305 static int cpu_register_io_memory_fixed(int io_index,
3306 CPUReadMemoryFunc * const *mem_read,
3307 CPUWriteMemoryFunc * const *mem_write,
3308 void *opaque)
3310 int i;
3312 if (io_index <= 0) {
3313 io_index = get_free_io_mem_idx();
3314 if (io_index == -1)
3315 return io_index;
3316 } else {
3317 io_index >>= IO_MEM_SHIFT;
3318 if (io_index >= IO_MEM_NB_ENTRIES)
3319 return -1;
3322 for (i = 0; i < 3; ++i) {
3323 io_mem_read[io_index][i]
3324 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3326 for (i = 0; i < 3; ++i) {
3327 io_mem_write[io_index][i]
3328 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3330 io_mem_opaque[io_index] = opaque;
3332 return (io_index << IO_MEM_SHIFT);
3335 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3336 CPUWriteMemoryFunc * const *mem_write,
3337 void *opaque)
3339 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
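/* Editor's note: illustrative sketch added in this edit, not part of the
 * original file.  It shows the usual registration pattern for the interface
 * above: per-width callback tables (NULL slots fall back to the unassigned_mem
 * handlers), an opaque pointer passed back to every callback, and the returned
 * index handed to cpu_register_physical_memory_offset().  All example_* names
 * and the address 0xfe001000 are invented. */
#if 0
static uint32_t example_reg_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t *regs = opaque;

    return regs[addr >> 2];         /* addr is the offset within the region */
}

static void example_reg_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    uint32_t *regs = opaque;

    regs[addr >> 2] = val;
}

static CPUReadMemoryFunc * const example_reg_read[3] = {
    NULL, NULL, example_reg_readl,   /* byte/word accesses -> unassigned_mem */
};

static CPUWriteMemoryFunc * const example_reg_write[3] = {
    NULL, NULL, example_reg_writel,
};

static void example_register_device(uint32_t *regs)
{
    int io = cpu_register_io_memory(example_reg_read, example_reg_write, regs);

    cpu_register_physical_memory_offset(0xfe001000, TARGET_PAGE_SIZE, io, 0);
}
#endif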
3342 void cpu_unregister_io_memory(int io_table_address)
3344 int i;
3345 int io_index = io_table_address >> IO_MEM_SHIFT;
3347 for (i=0;i < 3; i++) {
3348 io_mem_read[io_index][i] = unassigned_mem_read[i];
3349 io_mem_write[io_index][i] = unassigned_mem_write[i];
3351 io_mem_opaque[io_index] = NULL;
3352 io_mem_used[io_index] = 0;
3355 static void io_mem_init(void)
3357 int i;
3359 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3360 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3361 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3362 for (i=0; i<5; i++)
3363 io_mem_used[i] = 1;
3365 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3366 watch_mem_write, NULL);
3369 #endif /* !defined(CONFIG_USER_ONLY) */
3371 /* physical memory access (slow version, mainly for debug) */
3372 #if defined(CONFIG_USER_ONLY)
3373 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3374 uint8_t *buf, int len, int is_write)
3376 int l, flags;
3377 target_ulong page;
3378 void * p;
3380 while (len > 0) {
3381 page = addr & TARGET_PAGE_MASK;
3382 l = (page + TARGET_PAGE_SIZE) - addr;
3383 if (l > len)
3384 l = len;
3385 flags = page_get_flags(page);
3386 if (!(flags & PAGE_VALID))
3387 return -1;
3388 if (is_write) {
3389 if (!(flags & PAGE_WRITE))
3390 return -1;
3391 /* XXX: this code should not depend on lock_user */
3392 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3393 return -1;
3394 memcpy(p, buf, l);
3395 unlock_user(p, addr, l);
3396 } else {
3397 if (!(flags & PAGE_READ))
3398 return -1;
3399 /* XXX: this code should not depend on lock_user */
3400 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3401 return -1;
3402 memcpy(buf, p, l);
3403 unlock_user(p, addr, 0);
3405 len -= l;
3406 buf += l;
3407 addr += l;
3409 return 0;
3412 #else
3413 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3414 int len, int is_write)
3416 int l, io_index;
3417 uint8_t *ptr;
3418 uint32_t val;
3419 target_phys_addr_t page;
3420 unsigned long pd;
3421 PhysPageDesc *p;
3423 while (len > 0) {
3424 page = addr & TARGET_PAGE_MASK;
3425 l = (page + TARGET_PAGE_SIZE) - addr;
3426 if (l > len)
3427 l = len;
3428 p = phys_page_find(page >> TARGET_PAGE_BITS);
3429 if (!p) {
3430 pd = IO_MEM_UNASSIGNED;
3431 } else {
3432 pd = p->phys_offset;
3435 if (is_write) {
3436 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3437 target_phys_addr_t addr1 = addr;
3438 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3439 if (p)
3440 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3441 /* XXX: could force cpu_single_env to NULL to avoid
3442 potential bugs */
3443 if (l >= 4 && ((addr1 & 3) == 0)) {
3444 /* 32 bit write access */
3445 val = ldl_p(buf);
3446 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3447 l = 4;
3448 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3449 /* 16 bit write access */
3450 val = lduw_p(buf);
3451 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3452 l = 2;
3453 } else {
3454 /* 8 bit write access */
3455 val = ldub_p(buf);
3456 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3457 l = 1;
3459 } else {
3460 unsigned long addr1;
3461 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3462 /* RAM case */
3463 ptr = qemu_get_ram_ptr(addr1);
3464 memcpy(ptr, buf, l);
3465 if (!cpu_physical_memory_is_dirty(addr1)) {
3466 /* invalidate code */
3467 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3468 /* set dirty bit */
3469 cpu_physical_memory_set_dirty_flags(
3470 addr1, (0xff & ~CODE_DIRTY_FLAG));
3472 /* qemu doesn't execute guest code directly, but kvm does;
3473 therefore flush the instruction caches */
3474 if (kvm_enabled())
3475 flush_icache_range((unsigned long)ptr,
3476 ((unsigned long)ptr)+l);
3478 } else {
3479 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3480 !(pd & IO_MEM_ROMD)) {
3481 target_phys_addr_t addr1 = addr;
3482 /* I/O case */
3483 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3484 if (p)
3485 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3486 if (l >= 4 && ((addr1 & 3) == 0)) {
3487 /* 32 bit read access */
3488 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3489 stl_p(buf, val);
3490 l = 4;
3491 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3492 /* 16 bit read access */
3493 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3494 stw_p(buf, val);
3495 l = 2;
3496 } else {
3497 /* 8 bit read access */
3498 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3499 stb_p(buf, val);
3500 l = 1;
3502 } else {
3503 /* RAM case */
3504 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3505 (addr & ~TARGET_PAGE_MASK);
3506 memcpy(buf, ptr, l);
3509 len -= l;
3510 buf += l;
3511 addr += l;
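/* Editor's note: illustrative sketch added in this edit, not part of the
 * original file.  Device emulation normally uses the cpu_physical_memory_read/
 * write wrappers, which funnel into the routine above and get per-page
 * splitting and per-width MMIO dispatch for free.  The descriptor layout and
 * the example_* names are invented; le32_to_cpu() is the byte-swap helper from
 * bswap.h. */
#if 0
struct example_desc {
    uint32_t buf_addr;
    uint32_t len;
};

static void example_fetch_desc(target_phys_addr_t desc_pa,
                               struct example_desc *d)
{
    cpu_physical_memory_read(desc_pa, (uint8_t *)d, sizeof(*d));
    d->buf_addr = le32_to_cpu(d->buf_addr);    /* guest stores little endian */
    d->len = le32_to_cpu(d->len);
}
#endif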
3515 /* used for ROM loading : can write in RAM and ROM */
3516 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3517 const uint8_t *buf, int len)
3519 int l;
3520 uint8_t *ptr;
3521 target_phys_addr_t page;
3522 unsigned long pd;
3523 PhysPageDesc *p;
3525 while (len > 0) {
3526 page = addr & TARGET_PAGE_MASK;
3527 l = (page + TARGET_PAGE_SIZE) - addr;
3528 if (l > len)
3529 l = len;
3530 p = phys_page_find(page >> TARGET_PAGE_BITS);
3531 if (!p) {
3532 pd = IO_MEM_UNASSIGNED;
3533 } else {
3534 pd = p->phys_offset;
3537 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3538 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3539 !(pd & IO_MEM_ROMD)) {
3540 /* do nothing */
3541 } else {
3542 unsigned long addr1;
3543 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3544 /* ROM/RAM case */
3545 ptr = qemu_get_ram_ptr(addr1);
3546 memcpy(ptr, buf, l);
3548 len -= l;
3549 buf += l;
3550 addr += l;
3554 typedef struct {
3555 void *buffer;
3556 target_phys_addr_t addr;
3557 target_phys_addr_t len;
3558 } BounceBuffer;
3560 static BounceBuffer bounce;
3562 typedef struct MapClient {
3563 void *opaque;
3564 void (*callback)(void *opaque);
3565 QLIST_ENTRY(MapClient) link;
3566 } MapClient;
3568 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3569 = QLIST_HEAD_INITIALIZER(map_client_list);
3571 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3573 MapClient *client = qemu_malloc(sizeof(*client));
3575 client->opaque = opaque;
3576 client->callback = callback;
3577 QLIST_INSERT_HEAD(&map_client_list, client, link);
3578 return client;
3581 void cpu_unregister_map_client(void *_client)
3583 MapClient *client = (MapClient *)_client;
3585 QLIST_REMOVE(client, link);
3586 qemu_free(client);
3589 static void cpu_notify_map_clients(void)
3591 MapClient *client;
3593 while (!QLIST_EMPTY(&map_client_list)) {
3594 client = QLIST_FIRST(&map_client_list);
3595 client->callback(client->opaque);
3596 cpu_unregister_map_client(client);
3600 /* Map a physical memory region into a host virtual address.
3601 * May map a subset of the requested range, given by and returned in *plen.
3602 * May return NULL if resources needed to perform the mapping are exhausted.
3603 * Use only for reads OR writes - not for read-modify-write operations.
3604 * Use cpu_register_map_client() to know when retrying the map operation is
3605 * likely to succeed.
3607 void *cpu_physical_memory_map(target_phys_addr_t addr,
3608 target_phys_addr_t *plen,
3609 int is_write)
3611 target_phys_addr_t len = *plen;
3612 target_phys_addr_t done = 0;
3613 int l;
3614 uint8_t *ret = NULL;
3615 uint8_t *ptr;
3616 target_phys_addr_t page;
3617 unsigned long pd;
3618 PhysPageDesc *p;
3619 unsigned long addr1;
3621 while (len > 0) {
3622 page = addr & TARGET_PAGE_MASK;
3623 l = (page + TARGET_PAGE_SIZE) - addr;
3624 if (l > len)
3625 l = len;
3626 p = phys_page_find(page >> TARGET_PAGE_BITS);
3627 if (!p) {
3628 pd = IO_MEM_UNASSIGNED;
3629 } else {
3630 pd = p->phys_offset;
3633 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3634 if (done || bounce.buffer) {
3635 break;
3637 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3638 bounce.addr = addr;
3639 bounce.len = l;
3640 if (!is_write) {
3641 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3643 ptr = bounce.buffer;
3644 } else {
3645 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3646 ptr = qemu_get_ram_ptr(addr1);
3648 if (!done) {
3649 ret = ptr;
3650 } else if (ret + done != ptr) {
3651 break;
3654 len -= l;
3655 addr += l;
3656 done += l;
3658 *plen = done;
3659 return ret;
3662 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3663 * Will also mark the memory as dirty if is_write == 1. access_len gives
3664 * the amount of memory that was actually read or written by the caller.
3666 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3667 int is_write, target_phys_addr_t access_len)
3669 unsigned long flush_len = (unsigned long)access_len;
3671 if (buffer != bounce.buffer) {
3672 if (is_write) {
3673 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3674 while (access_len) {
3675 unsigned l;
3676 l = TARGET_PAGE_SIZE;
3677 if (l > access_len)
3678 l = access_len;
3679 if (!cpu_physical_memory_is_dirty(addr1)) {
3680 /* invalidate code */
3681 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3682 /* set dirty bit */
3683 cpu_physical_memory_set_dirty_flags(
3684 addr1, (0xff & ~CODE_DIRTY_FLAG));
3686 addr1 += l;
3687 access_len -= l;
3689 dma_flush_range((unsigned long)buffer,
3690 (unsigned long)buffer + flush_len);
3692 return;
3694 if (is_write) {
3695 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3697 qemu_vfree(bounce.buffer);
3698 bounce.buffer = NULL;
3699 cpu_notify_map_clients();
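/* Editor's note: illustrative sketch added in this edit, not part of the
 * original file.  The usual zero-copy DMA pattern for the map/unmap pair
 * above: map as much as possible, fall back to cpu_physical_memory_write()
 * when the mapping fails (MMIO target, or the single bounce buffer already in
 * use), and always unmap with the length actually transferred so dirty bits
 * and code invalidation stay correct.  example_dma_write() is an invented
 * name. */
#if 0
static void example_dma_write(target_phys_addr_t guest_pa,
                              const uint8_t *data, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(guest_pa, &plen, 1 /* is_write */);

        if (!host) {
            /* resources exhausted: take the slow, copying path instead */
            cpu_physical_memory_write(guest_pa, (uint8_t *)data, len);
            return;
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        guest_pa += plen;
        data += plen;
        len -= plen;
    }
}
#endif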
3702 /* warning: addr must be aligned */
3703 uint32_t ldl_phys(target_phys_addr_t addr)
3705 int io_index;
3706 uint8_t *ptr;
3707 uint32_t val;
3708 unsigned long pd;
3709 PhysPageDesc *p;
3711 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3712 if (!p) {
3713 pd = IO_MEM_UNASSIGNED;
3714 } else {
3715 pd = p->phys_offset;
3718 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3719 !(pd & IO_MEM_ROMD)) {
3720 /* I/O case */
3721 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3722 if (p)
3723 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3724 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3725 } else {
3726 /* RAM case */
3727 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3728 (addr & ~TARGET_PAGE_MASK);
3729 val = ldl_p(ptr);
3731 return val;
3734 /* warning: addr must be aligned */
3735 uint64_t ldq_phys(target_phys_addr_t addr)
3737 int io_index;
3738 uint8_t *ptr;
3739 uint64_t val;
3740 unsigned long pd;
3741 PhysPageDesc *p;
3743 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3744 if (!p) {
3745 pd = IO_MEM_UNASSIGNED;
3746 } else {
3747 pd = p->phys_offset;
3750 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3751 !(pd & IO_MEM_ROMD)) {
3752 /* I/O case */
3753 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3754 if (p)
3755 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3756 #ifdef TARGET_WORDS_BIGENDIAN
3757 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3758 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3759 #else
3760 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3761 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3762 #endif
3763 } else {
3764 /* RAM case */
3765 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3766 (addr & ~TARGET_PAGE_MASK);
3767 val = ldq_p(ptr);
3769 return val;
3772 /* XXX: optimize */
3773 uint32_t ldub_phys(target_phys_addr_t addr)
3775 uint8_t val;
3776 cpu_physical_memory_read(addr, &val, 1);
3777 return val;
3780 /* warning: addr must be aligned */
3781 uint32_t lduw_phys(target_phys_addr_t addr)
3783 int io_index;
3784 uint8_t *ptr;
3785 uint64_t val;
3786 unsigned long pd;
3787 PhysPageDesc *p;
3789 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3790 if (!p) {
3791 pd = IO_MEM_UNASSIGNED;
3792 } else {
3793 pd = p->phys_offset;
3796 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3797 !(pd & IO_MEM_ROMD)) {
3798 /* I/O case */
3799 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3800 if (p)
3801 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3802 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3803 } else {
3804 /* RAM case */
3805 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3806 (addr & ~TARGET_PAGE_MASK);
3807 val = lduw_p(ptr);
3809 return val;
3812 /* warning: addr must be aligned. The ram page is not marked as dirty
3813 and the code inside is not invalidated. It is useful if the dirty
3814 bits are used to track modified PTEs */
3815 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3817 int io_index;
3818 uint8_t *ptr;
3819 unsigned long pd;
3820 PhysPageDesc *p;
3822 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3823 if (!p) {
3824 pd = IO_MEM_UNASSIGNED;
3825 } else {
3826 pd = p->phys_offset;
3829 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3830 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3831 if (p)
3832 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3833 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3834 } else {
3835 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3836 ptr = qemu_get_ram_ptr(addr1);
3837 stl_p(ptr, val);
3839 if (unlikely(in_migration)) {
3840 if (!cpu_physical_memory_is_dirty(addr1)) {
3841 /* invalidate code */
3842 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3843 /* set dirty bit */
3844 cpu_physical_memory_set_dirty_flags(
3845 addr1, (0xff & ~CODE_DIRTY_FLAG));
3851 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3853 int io_index;
3854 uint8_t *ptr;
3855 unsigned long pd;
3856 PhysPageDesc *p;
3858 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3859 if (!p) {
3860 pd = IO_MEM_UNASSIGNED;
3861 } else {
3862 pd = p->phys_offset;
3865 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3866 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3867 if (p)
3868 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3869 #ifdef TARGET_WORDS_BIGENDIAN
3870 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3871 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3872 #else
3873 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3874 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3875 #endif
3876 } else {
3877 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3878 (addr & ~TARGET_PAGE_MASK);
3879 stq_p(ptr, val);
3883 /* warning: addr must be aligned */
3884 void stl_phys(target_phys_addr_t addr, uint32_t val)
3886 int io_index;
3887 uint8_t *ptr;
3888 unsigned long pd;
3889 PhysPageDesc *p;
3891 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3892 if (!p) {
3893 pd = IO_MEM_UNASSIGNED;
3894 } else {
3895 pd = p->phys_offset;
3898 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3899 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3900 if (p)
3901 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3902 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3903 } else {
3904 unsigned long addr1;
3905 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3906 /* RAM case */
3907 ptr = qemu_get_ram_ptr(addr1);
3908 stl_p(ptr, val);
3909 if (!cpu_physical_memory_is_dirty(addr1)) {
3910 /* invalidate code */
3911 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3912 /* set dirty bit */
3913 cpu_physical_memory_set_dirty_flags(addr1,
3914 (0xff & ~CODE_DIRTY_FLAG));
3919 /* XXX: optimize */
3920 void stb_phys(target_phys_addr_t addr, uint32_t val)
3922 uint8_t v = val;
3923 cpu_physical_memory_write(addr, &v, 1);
3926 /* warning: addr must be aligned */
3927 void stw_phys(target_phys_addr_t addr, uint32_t val)
3929 int io_index;
3930 uint8_t *ptr;
3931 unsigned long pd;
3932 PhysPageDesc *p;
3934 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3935 if (!p) {
3936 pd = IO_MEM_UNASSIGNED;
3937 } else {
3938 pd = p->phys_offset;
3941 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3942 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3943 if (p)
3944 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3945 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3946 } else {
3947 unsigned long addr1;
3948 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3949 /* RAM case */
3950 ptr = qemu_get_ram_ptr(addr1);
3951 stw_p(ptr, val);
3952 if (!cpu_physical_memory_is_dirty(addr1)) {
3953 /* invalidate code */
3954 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3955 /* set dirty bit */
3956 cpu_physical_memory_set_dirty_flags(addr1,
3957 (0xff & ~CODE_DIRTY_FLAG));
3962 /* XXX: optimize */
3963 void stq_phys(target_phys_addr_t addr, uint64_t val)
3965 val = tswap64(val);
3966 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3969 /* virtual memory access for debug (includes writing to ROM) */
3970 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3971 uint8_t *buf, int len, int is_write)
3973 int l;
3974 target_phys_addr_t phys_addr;
3975 target_ulong page;
3977 while (len > 0) {
3978 page = addr & TARGET_PAGE_MASK;
3979 phys_addr = cpu_get_phys_page_debug(env, page);
3980 /* if no physical page mapped, return an error */
3981 if (phys_addr == -1)
3982 return -1;
3983 l = (page + TARGET_PAGE_SIZE) - addr;
3984 if (l > len)
3985 l = len;
3986 phys_addr += (addr & ~TARGET_PAGE_MASK);
3987 if (is_write)
3988 cpu_physical_memory_write_rom(phys_addr, buf, l);
3989 else
3990 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3991 len -= l;
3992 buf += l;
3993 addr += l;
3995 return 0;
3997 #endif
3999 /* in deterministic execution mode, instructions doing device I/Os
4000 must be at the end of the TB */
4001 void cpu_io_recompile(CPUState *env, void *retaddr)
4003 TranslationBlock *tb;
4004 uint32_t n, cflags;
4005 target_ulong pc, cs_base;
4006 uint64_t flags;
4008 tb = tb_find_pc((unsigned long)retaddr);
4009 if (!tb) {
4010 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4011 retaddr);
4013 n = env->icount_decr.u16.low + tb->icount;
4014 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4015 /* Calculate how many instructions had been executed before the fault
4016 occurred. */
4017 n = n - env->icount_decr.u16.low;
4018 /* Generate a new TB ending on the I/O insn. */
4019 n++;
4020 /* On MIPS and SH, delay slot instructions can only be restarted if
4021 they were already the first instruction in the TB. If this is not
4022 the first instruction in a TB then re-execute the preceding
4023 branch. */
4024 #if defined(TARGET_MIPS)
4025 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4026 env->active_tc.PC -= 4;
4027 env->icount_decr.u16.low++;
4028 env->hflags &= ~MIPS_HFLAG_BMASK;
4030 #elif defined(TARGET_SH4)
4031 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4032 && n > 1) {
4033 env->pc -= 2;
4034 env->icount_decr.u16.low++;
4035 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4037 #endif
4038 /* This should never happen. */
4039 if (n > CF_COUNT_MASK)
4040 cpu_abort(env, "TB too big during recompile");
4042 cflags = n | CF_LAST_IO;
4043 pc = tb->pc;
4044 cs_base = tb->cs_base;
4045 flags = tb->flags;
4046 tb_phys_invalidate(tb, -1);
4047 /* FIXME: In theory this could raise an exception. In practice
4048 we have already translated the block once so it's probably ok. */
4049 tb_gen_code(env, pc, cs_base, flags, cflags);
4050 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4051 the first in the TB) then we end up generating a whole new TB and
4052 repeating the fault, which is horribly inefficient.
4053 Better would be to execute just this insn uncached, or generate a
4054 second new TB. */
4055 cpu_resume_from_signal(env, NULL);
4058 #if !defined(CONFIG_USER_ONLY)
4060 void dump_exec_info(FILE *f,
4061 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4063 int i, target_code_size, max_target_code_size;
4064 int direct_jmp_count, direct_jmp2_count, cross_page;
4065 TranslationBlock *tb;
4067 target_code_size = 0;
4068 max_target_code_size = 0;
4069 cross_page = 0;
4070 direct_jmp_count = 0;
4071 direct_jmp2_count = 0;
4072 for(i = 0; i < nb_tbs; i++) {
4073 tb = &tbs[i];
4074 target_code_size += tb->size;
4075 if (tb->size > max_target_code_size)
4076 max_target_code_size = tb->size;
4077 if (tb->page_addr[1] != -1)
4078 cross_page++;
4079 if (tb->tb_next_offset[0] != 0xffff) {
4080 direct_jmp_count++;
4081 if (tb->tb_next_offset[1] != 0xffff) {
4082 direct_jmp2_count++;
4086 /* XXX: avoid using doubles ? */
4087 cpu_fprintf(f, "Translation buffer state:\n");
4088 cpu_fprintf(f, "gen code size %ld/%ld\n",
4089 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4090 cpu_fprintf(f, "TB count %d/%d\n",
4091 nb_tbs, code_gen_max_blocks);
4092 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4093 nb_tbs ? target_code_size / nb_tbs : 0,
4094 max_target_code_size);
4095 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4096 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4097 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4098 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4099 cross_page,
4100 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4101 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4102 direct_jmp_count,
4103 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4104 direct_jmp2_count,
4105 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4106 cpu_fprintf(f, "\nStatistics:\n");
4107 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4108 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4109 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4110 #ifdef CONFIG_PROFILER
4111 tcg_dump_info(f, cpu_fprintf);
4112 #endif
4115 #define MMUSUFFIX _cmmu
4116 #define GETPC() NULL
4117 #define env cpu_single_env
4118 #define SOFTMMU_CODE_ACCESS
4120 #define SHIFT 0
4121 #include "softmmu_template.h"
4123 #define SHIFT 1
4124 #include "softmmu_template.h"
4126 #define SHIFT 2
4127 #include "softmmu_template.h"
4129 #define SHIFT 3
4130 #include "softmmu_template.h"
4132 #undef env
4134 #endif