ramblocks: Make use of DeviceState pointer and BusInfo.get_dev_path
[qemu/kraxel.git] / exec.c
blob fd47d5bc9a813e7ace1643599df919e3297b8298
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "tcg.h"
38 #include "hw/hw.h"
39 #include "hw/qdev.h"
40 #include "osdep.h"
41 #include "kvm.h"
42 #include "qemu-timer.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #include <signal.h>
46 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
47 #include <sys/param.h>
48 #if __FreeBSD_version >= 700104
49 #define HAVE_KINFO_GETVMMAP
50 #define sigqueue sigqueue_freebsd /* avoid redefinition */
51 #include <sys/time.h>
52 #include <sys/proc.h>
53 #include <machine/profile.h>
54 #define _KERNEL
55 #include <sys/user.h>
56 #undef _KERNEL
57 #undef sigqueue
58 #include <libutil.h>
59 #endif
60 #endif
61 #endif
63 //#define DEBUG_TB_INVALIDATE
64 //#define DEBUG_FLUSH
65 //#define DEBUG_TLB
66 //#define DEBUG_UNASSIGNED
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
77 #undef DEBUG_TB_CHECK
78 #endif
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock *tbs;
83 int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92    section close to the code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 uint8_t *code_gen_ptr;
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 static int in_migration;
116 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
117 #endif
119 CPUState *first_cpu;
120 /* current CPU in the current thread. It is only valid inside
121 cpu_exec() */
122 CPUState *cpu_single_env;
123 /* 0 = Do not count executed instructions.
124 1 = Precise instruction counting.
125 2 = Adaptive rate instruction counting. */
126 int use_icount = 0;
127 /* Current instruction counter. While executing translated code this may
128 include some instructions that have not yet been executed. */
129 int64_t qemu_icount;
131 typedef struct PageDesc {
132 /* list of TBs intersecting this ram page */
133 TranslationBlock *first_tb;
134 /* in order to optimize self modifying code, we count the number
135 of lookups we do to a given page to use a bitmap */
136 unsigned int code_write_count;
137 uint8_t *code_bitmap;
138 #if defined(CONFIG_USER_ONLY)
139 unsigned long flags;
140 #endif
141 } PageDesc;
143 /* In system mode we want L1_MAP to be based on ram offsets,
144 while in user mode we want it to be based on virtual addresses. */
145 #if !defined(CONFIG_USER_ONLY)
146 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
147 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
148 #else
149 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
150 #endif
151 #else
152 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
153 #endif
155 /* Size of the L2 (and L3, etc) page tables. */
156 #define L2_BITS 10
157 #define L2_SIZE (1 << L2_BITS)
159 /* The bits remaining after N lower levels of page tables. */
160 #define P_L1_BITS_REM \
161 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
162 #define V_L1_BITS_REM \
163 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
165 /* Size of the L1 page table. Avoid silly small sizes. */
166 #if P_L1_BITS_REM < 4
167 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
168 #else
169 #define P_L1_BITS P_L1_BITS_REM
170 #endif
172 #if V_L1_BITS_REM < 4
173 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
174 #else
175 #define V_L1_BITS V_L1_BITS_REM
176 #endif
178 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
179 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
181 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
182 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
184 unsigned long qemu_real_host_page_size;
185 unsigned long qemu_host_page_bits;
186 unsigned long qemu_host_page_size;
187 unsigned long qemu_host_page_mask;
189 /* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191 static void *l1_map[V_L1_SIZE];
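/* Illustrative decomposition (assumed configuration, not taken from this
   file): with L1_MAP_ADDR_SPACE_BITS == 32, TARGET_PAGE_BITS == 12 and
   L2_BITS == 10, the macros above give V_L1_BITS == 10, V_L1_SIZE == 1024
   and V_L1_SHIFT == 10.  A page index (addr >> TARGET_PAGE_BITS, 20 bits)
   then splits into a 10-bit l1_map index (index >> V_L1_SHIFT) and a
   10-bit index into the leaf PageDesc array (index & (L2_SIZE - 1));
   page_find_alloc() below walks exactly these two levels for such a
   configuration. */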
193 #if !defined(CONFIG_USER_ONLY)
194 typedef struct PhysPageDesc {
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset;
197 ram_addr_t region_offset;
198 } PhysPageDesc;
200 /* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202 static void *l1_phys_map[P_L1_SIZE];
204 static void io_mem_init(void);
206 /* io memory support */
207 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
208 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
209 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
210 static char io_mem_used[IO_MEM_NB_ENTRIES];
211 static int io_mem_watch;
212 #endif
214 /* log support */
215 #ifdef WIN32
216 static const char *logfilename = "qemu.log";
217 #else
218 static const char *logfilename = "/tmp/qemu.log";
219 #endif
220 FILE *logfile;
221 int loglevel;
222 static int log_append = 0;
224 /* statistics */
225 #if !defined(CONFIG_USER_ONLY)
226 static int tlb_flush_count;
227 #endif
228 static int tb_flush_count;
229 static int tb_phys_invalidate_count;
231 #ifdef _WIN32
232 static void map_exec(void *addr, long size)
234 DWORD old_protect;
235 VirtualProtect(addr, size,
236 PAGE_EXECUTE_READWRITE, &old_protect);
239 #else
240 static void map_exec(void *addr, long size)
242 unsigned long start, end, page_size;
244 page_size = getpagesize();
245 start = (unsigned long)addr;
246 start &= ~(page_size - 1);
248 end = (unsigned long)addr + size;
249 end += page_size - 1;
250 end &= ~(page_size - 1);
252 mprotect((void *)start, end - start,
253 PROT_READ | PROT_WRITE | PROT_EXEC);
255 #endif
257 static void page_init(void)
259 /* NOTE: we can always suppose that qemu_host_page_size >=
260 TARGET_PAGE_SIZE */
261 #ifdef _WIN32
263 SYSTEM_INFO system_info;
265 GetSystemInfo(&system_info);
266 qemu_real_host_page_size = system_info.dwPageSize;
268 #else
269 qemu_real_host_page_size = getpagesize();
270 #endif
271 if (qemu_host_page_size == 0)
272 qemu_host_page_size = qemu_real_host_page_size;
273 if (qemu_host_page_size < TARGET_PAGE_SIZE)
274 qemu_host_page_size = TARGET_PAGE_SIZE;
275 qemu_host_page_bits = 0;
276 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
277 qemu_host_page_bits++;
278 qemu_host_page_mask = ~(qemu_host_page_size - 1);
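/* For example (common configuration, given only for illustration): a
   4096-byte host page yields qemu_host_page_bits == 12 and
   qemu_host_page_mask == ~0xfffUL. */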
280 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
282 #ifdef HAVE_KINFO_GETVMMAP
283 struct kinfo_vmentry *freep;
284 int i, cnt;
286 freep = kinfo_getvmmap(getpid(), &cnt);
287 if (freep) {
288 mmap_lock();
289 for (i = 0; i < cnt; i++) {
290 unsigned long startaddr, endaddr;
292 startaddr = freep[i].kve_start;
293 endaddr = freep[i].kve_end;
294 if (h2g_valid(startaddr)) {
295 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
297 if (h2g_valid(endaddr)) {
298 endaddr = h2g(endaddr);
299 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
300 } else {
301 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
302 endaddr = ~0ul;
303 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
304 #endif
308 free(freep);
309 mmap_unlock();
311 #else
312 FILE *f;
314 last_brk = (unsigned long)sbrk(0);
316 f = fopen("/compat/linux/proc/self/maps", "r");
317 if (f) {
318 mmap_lock();
320 do {
321 unsigned long startaddr, endaddr;
322 int n;
324 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
326 if (n == 2 && h2g_valid(startaddr)) {
327 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
329 if (h2g_valid(endaddr)) {
330 endaddr = h2g(endaddr);
331 } else {
332 endaddr = ~0ul;
334 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
336 } while (!feof(f));
338 fclose(f);
339 mmap_unlock();
341 #endif
343 #endif
346 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
348 PageDesc *pd;
349 void **lp;
350 int i;
352 #if defined(CONFIG_USER_ONLY)
353 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
354 # define ALLOC(P, SIZE) \
355 do { \
356 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
357 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
358 } while (0)
359 #else
360 # define ALLOC(P, SIZE) \
361 do { P = qemu_mallocz(SIZE); } while (0)
362 #endif
364 /* Level 1. Always allocated. */
365 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
367 /* Level 2..N-1. */
368 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
369 void **p = *lp;
371 if (p == NULL) {
372 if (!alloc) {
373 return NULL;
375 ALLOC(p, sizeof(void *) * L2_SIZE);
376 *lp = p;
379 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
382 pd = *lp;
383 if (pd == NULL) {
384 if (!alloc) {
385 return NULL;
387 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
388 *lp = pd;
391 #undef ALLOC
393 return pd + (index & (L2_SIZE - 1));
396 static inline PageDesc *page_find(tb_page_addr_t index)
398 return page_find_alloc(index, 0);
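/* Callers throughout this file index by page, e.g.:
       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
   A NULL result means nothing has ever been recorded for that page. */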
401 #if !defined(CONFIG_USER_ONLY)
402 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
404 PhysPageDesc *pd;
405 void **lp;
406 int i;
408 /* Level 1. Always allocated. */
409 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
411 /* Level 2..N-1. */
412 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
413 void **p = *lp;
414 if (p == NULL) {
415 if (!alloc) {
416 return NULL;
418 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
420 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
423 pd = *lp;
424 if (pd == NULL) {
425 int i;
427 if (!alloc) {
428 return NULL;
431 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
433 for (i = 0; i < L2_SIZE; i++) {
434 pd[i].phys_offset = IO_MEM_UNASSIGNED;
435 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
439 return pd + (index & (L2_SIZE - 1));
442 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
444 return phys_page_find_alloc(index, 0);
447 static void tlb_protect_code(ram_addr_t ram_addr);
448 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
449 target_ulong vaddr);
450 #define mmap_lock() do { } while(0)
451 #define mmap_unlock() do { } while(0)
452 #endif
454 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
456 #if defined(CONFIG_USER_ONLY)
457 /* Currently it is not recommended to allocate big chunks of data in
458    user mode. This will change once a dedicated libc is used. */
459 #define USE_STATIC_CODE_GEN_BUFFER
460 #endif
462 #ifdef USE_STATIC_CODE_GEN_BUFFER
463 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
464 __attribute__((aligned (CODE_GEN_ALIGN)));
465 #endif
467 static void code_gen_alloc(unsigned long tb_size)
469 #ifdef USE_STATIC_CODE_GEN_BUFFER
470 code_gen_buffer = static_code_gen_buffer;
471 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
472 map_exec(code_gen_buffer, code_gen_buffer_size);
473 #else
474 code_gen_buffer_size = tb_size;
475 if (code_gen_buffer_size == 0) {
476 #if defined(CONFIG_USER_ONLY)
477 /* in user mode, phys_ram_size is not meaningful */
478 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
479 #else
480 /* XXX: needs adjustments */
481 code_gen_buffer_size = (unsigned long)(ram_size / 4);
482 #endif
484 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
485 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
486 /* The code gen buffer location may have constraints depending on
487 the host cpu and OS */
488 #if defined(__linux__)
490 int flags;
491 void *start = NULL;
493 flags = MAP_PRIVATE | MAP_ANONYMOUS;
494 #if defined(__x86_64__)
495 flags |= MAP_32BIT;
496 /* Cannot map more than that */
497 if (code_gen_buffer_size > (800 * 1024 * 1024))
498 code_gen_buffer_size = (800 * 1024 * 1024);
499 #elif defined(__sparc_v9__)
500 // Map the buffer below 2G, so we can use direct calls and branches
501 flags |= MAP_FIXED;
502 start = (void *) 0x60000000UL;
503 if (code_gen_buffer_size > (512 * 1024 * 1024))
504 code_gen_buffer_size = (512 * 1024 * 1024);
505 #elif defined(__arm__)
506 /* Map the buffer below 32M, so we can use direct calls and branches */
507 flags |= MAP_FIXED;
508 start = (void *) 0x01000000UL;
509 if (code_gen_buffer_size > 16 * 1024 * 1024)
510 code_gen_buffer_size = 16 * 1024 * 1024;
511 #elif defined(__s390x__)
512 /* Map the buffer so that we can use direct calls and branches. */
513 /* We have a +- 4GB range on the branches; leave some slop. */
514 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
515 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
517 start = (void *)0x90000000UL;
518 #endif
519 code_gen_buffer = mmap(start, code_gen_buffer_size,
520 PROT_WRITE | PROT_READ | PROT_EXEC,
521 flags, -1, 0);
522 if (code_gen_buffer == MAP_FAILED) {
523 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
524 exit(1);
527 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
529 int flags;
530 void *addr = NULL;
531 flags = MAP_PRIVATE | MAP_ANONYMOUS;
532 #if defined(__x86_64__)
533 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
534 * 0x40000000 is free */
535 flags |= MAP_FIXED;
536 addr = (void *)0x40000000;
537 /* Cannot map more than that */
538 if (code_gen_buffer_size > (800 * 1024 * 1024))
539 code_gen_buffer_size = (800 * 1024 * 1024);
540 #endif
541 code_gen_buffer = mmap(addr, code_gen_buffer_size,
542 PROT_WRITE | PROT_READ | PROT_EXEC,
543 flags, -1, 0);
544 if (code_gen_buffer == MAP_FAILED) {
545 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
546 exit(1);
549 #else
550 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
551 map_exec(code_gen_buffer, code_gen_buffer_size);
552 #endif
553 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
554 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
555 code_gen_buffer_max_size = code_gen_buffer_size -
556 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
557 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
558 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
561 /* Must be called before using the QEMU cpus. 'tb_size' is the size
562 (in bytes) allocated to the translation buffer. Zero means default
563 size. */
564 void cpu_exec_init_all(unsigned long tb_size)
566 cpu_gen_init();
567 code_gen_alloc(tb_size);
568 code_gen_ptr = code_gen_buffer;
569 page_init();
570 #if !defined(CONFIG_USER_ONLY)
571 io_mem_init();
572 #endif
573 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
574 /* There's no guest base to take into account, so go ahead and
575 initialize the prologue now. */
576 tcg_prologue_init(&tcg_ctx);
577 #endif
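/* Sketch of a typical startup call (hypothetical caller; the argument is a
   size in bytes, 0 selects the default noted above):
       cpu_exec_init_all(0);
   It must run once, before any CPU is created. */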
580 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
582 static int cpu_common_post_load(void *opaque, int version_id)
584 CPUState *env = opaque;
586 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
587 version_id is increased. */
588 env->interrupt_request &= ~0x01;
589 tlb_flush(env, 1);
591 return 0;
594 static const VMStateDescription vmstate_cpu_common = {
595 .name = "cpu_common",
596 .version_id = 1,
597 .minimum_version_id = 1,
598 .minimum_version_id_old = 1,
599 .post_load = cpu_common_post_load,
600 .fields = (VMStateField []) {
601 VMSTATE_UINT32(halted, CPUState),
602 VMSTATE_UINT32(interrupt_request, CPUState),
603 VMSTATE_END_OF_LIST()
606 #endif
608 CPUState *qemu_get_cpu(int cpu)
610 CPUState *env = first_cpu;
612 while (env) {
613 if (env->cpu_index == cpu)
614 break;
615 env = env->next_cpu;
618 return env;
621 void cpu_exec_init(CPUState *env)
623 CPUState **penv;
624 int cpu_index;
626 #if defined(CONFIG_USER_ONLY)
627 cpu_list_lock();
628 #endif
629 env->next_cpu = NULL;
630 penv = &first_cpu;
631 cpu_index = 0;
632 while (*penv != NULL) {
633 penv = &(*penv)->next_cpu;
634 cpu_index++;
636 env->cpu_index = cpu_index;
637 env->numa_node = 0;
638 QTAILQ_INIT(&env->breakpoints);
639 QTAILQ_INIT(&env->watchpoints);
640 *penv = env;
641 #if defined(CONFIG_USER_ONLY)
642 cpu_list_unlock();
643 #endif
644 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
645 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
646 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
647 cpu_save, cpu_load, env);
648 #endif
651 static inline void invalidate_page_bitmap(PageDesc *p)
653 if (p->code_bitmap) {
654 qemu_free(p->code_bitmap);
655 p->code_bitmap = NULL;
657 p->code_write_count = 0;
660 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
662 static void page_flush_tb_1 (int level, void **lp)
664 int i;
666 if (*lp == NULL) {
667 return;
669 if (level == 0) {
670 PageDesc *pd = *lp;
671 for (i = 0; i < L2_SIZE; ++i) {
672 pd[i].first_tb = NULL;
673 invalidate_page_bitmap(pd + i);
675 } else {
676 void **pp = *lp;
677 for (i = 0; i < L2_SIZE; ++i) {
678 page_flush_tb_1 (level - 1, pp + i);
683 static void page_flush_tb(void)
685 int i;
686 for (i = 0; i < V_L1_SIZE; i++) {
687 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
691 /* flush all the translation blocks */
692 /* XXX: tb_flush is currently not thread safe */
693 void tb_flush(CPUState *env1)
695 CPUState *env;
696 #if defined(DEBUG_FLUSH)
697 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
698 (unsigned long)(code_gen_ptr - code_gen_buffer),
699 nb_tbs, nb_tbs > 0 ?
700 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
701 #endif
702 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
703 cpu_abort(env1, "Internal error: code buffer overflow\n");
705 nb_tbs = 0;
707 for(env = first_cpu; env != NULL; env = env->next_cpu) {
708 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
711 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
712 page_flush_tb();
714 code_gen_ptr = code_gen_buffer;
715 /* XXX: flush processor icache at this point if cache flush is
716 expensive */
717 tb_flush_count++;
720 #ifdef DEBUG_TB_CHECK
722 static void tb_invalidate_check(target_ulong address)
724 TranslationBlock *tb;
725 int i;
726 address &= TARGET_PAGE_MASK;
727 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
728 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
729 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
730 address >= tb->pc + tb->size)) {
731 printf("ERROR invalidate: address=" TARGET_FMT_lx
732 " PC=%08lx size=%04x\n",
733 address, (long)tb->pc, tb->size);
739 /* verify that all the pages have correct rights for code */
740 static void tb_page_check(void)
742 TranslationBlock *tb;
743 int i, flags1, flags2;
745 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
746 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
747 flags1 = page_get_flags(tb->pc);
748 flags2 = page_get_flags(tb->pc + tb->size - 1);
749 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
750 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
751 (long)tb->pc, tb->size, flags1, flags2);
757 #endif
759 /* invalidate one TB */
760 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
761 int next_offset)
763 TranslationBlock *tb1;
764 for(;;) {
765 tb1 = *ptb;
766 if (tb1 == tb) {
767 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
768 break;
770 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
774 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
776 TranslationBlock *tb1;
777 unsigned int n1;
779 for(;;) {
780 tb1 = *ptb;
781 n1 = (long)tb1 & 3;
782 tb1 = (TranslationBlock *)((long)tb1 & ~3);
783 if (tb1 == tb) {
784 *ptb = tb1->page_next[n1];
785 break;
787 ptb = &tb1->page_next[n1];
791 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
793 TranslationBlock *tb1, **ptb;
794 unsigned int n1;
796 ptb = &tb->jmp_next[n];
797 tb1 = *ptb;
798 if (tb1) {
799 /* find tb(n) in circular list */
800 for(;;) {
801 tb1 = *ptb;
802 n1 = (long)tb1 & 3;
803 tb1 = (TranslationBlock *)((long)tb1 & ~3);
804 if (n1 == n && tb1 == tb)
805 break;
806 if (n1 == 2) {
807 ptb = &tb1->jmp_first;
808 } else {
809 ptb = &tb1->jmp_next[n1];
812 /* now we can suppress tb(n) from the list */
813 *ptb = tb->jmp_next[n];
815 tb->jmp_next[n] = NULL;
819 /* reset the jump entry 'n' of a TB so that it is not chained to
820 another TB */
821 static inline void tb_reset_jump(TranslationBlock *tb, int n)
823 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
826 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
828 CPUState *env;
829 PageDesc *p;
830 unsigned int h, n1;
831 tb_page_addr_t phys_pc;
832 TranslationBlock *tb1, *tb2;
834 /* remove the TB from the hash list */
835 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
836 h = tb_phys_hash_func(phys_pc);
837 tb_remove(&tb_phys_hash[h], tb,
838 offsetof(TranslationBlock, phys_hash_next));
840 /* remove the TB from the page list */
841 if (tb->page_addr[0] != page_addr) {
842 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
843 tb_page_remove(&p->first_tb, tb);
844 invalidate_page_bitmap(p);
846 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
847 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
848 tb_page_remove(&p->first_tb, tb);
849 invalidate_page_bitmap(p);
852 tb_invalidated_flag = 1;
854 /* remove the TB from the hash list */
855 h = tb_jmp_cache_hash_func(tb->pc);
856 for(env = first_cpu; env != NULL; env = env->next_cpu) {
857 if (env->tb_jmp_cache[h] == tb)
858 env->tb_jmp_cache[h] = NULL;
861 /* suppress this TB from the two jump lists */
862 tb_jmp_remove(tb, 0);
863 tb_jmp_remove(tb, 1);
865 /* suppress any remaining jumps to this TB */
866 tb1 = tb->jmp_first;
867 for(;;) {
868 n1 = (long)tb1 & 3;
869 if (n1 == 2)
870 break;
871 tb1 = (TranslationBlock *)((long)tb1 & ~3);
872 tb2 = tb1->jmp_next[n1];
873 tb_reset_jump(tb1, n1);
874 tb1->jmp_next[n1] = NULL;
875 tb1 = tb2;
877 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
879 tb_phys_invalidate_count++;
882 static inline void set_bits(uint8_t *tab, int start, int len)
884 int end, mask, end1;
886 end = start + len;
887 tab += start >> 3;
888 mask = 0xff << (start & 7);
889 if ((start & ~7) == (end & ~7)) {
890 if (start < end) {
891 mask &= ~(0xff << (end & 7));
892 *tab |= mask;
894 } else {
895 *tab++ |= mask;
896 start = (start + 8) & ~7;
897 end1 = end & ~7;
898 while (start < end1) {
899 *tab++ = 0xff;
900 start += 8;
902 if (start < end) {
903 mask = ~(0xff << (end & 7));
904 *tab |= mask;
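/* Worked example (illustrative): set_bits(tab, 3, 7) marks bits 3..9,
   i.e. tab[0] |= 0xf8 (bits 3-7) and tab[1] |= 0x03 (bits 8-9).
   build_page_bitmap() below uses one bit per byte of guest code within
   a page. */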
909 static void build_page_bitmap(PageDesc *p)
911 int n, tb_start, tb_end;
912 TranslationBlock *tb;
914 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
916 tb = p->first_tb;
917 while (tb != NULL) {
918 n = (long)tb & 3;
919 tb = (TranslationBlock *)((long)tb & ~3);
920 /* NOTE: this is subtle as a TB may span two physical pages */
921 if (n == 0) {
922 /* NOTE: tb_end may be after the end of the page, but
923 it is not a problem */
924 tb_start = tb->pc & ~TARGET_PAGE_MASK;
925 tb_end = tb_start + tb->size;
926 if (tb_end > TARGET_PAGE_SIZE)
927 tb_end = TARGET_PAGE_SIZE;
928 } else {
929 tb_start = 0;
930 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
932 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
933 tb = tb->page_next[n];
937 TranslationBlock *tb_gen_code(CPUState *env,
938 target_ulong pc, target_ulong cs_base,
939 int flags, int cflags)
941 TranslationBlock *tb;
942 uint8_t *tc_ptr;
943 tb_page_addr_t phys_pc, phys_page2;
944 target_ulong virt_page2;
945 int code_gen_size;
947 phys_pc = get_page_addr_code(env, pc);
948 tb = tb_alloc(pc);
949 if (!tb) {
950 /* flush must be done */
951 tb_flush(env);
952 /* cannot fail at this point */
953 tb = tb_alloc(pc);
954 /* Don't forget to invalidate previous TB info. */
955 tb_invalidated_flag = 1;
957 tc_ptr = code_gen_ptr;
958 tb->tc_ptr = tc_ptr;
959 tb->cs_base = cs_base;
960 tb->flags = flags;
961 tb->cflags = cflags;
962 cpu_gen_code(env, tb, &code_gen_size);
963 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
965 /* check next page if needed */
966 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
967 phys_page2 = -1;
968 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
969 phys_page2 = get_page_addr_code(env, virt_page2);
971 tb_link_page(tb, phys_pc, phys_page2);
972 return tb;
975 /* invalidate all TBs which intersect with the target physical page
976    starting in range [start, end). NOTE: start and end must refer to
977 the same physical page. 'is_cpu_write_access' should be true if called
978 from a real cpu write access: the virtual CPU will exit the current
979 TB if code is modified inside this TB. */
980 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
981 int is_cpu_write_access)
983 TranslationBlock *tb, *tb_next, *saved_tb;
984 CPUState *env = cpu_single_env;
985 tb_page_addr_t tb_start, tb_end;
986 PageDesc *p;
987 int n;
988 #ifdef TARGET_HAS_PRECISE_SMC
989 int current_tb_not_found = is_cpu_write_access;
990 TranslationBlock *current_tb = NULL;
991 int current_tb_modified = 0;
992 target_ulong current_pc = 0;
993 target_ulong current_cs_base = 0;
994 int current_flags = 0;
995 #endif /* TARGET_HAS_PRECISE_SMC */
997 p = page_find(start >> TARGET_PAGE_BITS);
998 if (!p)
999 return;
1000 if (!p->code_bitmap &&
1001 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1002 is_cpu_write_access) {
1003 /* build code bitmap */
1004 build_page_bitmap(p);
1007     /* we remove all the TBs in the range [start, end) */
1008 /* XXX: see if in some cases it could be faster to invalidate all the code */
1009 tb = p->first_tb;
1010 while (tb != NULL) {
1011 n = (long)tb & 3;
1012 tb = (TranslationBlock *)((long)tb & ~3);
1013 tb_next = tb->page_next[n];
1014 /* NOTE: this is subtle as a TB may span two physical pages */
1015 if (n == 0) {
1016 /* NOTE: tb_end may be after the end of the page, but
1017 it is not a problem */
1018 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1019 tb_end = tb_start + tb->size;
1020 } else {
1021 tb_start = tb->page_addr[1];
1022 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1024 if (!(tb_end <= start || tb_start >= end)) {
1025 #ifdef TARGET_HAS_PRECISE_SMC
1026 if (current_tb_not_found) {
1027 current_tb_not_found = 0;
1028 current_tb = NULL;
1029 if (env->mem_io_pc) {
1030 /* now we have a real cpu fault */
1031 current_tb = tb_find_pc(env->mem_io_pc);
1034 if (current_tb == tb &&
1035 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1036 /* If we are modifying the current TB, we must stop
1037 its execution. We could be more precise by checking
1038 that the modification is after the current PC, but it
1039 would require a specialized function to partially
1040 restore the CPU state */
1042 current_tb_modified = 1;
1043 cpu_restore_state(current_tb, env,
1044 env->mem_io_pc, NULL);
1045 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1046 &current_flags);
1048 #endif /* TARGET_HAS_PRECISE_SMC */
1049 /* we need to do that to handle the case where a signal
1050 occurs while doing tb_phys_invalidate() */
1051 saved_tb = NULL;
1052 if (env) {
1053 saved_tb = env->current_tb;
1054 env->current_tb = NULL;
1056 tb_phys_invalidate(tb, -1);
1057 if (env) {
1058 env->current_tb = saved_tb;
1059 if (env->interrupt_request && env->current_tb)
1060 cpu_interrupt(env, env->interrupt_request);
1063 tb = tb_next;
1065 #if !defined(CONFIG_USER_ONLY)
1066 /* if no code remaining, no need to continue to use slow writes */
1067 if (!p->first_tb) {
1068 invalidate_page_bitmap(p);
1069 if (is_cpu_write_access) {
1070 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1073 #endif
1074 #ifdef TARGET_HAS_PRECISE_SMC
1075 if (current_tb_modified) {
1076 /* we generate a block containing just the instruction
1077 modifying the memory. It will ensure that it cannot modify
1078 itself */
1079 env->current_tb = NULL;
1080 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1081 cpu_resume_from_signal(env, NULL);
1083 #endif
1086 /* len must be <= 8 and start must be a multiple of len */
1087 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1089 PageDesc *p;
1090 int offset, b;
1091 #if 0
1092 if (1) {
1093 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1094 cpu_single_env->mem_io_vaddr, len,
1095 cpu_single_env->eip,
1096 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1098 #endif
1099 p = page_find(start >> TARGET_PAGE_BITS);
1100 if (!p)
1101 return;
1102 if (p->code_bitmap) {
1103 offset = start & ~TARGET_PAGE_MASK;
1104 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1105 if (b & ((1 << len) - 1))
1106 goto do_invalidate;
1107 } else {
1108 do_invalidate:
1109 tb_invalidate_phys_page_range(start, start + len, 1);
1113 #if !defined(CONFIG_SOFTMMU)
1114 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1115 unsigned long pc, void *puc)
1117 TranslationBlock *tb;
1118 PageDesc *p;
1119 int n;
1120 #ifdef TARGET_HAS_PRECISE_SMC
1121 TranslationBlock *current_tb = NULL;
1122 CPUState *env = cpu_single_env;
1123 int current_tb_modified = 0;
1124 target_ulong current_pc = 0;
1125 target_ulong current_cs_base = 0;
1126 int current_flags = 0;
1127 #endif
1129 addr &= TARGET_PAGE_MASK;
1130 p = page_find(addr >> TARGET_PAGE_BITS);
1131 if (!p)
1132 return;
1133 tb = p->first_tb;
1134 #ifdef TARGET_HAS_PRECISE_SMC
1135 if (tb && pc != 0) {
1136 current_tb = tb_find_pc(pc);
1138 #endif
1139 while (tb != NULL) {
1140 n = (long)tb & 3;
1141 tb = (TranslationBlock *)((long)tb & ~3);
1142 #ifdef TARGET_HAS_PRECISE_SMC
1143 if (current_tb == tb &&
1144 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1145 /* If we are modifying the current TB, we must stop
1146 its execution. We could be more precise by checking
1147 that the modification is after the current PC, but it
1148 would require a specialized function to partially
1149 restore the CPU state */
1151 current_tb_modified = 1;
1152 cpu_restore_state(current_tb, env, pc, puc);
1153 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1154 &current_flags);
1156 #endif /* TARGET_HAS_PRECISE_SMC */
1157 tb_phys_invalidate(tb, addr);
1158 tb = tb->page_next[n];
1160 p->first_tb = NULL;
1161 #ifdef TARGET_HAS_PRECISE_SMC
1162 if (current_tb_modified) {
1163 /* we generate a block containing just the instruction
1164 modifying the memory. It will ensure that it cannot modify
1165 itself */
1166 env->current_tb = NULL;
1167 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1168 cpu_resume_from_signal(env, puc);
1170 #endif
1172 #endif
1174 /* add the tb in the target page and protect it if necessary */
1175 static inline void tb_alloc_page(TranslationBlock *tb,
1176 unsigned int n, tb_page_addr_t page_addr)
1178 PageDesc *p;
1179 TranslationBlock *last_first_tb;
1181 tb->page_addr[n] = page_addr;
1182 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1183 tb->page_next[n] = p->first_tb;
1184 last_first_tb = p->first_tb;
1185 p->first_tb = (TranslationBlock *)((long)tb | n);
1186 invalidate_page_bitmap(p);
1188 #if defined(TARGET_HAS_SMC) || 1
1190 #if defined(CONFIG_USER_ONLY)
1191 if (p->flags & PAGE_WRITE) {
1192 target_ulong addr;
1193 PageDesc *p2;
1194 int prot;
1196 /* force the host page as non writable (writes will have a
1197 page fault + mprotect overhead) */
1198 page_addr &= qemu_host_page_mask;
1199 prot = 0;
1200 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1201 addr += TARGET_PAGE_SIZE) {
1203 p2 = page_find (addr >> TARGET_PAGE_BITS);
1204 if (!p2)
1205 continue;
1206 prot |= p2->flags;
1207 p2->flags &= ~PAGE_WRITE;
1209 mprotect(g2h(page_addr), qemu_host_page_size,
1210 (prot & PAGE_BITS) & ~PAGE_WRITE);
1211 #ifdef DEBUG_TB_INVALIDATE
1212 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1213 page_addr);
1214 #endif
1216 #else
1217 /* if some code is already present, then the pages are already
1218 protected. So we handle the case where only the first TB is
1219 allocated in a physical page */
1220 if (!last_first_tb) {
1221 tlb_protect_code(page_addr);
1223 #endif
1225 #endif /* TARGET_HAS_SMC */
1228 /* Allocate a new translation block. Flush the translation buffer if
1229 too many translation blocks or too much generated code. */
1230 TranslationBlock *tb_alloc(target_ulong pc)
1232 TranslationBlock *tb;
1234 if (nb_tbs >= code_gen_max_blocks ||
1235 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1236 return NULL;
1237 tb = &tbs[nb_tbs++];
1238 tb->pc = pc;
1239 tb->cflags = 0;
1240 return tb;
1243 void tb_free(TranslationBlock *tb)
1245     /* In practice this is mostly used for single-use temporary TBs.
1246 Ignore the hard cases and just back up if this TB happens to
1247 be the last one generated. */
1248 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1249 code_gen_ptr = tb->tc_ptr;
1250 nb_tbs--;
1254 /* add a new TB and link it to the physical page tables. phys_page2 is
1255 (-1) to indicate that only one page contains the TB. */
1256 void tb_link_page(TranslationBlock *tb,
1257 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1259 unsigned int h;
1260 TranslationBlock **ptb;
1262 /* Grab the mmap lock to stop another thread invalidating this TB
1263 before we are done. */
1264 mmap_lock();
1265 /* add in the physical hash table */
1266 h = tb_phys_hash_func(phys_pc);
1267 ptb = &tb_phys_hash[h];
1268 tb->phys_hash_next = *ptb;
1269 *ptb = tb;
1271 /* add in the page list */
1272 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1273 if (phys_page2 != -1)
1274 tb_alloc_page(tb, 1, phys_page2);
1275 else
1276 tb->page_addr[1] = -1;
1278 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1279 tb->jmp_next[0] = NULL;
1280 tb->jmp_next[1] = NULL;
1282 /* init original jump addresses */
1283 if (tb->tb_next_offset[0] != 0xffff)
1284 tb_reset_jump(tb, 0);
1285 if (tb->tb_next_offset[1] != 0xffff)
1286 tb_reset_jump(tb, 1);
1288 #ifdef DEBUG_TB_CHECK
1289 tb_page_check();
1290 #endif
1291 mmap_unlock();
1294 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1295 tb[1].tc_ptr. Return NULL if not found */
1296 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1298 int m_min, m_max, m;
1299 unsigned long v;
1300 TranslationBlock *tb;
1302 if (nb_tbs <= 0)
1303 return NULL;
1304 if (tc_ptr < (unsigned long)code_gen_buffer ||
1305 tc_ptr >= (unsigned long)code_gen_ptr)
1306 return NULL;
1307 /* binary search (cf Knuth) */
1308 m_min = 0;
1309 m_max = nb_tbs - 1;
1310 while (m_min <= m_max) {
1311 m = (m_min + m_max) >> 1;
1312 tb = &tbs[m];
1313 v = (unsigned long)tb->tc_ptr;
1314 if (v == tc_ptr)
1315 return tb;
1316 else if (tc_ptr < v) {
1317 m_max = m - 1;
1318 } else {
1319 m_min = m + 1;
1322 return &tbs[m_max];
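/* TBs are handed out from tbs[] in allocation order and their tc_ptr
   values grow monotonically within code_gen_buffer, so the binary search
   above returns the TB whose generated code contains tc_ptr (the last
   entry with tc_ptr <= the given host PC). */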
1325 static void tb_reset_jump_recursive(TranslationBlock *tb);
1327 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1329 TranslationBlock *tb1, *tb_next, **ptb;
1330 unsigned int n1;
1332 tb1 = tb->jmp_next[n];
1333 if (tb1 != NULL) {
1334 /* find head of list */
1335 for(;;) {
1336 n1 = (long)tb1 & 3;
1337 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1338 if (n1 == 2)
1339 break;
1340 tb1 = tb1->jmp_next[n1];
1342         /* we are now sure that tb jumps to tb1 */
1343 tb_next = tb1;
1345 /* remove tb from the jmp_first list */
1346 ptb = &tb_next->jmp_first;
1347 for(;;) {
1348 tb1 = *ptb;
1349 n1 = (long)tb1 & 3;
1350 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1351 if (n1 == n && tb1 == tb)
1352 break;
1353 ptb = &tb1->jmp_next[n1];
1355 *ptb = tb->jmp_next[n];
1356 tb->jmp_next[n] = NULL;
1358 /* suppress the jump to next tb in generated code */
1359 tb_reset_jump(tb, n);
1361 /* suppress jumps in the tb on which we could have jumped */
1362 tb_reset_jump_recursive(tb_next);
1366 static void tb_reset_jump_recursive(TranslationBlock *tb)
1368 tb_reset_jump_recursive2(tb, 0);
1369 tb_reset_jump_recursive2(tb, 1);
1372 #if defined(TARGET_HAS_ICE)
1373 #if defined(CONFIG_USER_ONLY)
1374 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1376 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1378 #else
1379 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1381 target_phys_addr_t addr;
1382 target_ulong pd;
1383 ram_addr_t ram_addr;
1384 PhysPageDesc *p;
1386 addr = cpu_get_phys_page_debug(env, pc);
1387 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1388 if (!p) {
1389 pd = IO_MEM_UNASSIGNED;
1390 } else {
1391 pd = p->phys_offset;
1393 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1394 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1396 #endif
1397 #endif /* TARGET_HAS_ICE */
1399 #if defined(CONFIG_USER_ONLY)
1400 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1405 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1406 int flags, CPUWatchpoint **watchpoint)
1408 return -ENOSYS;
1410 #else
1411 /* Add a watchpoint. */
1412 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1413 int flags, CPUWatchpoint **watchpoint)
1415 target_ulong len_mask = ~(len - 1);
1416 CPUWatchpoint *wp;
1418 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1419 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1420 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1421 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1422 return -EINVAL;
1424 wp = qemu_malloc(sizeof(*wp));
1426 wp->vaddr = addr;
1427 wp->len_mask = len_mask;
1428 wp->flags = flags;
1430 /* keep all GDB-injected watchpoints in front */
1431 if (flags & BP_GDB)
1432 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1433 else
1434 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1436 tlb_flush_page(env, addr);
1438 if (watchpoint)
1439 *watchpoint = wp;
1440 return 0;
1443 /* Remove a specific watchpoint. */
1444 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1445 int flags)
1447 target_ulong len_mask = ~(len - 1);
1448 CPUWatchpoint *wp;
1450 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1451 if (addr == wp->vaddr && len_mask == wp->len_mask
1452 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1453 cpu_watchpoint_remove_by_ref(env, wp);
1454 return 0;
1457 return -ENOENT;
1460 /* Remove a specific watchpoint by reference. */
1461 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1463 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1465 tlb_flush_page(env, watchpoint->vaddr);
1467 qemu_free(watchpoint);
1470 /* Remove all matching watchpoints. */
1471 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1473 CPUWatchpoint *wp, *next;
1475 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1476 if (wp->flags & mask)
1477 cpu_watchpoint_remove_by_ref(env, wp);
1480 #endif
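/* Usage sketch (hypothetical caller; the BP_MEM_WRITE and BP_GDB flag names
   are assumed from QEMU's CPU definitions, not defined in this file): a
   debug stub watching 4-byte writes at 'addr' could do

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
           // reject the request: invalid length or unaligned address
       }
       // ... later ...
       cpu_watchpoint_remove(env, addr, 4, BP_MEM_WRITE | BP_GDB);
*/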
1482 /* Add a breakpoint. */
1483 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1484 CPUBreakpoint **breakpoint)
1486 #if defined(TARGET_HAS_ICE)
1487 CPUBreakpoint *bp;
1489 bp = qemu_malloc(sizeof(*bp));
1491 bp->pc = pc;
1492 bp->flags = flags;
1494 /* keep all GDB-injected breakpoints in front */
1495 if (flags & BP_GDB)
1496 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1497 else
1498 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1500 breakpoint_invalidate(env, pc);
1502 if (breakpoint)
1503 *breakpoint = bp;
1504 return 0;
1505 #else
1506 return -ENOSYS;
1507 #endif
1510 /* Remove a specific breakpoint. */
1511 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1513 #if defined(TARGET_HAS_ICE)
1514 CPUBreakpoint *bp;
1516 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1517 if (bp->pc == pc && bp->flags == flags) {
1518 cpu_breakpoint_remove_by_ref(env, bp);
1519 return 0;
1522 return -ENOENT;
1523 #else
1524 return -ENOSYS;
1525 #endif
1528 /* Remove a specific breakpoint by reference. */
1529 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1531 #if defined(TARGET_HAS_ICE)
1532 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1534 breakpoint_invalidate(env, breakpoint->pc);
1536 qemu_free(breakpoint);
1537 #endif
1540 /* Remove all matching breakpoints. */
1541 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1543 #if defined(TARGET_HAS_ICE)
1544 CPUBreakpoint *bp, *next;
1546 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1547 if (bp->flags & mask)
1548 cpu_breakpoint_remove_by_ref(env, bp);
1550 #endif
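/* Corresponding breakpoint sketch (hypothetical caller; BP_GDB assumed from
   QEMU's CPU definitions):

       cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
       // ... later ...
       cpu_breakpoint_remove(env, pc, BP_GDB);

   breakpoint_invalidate() above discards any TBs covering 'pc', so the code
   is retranslated and the breakpoint is honoured on the next execution. */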
1553 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1554 CPU loop after each instruction */
1555 void cpu_single_step(CPUState *env, int enabled)
1557 #if defined(TARGET_HAS_ICE)
1558 if (env->singlestep_enabled != enabled) {
1559 env->singlestep_enabled = enabled;
1560 if (kvm_enabled())
1561 kvm_update_guest_debug(env, 0);
1562 else {
1563 /* must flush all the translated code to avoid inconsistencies */
1564 /* XXX: only flush what is necessary */
1565 tb_flush(env);
1568 #endif
1571 /* enable or disable low-level logging */
1572 void cpu_set_log(int log_flags)
1574 loglevel = log_flags;
1575 if (loglevel && !logfile) {
1576 logfile = fopen(logfilename, log_append ? "a" : "w");
1577 if (!logfile) {
1578 perror(logfilename);
1579 _exit(1);
1581 #if !defined(CONFIG_SOFTMMU)
1582 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1584 static char logfile_buf[4096];
1585 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1587 #elif !defined(_WIN32)
1588 /* Win32 doesn't support line-buffering and requires size >= 2 */
1589 setvbuf(logfile, NULL, _IOLBF, 0);
1590 #endif
1591 log_append = 1;
1593 if (!loglevel && logfile) {
1594 fclose(logfile);
1595 logfile = NULL;
1599 void cpu_set_log_filename(const char *filename)
1601 logfilename = strdup(filename);
1602 if (logfile) {
1603 fclose(logfile);
1604 logfile = NULL;
1606 cpu_set_log(loglevel);
1609 static void cpu_unlink_tb(CPUState *env)
1611 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1612 problem and hope the cpu will stop of its own accord. For userspace
1613 emulation this often isn't actually as bad as it sounds. Often
1614 signals are used primarily to interrupt blocking syscalls. */
1615 TranslationBlock *tb;
1616 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1618 spin_lock(&interrupt_lock);
1619 tb = env->current_tb;
1620 /* if the cpu is currently executing code, we must unlink it and
1621 all the potentially executing TB */
1622 if (tb) {
1623 env->current_tb = NULL;
1624 tb_reset_jump_recursive(tb);
1626 spin_unlock(&interrupt_lock);
1629 /* mask must never be zero, except for A20 change call */
1630 void cpu_interrupt(CPUState *env, int mask)
1632 int old_mask;
1634 old_mask = env->interrupt_request;
1635 env->interrupt_request |= mask;
1637 #ifndef CONFIG_USER_ONLY
1639 * If called from iothread context, wake the target cpu in
1640      * case it's halted.
1642 if (!qemu_cpu_self(env)) {
1643 qemu_cpu_kick(env);
1644 return;
1646 #endif
1648 if (use_icount) {
1649 env->icount_decr.u16.high = 0xffff;
1650 #ifndef CONFIG_USER_ONLY
1651 if (!can_do_io(env)
1652 && (mask & ~old_mask) != 0) {
1653 cpu_abort(env, "Raised interrupt while not in I/O function");
1655 #endif
1656 } else {
1657 cpu_unlink_tb(env);
1661 void cpu_reset_interrupt(CPUState *env, int mask)
1663 env->interrupt_request &= ~mask;
1666 void cpu_exit(CPUState *env)
1668 env->exit_request = 1;
1669 cpu_unlink_tb(env);
1672 const CPULogItem cpu_log_items[] = {
1673 { CPU_LOG_TB_OUT_ASM, "out_asm",
1674 "show generated host assembly code for each compiled TB" },
1675 { CPU_LOG_TB_IN_ASM, "in_asm",
1676 "show target assembly code for each compiled TB" },
1677 { CPU_LOG_TB_OP, "op",
1678 "show micro ops for each compiled TB" },
1679 { CPU_LOG_TB_OP_OPT, "op_opt",
1680 "show micro ops "
1681 #ifdef TARGET_I386
1682 "before eflags optimization and "
1683 #endif
1684 "after liveness analysis" },
1685 { CPU_LOG_INT, "int",
1686 "show interrupts/exceptions in short format" },
1687 { CPU_LOG_EXEC, "exec",
1688 "show trace before each executed TB (lots of logs)" },
1689 { CPU_LOG_TB_CPU, "cpu",
1690 "show CPU state before block translation" },
1691 #ifdef TARGET_I386
1692 { CPU_LOG_PCALL, "pcall",
1693 "show protected mode far calls/returns/exceptions" },
1694 { CPU_LOG_RESET, "cpu_reset",
1695 "show CPU state before CPU resets" },
1696 #endif
1697 #ifdef DEBUG_IOPORT
1698 { CPU_LOG_IOPORT, "ioport",
1699 "show all i/o ports accesses" },
1700 #endif
1701 { 0, NULL, NULL },
1704 #ifndef CONFIG_USER_ONLY
1705 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1706 = QLIST_HEAD_INITIALIZER(memory_client_list);
1708 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1709 ram_addr_t size,
1710 ram_addr_t phys_offset)
1712 CPUPhysMemoryClient *client;
1713 QLIST_FOREACH(client, &memory_client_list, list) {
1714 client->set_memory(client, start_addr, size, phys_offset);
1718 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1719 target_phys_addr_t end)
1721 CPUPhysMemoryClient *client;
1722 QLIST_FOREACH(client, &memory_client_list, list) {
1723 int r = client->sync_dirty_bitmap(client, start, end);
1724 if (r < 0)
1725 return r;
1727 return 0;
1730 static int cpu_notify_migration_log(int enable)
1732 CPUPhysMemoryClient *client;
1733 QLIST_FOREACH(client, &memory_client_list, list) {
1734 int r = client->migration_log(client, enable);
1735 if (r < 0)
1736 return r;
1738 return 0;
1741 static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1742 int level, void **lp)
1744 int i;
1746 if (*lp == NULL) {
1747 return;
1749 if (level == 0) {
1750 PhysPageDesc *pd = *lp;
1751 for (i = 0; i < L2_SIZE; ++i) {
1752 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1753 client->set_memory(client, pd[i].region_offset,
1754 TARGET_PAGE_SIZE, pd[i].phys_offset);
1757 } else {
1758 void **pp = *lp;
1759 for (i = 0; i < L2_SIZE; ++i) {
1760 phys_page_for_each_1(client, level - 1, pp + i);
1765 static void phys_page_for_each(CPUPhysMemoryClient *client)
1767 int i;
1768 for (i = 0; i < P_L1_SIZE; ++i) {
1769 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1770                              l1_phys_map + i);
1774 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1776 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1777 phys_page_for_each(client);
1780 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1782 QLIST_REMOVE(client, list);
1784 #endif
1786 static int cmp1(const char *s1, int n, const char *s2)
1788 if (strlen(s2) != n)
1789 return 0;
1790 return memcmp(s1, s2, n) == 0;
1793 /* takes a comma separated list of log masks. Return 0 if error. */
1794 int cpu_str_to_log_mask(const char *str)
1796 const CPULogItem *item;
1797 int mask;
1798 const char *p, *p1;
1800 p = str;
1801 mask = 0;
1802 for(;;) {
1803 p1 = strchr(p, ',');
1804 if (!p1)
1805 p1 = p + strlen(p);
1806 if(cmp1(p,p1-p,"all")) {
1807 for(item = cpu_log_items; item->mask != 0; item++) {
1808 mask |= item->mask;
1810 } else {
1811 for(item = cpu_log_items; item->mask != 0; item++) {
1812 if (cmp1(p, p1 - p, item->name))
1813 goto found;
1815 return 0;
1817 found:
1818 mask |= item->mask;
1819 if (*p1 != ',')
1820 break;
1821 p = p1 + 1;
1823 return mask;
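/* Example (illustrative): enabling two of the log items listed above from a
   command-line string:

       int mask = cpu_str_to_log_mask("in_asm,op");
       if (mask) {
           cpu_set_log(mask);
       }

   A return value of 0 indicates an unknown item name. */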
1826 void cpu_abort(CPUState *env, const char *fmt, ...)
1828 va_list ap;
1829 va_list ap2;
1831 va_start(ap, fmt);
1832 va_copy(ap2, ap);
1833 fprintf(stderr, "qemu: fatal: ");
1834 vfprintf(stderr, fmt, ap);
1835 fprintf(stderr, "\n");
1836 #ifdef TARGET_I386
1837 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1838 #else
1839 cpu_dump_state(env, stderr, fprintf, 0);
1840 #endif
1841 if (qemu_log_enabled()) {
1842 qemu_log("qemu: fatal: ");
1843 qemu_log_vprintf(fmt, ap2);
1844 qemu_log("\n");
1845 #ifdef TARGET_I386
1846 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1847 #else
1848 log_cpu_state(env, 0);
1849 #endif
1850 qemu_log_flush();
1851 qemu_log_close();
1853 va_end(ap2);
1854 va_end(ap);
1855 #if defined(CONFIG_USER_ONLY)
1857 struct sigaction act;
1858 sigfillset(&act.sa_mask);
1859 act.sa_handler = SIG_DFL;
1860 sigaction(SIGABRT, &act, NULL);
1862 #endif
1863 abort();
1866 CPUState *cpu_copy(CPUState *env)
1868 CPUState *new_env = cpu_init(env->cpu_model_str);
1869 CPUState *next_cpu = new_env->next_cpu;
1870 int cpu_index = new_env->cpu_index;
1871 #if defined(TARGET_HAS_ICE)
1872 CPUBreakpoint *bp;
1873 CPUWatchpoint *wp;
1874 #endif
1876 memcpy(new_env, env, sizeof(CPUState));
1878 /* Preserve chaining and index. */
1879 new_env->next_cpu = next_cpu;
1880 new_env->cpu_index = cpu_index;
1882 /* Clone all break/watchpoints.
1883 Note: Once we support ptrace with hw-debug register access, make sure
1884 BP_CPU break/watchpoints are handled correctly on clone. */
1885     QTAILQ_INIT(&new_env->breakpoints);
1886     QTAILQ_INIT(&new_env->watchpoints);
1887 #if defined(TARGET_HAS_ICE)
1888 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1889 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1891 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1892 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1893 wp->flags, NULL);
1895 #endif
1897 return new_env;
1900 #if !defined(CONFIG_USER_ONLY)
1902 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1904 unsigned int i;
1906 /* Discard jump cache entries for any tb which might potentially
1907 overlap the flushed page. */
1908 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1909 memset (&env->tb_jmp_cache[i], 0,
1910 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1912 i = tb_jmp_cache_hash_page(addr);
1913 memset (&env->tb_jmp_cache[i], 0,
1914 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1917 static CPUTLBEntry s_cputlb_empty_entry = {
1918 .addr_read = -1,
1919 .addr_write = -1,
1920 .addr_code = -1,
1921 .addend = -1,
1924 /* NOTE: if flush_global is true, also flush global entries (not
1925 implemented yet) */
1926 void tlb_flush(CPUState *env, int flush_global)
1928 int i;
1930 #if defined(DEBUG_TLB)
1931 printf("tlb_flush:\n");
1932 #endif
1933 /* must reset current TB so that interrupts cannot modify the
1934 links while we are modifying them */
1935 env->current_tb = NULL;
1937 for(i = 0; i < CPU_TLB_SIZE; i++) {
1938 int mmu_idx;
1939 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1940 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1944 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1946 env->tlb_flush_addr = -1;
1947 env->tlb_flush_mask = 0;
1948 tlb_flush_count++;
1951 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1953 if (addr == (tlb_entry->addr_read &
1954 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1955 addr == (tlb_entry->addr_write &
1956 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1957 addr == (tlb_entry->addr_code &
1958 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1959 *tlb_entry = s_cputlb_empty_entry;
1963 void tlb_flush_page(CPUState *env, target_ulong addr)
1965 int i;
1966 int mmu_idx;
1968 #if defined(DEBUG_TLB)
1969 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1970 #endif
1971 /* Check if we need to flush due to large pages. */
1972 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1973 #if defined(DEBUG_TLB)
1974 printf("tlb_flush_page: forced full flush ("
1975 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1976 env->tlb_flush_addr, env->tlb_flush_mask);
1977 #endif
1978 tlb_flush(env, 1);
1979 return;
1981 /* must reset current TB so that interrupts cannot modify the
1982 links while we are modifying them */
1983 env->current_tb = NULL;
1985 addr &= TARGET_PAGE_MASK;
1986 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1987 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1988 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1990 tlb_flush_jmp_cache(env, addr);
1993 /* update the TLBs so that writes to code in the virtual page 'addr'
1994 can be detected */
1995 static void tlb_protect_code(ram_addr_t ram_addr)
1997 cpu_physical_memory_reset_dirty(ram_addr,
1998 ram_addr + TARGET_PAGE_SIZE,
1999 CODE_DIRTY_FLAG);
2002 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2003 tested for self modifying code */
2004 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2005 target_ulong vaddr)
2007 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2010 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2011 unsigned long start, unsigned long length)
2013 unsigned long addr;
2014 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2015 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2016 if ((addr - start) < length) {
2017 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2022 /* Note: start and end must be within the same ram block. */
2023 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2024 int dirty_flags)
2026 CPUState *env;
2027 unsigned long length, start1;
2028 int i;
2030 start &= TARGET_PAGE_MASK;
2031 end = TARGET_PAGE_ALIGN(end);
2033 length = end - start;
2034 if (length == 0)
2035 return;
2036 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2038 /* we modify the TLB cache so that the dirty bit will be set again
2039 when accessing the range */
2040 start1 = (unsigned long)qemu_get_ram_ptr(start);
2041     /* Check that we don't span multiple blocks - this breaks the
2042 address comparisons below. */
2043 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2044 != (end - 1) - start) {
2045 abort();
2048 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2049 int mmu_idx;
2050 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2051 for(i = 0; i < CPU_TLB_SIZE; i++)
2052 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2053 start1, length);
2058 int cpu_physical_memory_set_dirty_tracking(int enable)
2060 int ret = 0;
2061 in_migration = enable;
2062 ret = cpu_notify_migration_log(!!enable);
2063 return ret;
2066 int cpu_physical_memory_get_dirty_tracking(void)
2068 return in_migration;
2071 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2072 target_phys_addr_t end_addr)
2074 int ret;
2076 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2077 return ret;
2080 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2082 ram_addr_t ram_addr;
2083 void *p;
2085 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2086 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2087 + tlb_entry->addend);
2088 ram_addr = qemu_ram_addr_from_host(p);
2089 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2090 tlb_entry->addr_write |= TLB_NOTDIRTY;
2095 /* update the TLB according to the current state of the dirty bits */
2096 void cpu_tlb_update_dirty(CPUState *env)
2098 int i;
2099 int mmu_idx;
2100 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2101 for(i = 0; i < CPU_TLB_SIZE; i++)
2102 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2106 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2108 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2109 tlb_entry->addr_write = vaddr;
2112 /* update the TLB corresponding to virtual page vaddr
2113 so that it is no longer dirty */
2114 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2116 int i;
2117 int mmu_idx;
2119 vaddr &= TARGET_PAGE_MASK;
2120 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2121 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2122 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2125 /* Our TLB does not support large pages, so remember the area covered by
2126 large pages and trigger a full TLB flush if these are invalidated. */
2127 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2128 target_ulong size)
2130 target_ulong mask = ~(size - 1);
2132 if (env->tlb_flush_addr == (target_ulong)-1) {
2133 env->tlb_flush_addr = vaddr & mask;
2134 env->tlb_flush_mask = mask;
2135 return;
2137 /* Extend the existing region to include the new page.
2138 This is a compromise between unnecessary flushes and the cost
2139 of maintaining a full variable size TLB. */
2140 mask &= env->tlb_flush_mask;
2141 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2142 mask <<= 1;
2144 env->tlb_flush_addr &= mask;
2145 env->tlb_flush_mask = mask;
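/* Worked example (illustration only, assuming 4 KiB target pages): mapping a
   2 MiB page at 0x00200000 while no large page is recorded yet stores
   tlb_flush_addr=0x00200000, tlb_flush_mask=0xffe00000.  Adding a second
   2 MiB page at 0x00600000 widens the mask until both addresses agree: the
   mask becomes 0xff800000 and the remembered region now spans
   0x00000000-0x007fffff.  Any tlb_flush_page() that hits this window falls
   back to a full tlb_flush(). */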
2148 /* Add a new TLB entry. At most one entry for a given virtual address
2149 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2150 supplied size is only used by tlb_flush_page. */
2151 void tlb_set_page(CPUState *env, target_ulong vaddr,
2152 target_phys_addr_t paddr, int prot,
2153 int mmu_idx, target_ulong size)
2155 PhysPageDesc *p;
2156 unsigned long pd;
2157 unsigned int index;
2158 target_ulong address;
2159 target_ulong code_address;
2160 unsigned long addend;
2161 CPUTLBEntry *te;
2162 CPUWatchpoint *wp;
2163 target_phys_addr_t iotlb;
2165 assert(size >= TARGET_PAGE_SIZE);
2166 if (size != TARGET_PAGE_SIZE) {
2167 tlb_add_large_page(env, vaddr, size);
2169 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2170 if (!p) {
2171 pd = IO_MEM_UNASSIGNED;
2172 } else {
2173 pd = p->phys_offset;
2175 #if defined(DEBUG_TLB)
2176 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d pd=0x%08lx\n",
2177 vaddr, (int)paddr, prot, mmu_idx, pd);
2178 #endif
2180 address = vaddr;
2181 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2182 /* IO memory case (romd handled later) */
2183 address |= TLB_MMIO;
2185 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2186 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2187 /* Normal RAM. */
2188 iotlb = pd & TARGET_PAGE_MASK;
2189 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2190 iotlb |= IO_MEM_NOTDIRTY;
2191 else
2192 iotlb |= IO_MEM_ROM;
2193 } else {
2194 /* IO handlers are currently passed a physical address.
2195 It would be nice to pass an offset from the base address
2196 of that region. This would avoid having to special case RAM,
2197 and avoid full address decoding in every device.
2198 We can't use the high bits of pd for this because
2199 IO_MEM_ROMD uses these as a ram address. */
2200 iotlb = (pd & ~TARGET_PAGE_MASK);
2201 if (p) {
2202 iotlb += p->region_offset;
2203 } else {
2204 iotlb += paddr;
2208 code_address = address;
2209 /* Make accesses to pages with watchpoints go via the
2210 watchpoint trap routines. */
2211 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2212 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2213 /* Avoid trapping reads of pages with a write breakpoint. */
2214 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2215 iotlb = io_mem_watch + paddr;
2216 address |= TLB_MMIO;
2217 break;
2222 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2223 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2224 te = &env->tlb_table[mmu_idx][index];
2225 te->addend = addend - vaddr;
2226 if (prot & PAGE_READ) {
2227 te->addr_read = address;
2228 } else {
2229 te->addr_read = -1;
2232 if (prot & PAGE_EXEC) {
2233 te->addr_code = code_address;
2234 } else {
2235 te->addr_code = -1;
2237 if (prot & PAGE_WRITE) {
2238 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2239 (pd & IO_MEM_ROMD)) {
2240 /* Write access calls the I/O callback. */
2241 te->addr_write = address | TLB_MMIO;
2242 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2243 !cpu_physical_memory_is_dirty(pd)) {
2244 te->addr_write = address | TLB_NOTDIRTY;
2245 } else {
2246 te->addr_write = address;
2248 } else {
2249 te->addr_write = -1;
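/* Illustrative caller (sketch; the real callers are the per-target MMU fault
   handlers outside this file): after walking the guest page tables, a fault
   handler installs the translation with something like

       tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                    prot, mmu_idx, TARGET_PAGE_SIZE);

   passing the real mapping size instead of TARGET_PAGE_SIZE when the guest
   used a large page, so tlb_add_large_page() above can track it. */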
2253 #else
2255 void tlb_flush(CPUState *env, int flush_global)
2259 void tlb_flush_page(CPUState *env, target_ulong addr)
2264 * Walks guest process memory "regions" one by one
2265 * and calls callback function 'fn' for each region.
2268 struct walk_memory_regions_data
2270 walk_memory_regions_fn fn;
2271 void *priv;
2272 unsigned long start;
2273 int prot;
2276 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2277 abi_ulong end, int new_prot)
2279 if (data->start != -1ul) {
2280 int rc = data->fn(data->priv, data->start, end, data->prot);
2281 if (rc != 0) {
2282 return rc;
2286 data->start = (new_prot ? end : -1ul);
2287 data->prot = new_prot;
2289 return 0;
2292 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2293 abi_ulong base, int level, void **lp)
2295 abi_ulong pa;
2296 int i, rc;
2298 if (*lp == NULL) {
2299 return walk_memory_regions_end(data, base, 0);
2302 if (level == 0) {
2303 PageDesc *pd = *lp;
2304 for (i = 0; i < L2_SIZE; ++i) {
2305 int prot = pd[i].flags;
2307 pa = base | (i << TARGET_PAGE_BITS);
2308 if (prot != data->prot) {
2309 rc = walk_memory_regions_end(data, pa, prot);
2310 if (rc != 0) {
2311 return rc;
2315 } else {
2316 void **pp = *lp;
2317 for (i = 0; i < L2_SIZE; ++i) {
2318 pa = base | ((abi_ulong)i <<
2319 (TARGET_PAGE_BITS + L2_BITS * level));
2320 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2321 if (rc != 0) {
2322 return rc;
2327 return 0;
2330 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2332 struct walk_memory_regions_data data;
2333 unsigned long i;
2335 data.fn = fn;
2336 data.priv = priv;
2337 data.start = -1ul;
2338 data.prot = 0;
2340 for (i = 0; i < V_L1_SIZE; i++) {
2341 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2342 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2343 if (rc != 0) {
2344 return rc;
2348 return walk_memory_regions_end(&data, 0, 0);
2351 static int dump_region(void *priv, abi_ulong start,
2352 abi_ulong end, unsigned long prot)
2354 FILE *f = (FILE *)priv;
2356 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2357 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2358 start, end, end - start,
2359 ((prot & PAGE_READ) ? 'r' : '-'),
2360 ((prot & PAGE_WRITE) ? 'w' : '-'),
2361 ((prot & PAGE_EXEC) ? 'x' : '-'));
2363 return (0);
2366 /* dump memory mappings */
2367 void page_dump(FILE *f)
2369 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2370 "start", "end", "size", "prot");
2371 walk_memory_regions(f, dump_region);
2374 int page_get_flags(target_ulong address)
2376 PageDesc *p;
2378 p = page_find(address >> TARGET_PAGE_BITS);
2379 if (!p)
2380 return 0;
2381 return p->flags;
2384 /* Modify the flags of a page and invalidate the code if necessary.
2385 The flag PAGE_WRITE_ORG is positioned automatically depending
2386 on PAGE_WRITE. The mmap_lock should already be held. */
2387 void page_set_flags(target_ulong start, target_ulong end, int flags)
2389 target_ulong addr, len;
2391 /* This function should never be called with addresses outside the
2392 guest address space. If this assert fires, it probably indicates
2393 a missing call to h2g_valid. */
2394 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2395 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2396 #endif
2397 assert(start < end);
2399 start = start & TARGET_PAGE_MASK;
2400 end = TARGET_PAGE_ALIGN(end);
2402 if (flags & PAGE_WRITE) {
2403 flags |= PAGE_WRITE_ORG;
2406 for (addr = start, len = end - start;
2407 len != 0;
2408 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2409 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2411 /* If the write protection bit is set, then we invalidate
2412 the code inside. */
2413 if (!(p->flags & PAGE_WRITE) &&
2414 (flags & PAGE_WRITE) &&
2415 p->first_tb) {
2416 tb_invalidate_phys_page(addr, 0, NULL);
2418 p->flags = flags;
2422 int page_check_range(target_ulong start, target_ulong len, int flags)
2424 PageDesc *p;
2425 target_ulong end;
2426 target_ulong addr;
2428 /* This function should never be called with addresses outside the
2429 guest address space. If this assert fires, it probably indicates
2430 a missing call to h2g_valid. */
2431 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2432 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2433 #endif
2435 if (len == 0) {
2436 return 0;
2438 if (start + len - 1 < start) {
2439 /* We've wrapped around. */
2440 return -1;
2443 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2444 start = start & TARGET_PAGE_MASK;
2446 for (addr = start, len = end - start;
2447 len != 0;
2448 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2449 p = page_find(addr >> TARGET_PAGE_BITS);
2450 if (!p)
2451 return -1;
2452 if (!(p->flags & PAGE_VALID))
2453 return -1;
2455 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2456 return -1;
2457 if (flags & PAGE_WRITE) {
2458 if (!(p->flags & PAGE_WRITE_ORG))
2459 return -1;
2460 /* unprotect the page if it was put read-only because it
2461 contains translated code */
2462 if (!(p->flags & PAGE_WRITE)) {
2463 if (!page_unprotect(addr, 0, NULL))
2464 return -1;
2466 return 0;
2469 return 0;
2472 /* called from signal handler: invalidate the code and unprotect the
2473 page. Return TRUE if the fault was successfully handled. */
2474 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2476 unsigned int prot;
2477 PageDesc *p;
2478 target_ulong host_start, host_end, addr;
2480 /* Technically this isn't safe inside a signal handler. However we
2481 know this only ever happens in a synchronous SEGV handler, so in
2482 practice it seems to be ok. */
2483 mmap_lock();
2485 p = page_find(address >> TARGET_PAGE_BITS);
2486 if (!p) {
2487 mmap_unlock();
2488 return 0;
2491 /* if the page was really writable, then we change its
2492 protection back to writable */
2493 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2494 host_start = address & qemu_host_page_mask;
2495 host_end = host_start + qemu_host_page_size;
2497 prot = 0;
2498 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2499 p = page_find(addr >> TARGET_PAGE_BITS);
2500 p->flags |= PAGE_WRITE;
2501 prot |= p->flags;
2503 /* and since the content will be modified, we must invalidate
2504 the corresponding translated code. */
2505 tb_invalidate_phys_page(addr, pc, puc);
2506 #ifdef DEBUG_TB_CHECK
2507 tb_invalidate_check(addr);
2508 #endif
2510 mprotect((void *)g2h(host_start), qemu_host_page_size,
2511 prot & PAGE_BITS);
2513 mmap_unlock();
2514 return 1;
2516 mmap_unlock();
2517 return 0;
2520 static inline void tlb_set_dirty(CPUState *env,
2521 unsigned long addr, target_ulong vaddr)
2524 #endif /* defined(CONFIG_USER_ONLY) */
2526 #if !defined(CONFIG_USER_ONLY)
2528 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2529 typedef struct subpage_t {
2530 target_phys_addr_t base;
2531 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2532 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2533 } subpage_t;
2535 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2536 ram_addr_t memory, ram_addr_t region_offset);
2537 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2538 ram_addr_t orig_memory,
2539 ram_addr_t region_offset);
2540 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2541 need_subpage) \
2542 do { \
2543 if (addr > start_addr) \
2544 start_addr2 = 0; \
2545 else { \
2546 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2547 if (start_addr2 > 0) \
2548 need_subpage = 1; \
2551 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2552 end_addr2 = TARGET_PAGE_SIZE - 1; \
2553 else { \
2554 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2555 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2556 need_subpage = 1; \
2558 } while (0)
2560 /* register physical memory.
2561 For RAM, 'size' must be a multiple of the target page size.
2562 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2563 io memory page. The address used when calling the IO function is
2564 the offset from the start of the region, plus region_offset. Both
2565 start_addr and region_offset are rounded down to a page boundary
2566 before calculating this offset. This should not be a problem unless
2567 the low bits of start_addr and region_offset differ. */
2568 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2569 ram_addr_t size,
2570 ram_addr_t phys_offset,
2571 ram_addr_t region_offset)
2573 target_phys_addr_t addr, end_addr;
2574 PhysPageDesc *p;
2575 CPUState *env;
2576 ram_addr_t orig_size = size;
2577 subpage_t *subpage;
2579 cpu_notify_set_memory(start_addr, size, phys_offset);
2581 if (phys_offset == IO_MEM_UNASSIGNED) {
2582 region_offset = start_addr;
2584 region_offset &= TARGET_PAGE_MASK;
2585 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2586 end_addr = start_addr + (target_phys_addr_t)size;
2587 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2588 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2589 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2590 ram_addr_t orig_memory = p->phys_offset;
2591 target_phys_addr_t start_addr2, end_addr2;
2592 int need_subpage = 0;
2594 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2595 need_subpage);
2596 if (need_subpage) {
2597 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2598 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2599 &p->phys_offset, orig_memory,
2600 p->region_offset);
2601 } else {
2602 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2603 >> IO_MEM_SHIFT];
2605 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2606 region_offset);
2607 p->region_offset = 0;
2608 } else {
2609 p->phys_offset = phys_offset;
2610 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2611 (phys_offset & IO_MEM_ROMD))
2612 phys_offset += TARGET_PAGE_SIZE;
2614 } else {
2615 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2616 p->phys_offset = phys_offset;
2617 p->region_offset = region_offset;
2618 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2619 (phys_offset & IO_MEM_ROMD)) {
2620 phys_offset += TARGET_PAGE_SIZE;
2621 } else {
2622 target_phys_addr_t start_addr2, end_addr2;
2623 int need_subpage = 0;
2625 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2626 end_addr2, need_subpage);
2628 if (need_subpage) {
2629 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2630 &p->phys_offset, IO_MEM_UNASSIGNED,
2631 addr & TARGET_PAGE_MASK);
2632 subpage_register(subpage, start_addr2, end_addr2,
2633 phys_offset, region_offset);
2634 p->region_offset = 0;
2638 region_offset += TARGET_PAGE_SIZE;
2641 /* since each CPU stores ram addresses in its TLB cache, we must
2642 reset the modified entries */
2643 /* XXX: slow ! */
2644 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2645 tlb_flush(env, 1);
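/* Illustrative usage (sketch; callers live in board and device code, names
   are hypothetical): both RAM and MMIO go through this interface.
   'ram_offset' would come from qemu_ram_alloc() and 'mmio_index' from
   cpu_register_io_memory() below:

       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(0x10000000, 0x1000, mmio_index);

   cpu_register_physical_memory() is the usual wrapper that ends up here
   with region_offset == 0. */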
2649 /* XXX: temporary until new memory mapping API */
2650 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2652 PhysPageDesc *p;
2654 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2655 if (!p)
2656 return IO_MEM_UNASSIGNED;
2657 return p->phys_offset;
2660 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2662 if (kvm_enabled())
2663 kvm_coalesce_mmio_region(addr, size);
2666 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2668 if (kvm_enabled())
2669 kvm_uncoalesce_mmio_region(addr, size);
2672 void qemu_flush_coalesced_mmio_buffer(void)
2674 if (kvm_enabled())
2675 kvm_flush_coalesced_mmio_buffer();
2678 #if defined(__linux__) && !defined(TARGET_S390X)
2680 #include <sys/vfs.h>
2682 #define HUGETLBFS_MAGIC 0x958458f6
2684 static long gethugepagesize(const char *path)
2686 struct statfs fs;
2687 int ret;
2689 do {
2690 ret = statfs(path, &fs);
2691 } while (ret != 0 && errno == EINTR);
2693 if (ret != 0) {
2694 perror(path);
2695 return 0;
2698 if (fs.f_type != HUGETLBFS_MAGIC)
2699 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2701 return fs.f_bsize;
2704 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2706 char *filename;
2707 void *area;
2708 int fd;
2709 #ifdef MAP_POPULATE
2710 int flags;
2711 #endif
2712 unsigned long hpagesize;
2714 hpagesize = gethugepagesize(path);
2715 if (!hpagesize) {
2716 return NULL;
2719 if (memory < hpagesize) {
2720 return NULL;
2723 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2724 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2725 return NULL;
2728 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2729 return NULL;
2732 fd = mkstemp(filename);
2733 if (fd < 0) {
2734 perror("unable to create backing store for hugepages");
2735 free(filename);
2736 return NULL;
2738 unlink(filename);
2739 free(filename);
2741 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2744 * ftruncate is not supported by hugetlbfs in older
2745 * hosts, so don't bother bailing out on errors.
2746 * If anything goes wrong with it under other filesystems,
2747 * mmap will fail.
2749 if (ftruncate(fd, memory))
2750 perror("ftruncate");
2752 #ifdef MAP_POPULATE
2753 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2754 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2755 * to sidestep this quirk.
2757 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2758 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2759 #else
2760 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2761 #endif
2762 if (area == MAP_FAILED) {
2763 perror("file_ram_alloc: can't mmap RAM pages");
2764 close(fd);
2765 return (NULL);
2767 return area;
2769 #endif
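/* Note: this allocator is only used when the user passes -mem-path,
   typically pointing at a mounted hugetlbfs, e.g.

       qemu -mem-path /dev/hugepages ...

   and mem_prealloc corresponds to -mem-prealloc.  On failure,
   qemu_ram_alloc() below falls back to a plain qemu_vmalloc(). */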
2771 static ram_addr_t find_ram_offset(ram_addr_t size)
2773 RAMBlock *block;
2774 ram_addr_t last = 0;
2776 QLIST_FOREACH(block, &ram_list.blocks, next)
2777 last = MAX(last, block->offset + block->length);
2779 return last;
2782 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2784 RAMBlock *new_block, *block;
2786 size = TARGET_PAGE_ALIGN(size);
2787 new_block = qemu_mallocz(sizeof(*new_block));
2789 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2790 char *id = dev->parent_bus->info->get_dev_path(dev);
2791 if (id) {
2792 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2793 qemu_free(id);
2796 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2798 QLIST_FOREACH(block, &ram_list.blocks, next) {
2799 if (!strcmp(block->idstr, new_block->idstr)) {
2800 if (block->length == new_block->length) {
2801 fprintf(stderr, "RAMBlock \"%s\" exists, assuming lack of"
2802 "free.\n", new_block->idstr);
2803 qemu_free(new_block);
2804 return block->offset;
2805 } else {
2806 fprintf(stderr, "RAMBlock \"%s\" already registered with"
2807 "different size, abort\n", new_block->idstr);
2808 abort();
2813 if (mem_path) {
2814 #if defined (__linux__) && !defined(TARGET_S390X)
2815 new_block->host = file_ram_alloc(size, mem_path);
2816 if (!new_block->host) {
2817 new_block->host = qemu_vmalloc(size);
2818 #ifdef MADV_MERGEABLE
2819 madvise(new_block->host, size, MADV_MERGEABLE);
2820 #endif
2822 #else
2823 fprintf(stderr, "-mem-path option unsupported\n");
2824 exit(1);
2825 #endif
2826 } else {
2827 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2828 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2829 new_block->host = mmap((void*)0x1000000, size,
2830 PROT_EXEC|PROT_READ|PROT_WRITE,
2831 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2832 #else
2833 new_block->host = qemu_vmalloc(size);
2834 #endif
2835 #ifdef MADV_MERGEABLE
2836 madvise(new_block->host, size, MADV_MERGEABLE);
2837 #endif
2839 new_block->offset = find_ram_offset(size);
2840 new_block->length = size;
2842 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2844 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2845 (new_block->offset + size) >> TARGET_PAGE_BITS);
2846 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2847 0xff, size >> TARGET_PAGE_BITS);
2849 if (kvm_enabled())
2850 kvm_setup_guest_memory(new_block->host, size);
2852 return new_block->offset;
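/* Illustrative caller (sketch, not part of this file; 's', 'dev' and
   "vga.vram" are hypothetical): a qdev device allocates its RAM by passing
   its DeviceState so the block gets a path-qualified name:

       s->vram_offset = qemu_ram_alloc(&dev->qdev, "vga.vram", vram_size);

   If the device sits on a bus whose BusInfo provides get_dev_path, the
   resulting idstr looks like "<device path>/vga.vram"; with a NULL
   DeviceState, or a bus without get_dev_path, it falls back to the bare
   name as before. */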
2855 void qemu_ram_free(ram_addr_t addr)
2857 /* TODO: implement this. */
2860 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2861 With the exception of the softmmu code in this file, this should
2862 only be used for local memory (e.g. video ram) that the device owns,
2863 and knows it isn't going to access beyond the end of the block.
2865 It should not be used for general purpose DMA.
2866 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2868 void *qemu_get_ram_ptr(ram_addr_t addr)
2870 RAMBlock *block;
2872 QLIST_FOREACH(block, &ram_list.blocks, next) {
2873 if (addr - block->offset < block->length) {
2874 QLIST_REMOVE(block, next);
2875 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2876 return block->host + (addr - block->offset);
2880 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2881 abort();
2883 return NULL;
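/* Illustrative usage (sketch, names hypothetical): a device that owns the
   block can turn its ram_addr_t back into a host pointer for local access:

       uint8_t *vram = qemu_get_ram_ptr(s->vram_offset);
       memset(vram, 0, vram_size);

   Per the comment above, this is not a general-purpose DMA path. */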
2886 /* Some of the softmmu routines need to translate from a host pointer
2887 (typically a TLB entry) back to a ram offset. */
2888 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2890 RAMBlock *block;
2891 uint8_t *host = ptr;
2893 QLIST_FOREACH(block, &ram_list.blocks, next) {
2894 if (host - block->host < block->length) {
2895 return block->offset + (host - block->host);
2899 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2900 abort();
2902 return 0;
2905 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2907 #ifdef DEBUG_UNASSIGNED
2908 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2909 #endif
2910 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2911 do_unassigned_access(addr, 0, 0, 0, 1);
2912 #endif
2913 return 0;
2916 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2918 #ifdef DEBUG_UNASSIGNED
2919 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2920 #endif
2921 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2922 do_unassigned_access(addr, 0, 0, 0, 2);
2923 #endif
2924 return 0;
2927 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2929 #ifdef DEBUG_UNASSIGNED
2930 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2931 #endif
2932 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2933 do_unassigned_access(addr, 0, 0, 0, 4);
2934 #endif
2935 return 0;
2938 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2940 #ifdef DEBUG_UNASSIGNED
2941 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2942 #endif
2943 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2944 do_unassigned_access(addr, 1, 0, 0, 1);
2945 #endif
2948 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2950 #ifdef DEBUG_UNASSIGNED
2951 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2952 #endif
2953 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2954 do_unassigned_access(addr, 1, 0, 0, 2);
2955 #endif
2958 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2960 #ifdef DEBUG_UNASSIGNED
2961 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2962 #endif
2963 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2964 do_unassigned_access(addr, 1, 0, 0, 4);
2965 #endif
2968 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2969 unassigned_mem_readb,
2970 unassigned_mem_readw,
2971 unassigned_mem_readl,
2974 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2975 unassigned_mem_writeb,
2976 unassigned_mem_writew,
2977 unassigned_mem_writel,
2980 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2981 uint32_t val)
2983 int dirty_flags;
2984 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2985 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2986 #if !defined(CONFIG_USER_ONLY)
2987 tb_invalidate_phys_page_fast(ram_addr, 1);
2988 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2989 #endif
2991 stb_p(qemu_get_ram_ptr(ram_addr), val);
2992 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2993 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2994 /* we remove the notdirty callback only if the code has been
2995 flushed */
2996 if (dirty_flags == 0xff)
2997 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3000 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3001 uint32_t val)
3003 int dirty_flags;
3004 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3005 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3006 #if !defined(CONFIG_USER_ONLY)
3007 tb_invalidate_phys_page_fast(ram_addr, 2);
3008 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3009 #endif
3011 stw_p(qemu_get_ram_ptr(ram_addr), val);
3012 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3013 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3014 /* we remove the notdirty callback only if the code has been
3015 flushed */
3016 if (dirty_flags == 0xff)
3017 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3020 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3021 uint32_t val)
3023 int dirty_flags;
3024 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3025 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3026 #if !defined(CONFIG_USER_ONLY)
3027 tb_invalidate_phys_page_fast(ram_addr, 4);
3028 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3029 #endif
3031 stl_p(qemu_get_ram_ptr(ram_addr), val);
3032 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3033 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3034 /* we remove the notdirty callback only if the code has been
3035 flushed */
3036 if (dirty_flags == 0xff)
3037 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3040 static CPUReadMemoryFunc * const error_mem_read[3] = {
3041 NULL, /* never used */
3042 NULL, /* never used */
3043 NULL, /* never used */
3046 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3047 notdirty_mem_writeb,
3048 notdirty_mem_writew,
3049 notdirty_mem_writel,
3052 /* Generate a debug exception if a watchpoint has been hit. */
3053 static void check_watchpoint(int offset, int len_mask, int flags)
3055 CPUState *env = cpu_single_env;
3056 target_ulong pc, cs_base;
3057 TranslationBlock *tb;
3058 target_ulong vaddr;
3059 CPUWatchpoint *wp;
3060 int cpu_flags;
3062 if (env->watchpoint_hit) {
3063 /* We re-entered the check after replacing the TB. Now raise
3064 * the debug interrupt so that it will trigger after the
3065 * current instruction. */
3066 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3067 return;
3069 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3070 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3071 if ((vaddr == (wp->vaddr & len_mask) ||
3072 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3073 wp->flags |= BP_WATCHPOINT_HIT;
3074 if (!env->watchpoint_hit) {
3075 env->watchpoint_hit = wp;
3076 tb = tb_find_pc(env->mem_io_pc);
3077 if (!tb) {
3078 cpu_abort(env, "check_watchpoint: could not find TB for "
3079 "pc=%p", (void *)env->mem_io_pc);
3081 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3082 tb_phys_invalidate(tb, -1);
3083 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3084 env->exception_index = EXCP_DEBUG;
3085 } else {
3086 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3087 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3089 cpu_resume_from_signal(env, NULL);
3091 } else {
3092 wp->flags &= ~BP_WATCHPOINT_HIT;
3097 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3098 so these check for a hit then pass through to the normal out-of-line
3099 phys routines. */
3100 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3102 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3103 return ldub_phys(addr);
3106 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3108 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3109 return lduw_phys(addr);
3112 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3114 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3115 return ldl_phys(addr);
3118 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3119 uint32_t val)
3121 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3122 stb_phys(addr, val);
3125 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3126 uint32_t val)
3128 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3129 stw_phys(addr, val);
3132 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3133 uint32_t val)
3135 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3136 stl_phys(addr, val);
3139 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3140 watch_mem_readb,
3141 watch_mem_readw,
3142 watch_mem_readl,
3145 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3146 watch_mem_writeb,
3147 watch_mem_writew,
3148 watch_mem_writel,
3151 static inline uint32_t subpage_readlen (subpage_t *mmio,
3152 target_phys_addr_t addr,
3153 unsigned int len)
3155 unsigned int idx = SUBPAGE_IDX(addr);
3156 #if defined(DEBUG_SUBPAGE)
3157 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3158 mmio, len, addr, idx);
3159 #endif
3161 addr += mmio->region_offset[idx];
3162 idx = mmio->sub_io_index[idx];
3163 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3166 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3167 uint32_t value, unsigned int len)
3169 unsigned int idx = SUBPAGE_IDX(addr);
3170 #if defined(DEBUG_SUBPAGE)
3171 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3172 __func__, mmio, len, addr, idx, value);
3173 #endif
3175 addr += mmio->region_offset[idx];
3176 idx = mmio->sub_io_index[idx];
3177 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3180 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3182 return subpage_readlen(opaque, addr, 0);
3185 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3186 uint32_t value)
3188 subpage_writelen(opaque, addr, value, 0);
3191 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3193 return subpage_readlen(opaque, addr, 1);
3196 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3197 uint32_t value)
3199 subpage_writelen(opaque, addr, value, 1);
3202 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3204 return subpage_readlen(opaque, addr, 2);
3207 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3208 uint32_t value)
3210 subpage_writelen(opaque, addr, value, 2);
3213 static CPUReadMemoryFunc * const subpage_read[] = {
3214 &subpage_readb,
3215 &subpage_readw,
3216 &subpage_readl,
3219 static CPUWriteMemoryFunc * const subpage_write[] = {
3220 &subpage_writeb,
3221 &subpage_writew,
3222 &subpage_writel,
3225 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3226 ram_addr_t memory, ram_addr_t region_offset)
3228 int idx, eidx;
3230 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3231 return -1;
3232 idx = SUBPAGE_IDX(start);
3233 eidx = SUBPAGE_IDX(end);
3234 #if defined(DEBUG_SUBPAGE)
3235 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3236 mmio, start, end, idx, eidx, memory);
3237 #endif
3238 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3239 for (; idx <= eidx; idx++) {
3240 mmio->sub_io_index[idx] = memory;
3241 mmio->region_offset[idx] = region_offset;
3244 return 0;
3247 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3248 ram_addr_t orig_memory,
3249 ram_addr_t region_offset)
3251 subpage_t *mmio;
3252 int subpage_memory;
3254 mmio = qemu_mallocz(sizeof(subpage_t));
3256 mmio->base = base;
3257 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3258 #if defined(DEBUG_SUBPAGE)
3259 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3260 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3261 #endif
3262 *phys = subpage_memory | IO_MEM_SUBPAGE;
3263 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3265 return mmio;
3268 static int get_free_io_mem_idx(void)
3270 int i;
3272 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3273 if (!io_mem_used[i]) {
3274 io_mem_used[i] = 1;
3275 return i;
3277 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3278 return -1;
3281 /* mem_read and mem_write are arrays of functions containing the
3282 function to access byte (index 0), word (index 1) and dword (index
3283 2). Functions can be omitted with a NULL function pointer.
3284 If io_index is non zero, the corresponding io zone is
3285 modified. If it is zero, a new io zone is allocated. The return
3286 value can be used with cpu_register_physical_memory(). (-1) is
3287 returned if error. */
3288 static int cpu_register_io_memory_fixed(int io_index,
3289 CPUReadMemoryFunc * const *mem_read,
3290 CPUWriteMemoryFunc * const *mem_write,
3291 void *opaque)
3293 int i;
3295 if (io_index <= 0) {
3296 io_index = get_free_io_mem_idx();
3297 if (io_index == -1)
3298 return io_index;
3299 } else {
3300 io_index >>= IO_MEM_SHIFT;
3301 if (io_index >= IO_MEM_NB_ENTRIES)
3302 return -1;
3305 for (i = 0; i < 3; ++i) {
3306 io_mem_read[io_index][i]
3307 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3309 for (i = 0; i < 3; ++i) {
3310 io_mem_write[io_index][i]
3311 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3313 io_mem_opaque[io_index] = opaque;
3315 return (io_index << IO_MEM_SHIFT);
3318 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3319 CPUWriteMemoryFunc * const *mem_write,
3320 void *opaque)
3322 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
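/* Illustrative usage (sketch; the mydev_* callbacks and 's' are
   hypothetical): a device registers one callback per access size and then
   maps the returned index with cpu_register_physical_memory():

       static CPUReadMemoryFunc * const mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);

   NULL entries fall back to the unassigned_mem_* handlers above. */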
3325 void cpu_unregister_io_memory(int io_table_address)
3327 int i;
3328 int io_index = io_table_address >> IO_MEM_SHIFT;
3330 for (i = 0; i < 3; i++) {
3331 io_mem_read[io_index][i] = unassigned_mem_read[i];
3332 io_mem_write[io_index][i] = unassigned_mem_write[i];
3334 io_mem_opaque[io_index] = NULL;
3335 io_mem_used[io_index] = 0;
3338 static void io_mem_init(void)
3340 int i;
3342 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3343 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3344 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3345 for (i = 0; i < 5; i++)
3346 io_mem_used[i] = 1;
3348 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3349 watch_mem_write, NULL);
3352 #endif /* !defined(CONFIG_USER_ONLY) */
3354 /* physical memory access (slow version, mainly for debug) */
3355 #if defined(CONFIG_USER_ONLY)
3356 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3357 uint8_t *buf, int len, int is_write)
3359 int l, flags;
3360 target_ulong page;
3361 void * p;
3363 while (len > 0) {
3364 page = addr & TARGET_PAGE_MASK;
3365 l = (page + TARGET_PAGE_SIZE) - addr;
3366 if (l > len)
3367 l = len;
3368 flags = page_get_flags(page);
3369 if (!(flags & PAGE_VALID))
3370 return -1;
3371 if (is_write) {
3372 if (!(flags & PAGE_WRITE))
3373 return -1;
3374 /* XXX: this code should not depend on lock_user */
3375 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3376 return -1;
3377 memcpy(p, buf, l);
3378 unlock_user(p, addr, l);
3379 } else {
3380 if (!(flags & PAGE_READ))
3381 return -1;
3382 /* XXX: this code should not depend on lock_user */
3383 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3384 return -1;
3385 memcpy(buf, p, l);
3386 unlock_user(p, addr, 0);
3388 len -= l;
3389 buf += l;
3390 addr += l;
3392 return 0;
3395 #else
3396 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3397 int len, int is_write)
3399 int l, io_index;
3400 uint8_t *ptr;
3401 uint32_t val;
3402 target_phys_addr_t page;
3403 unsigned long pd;
3404 PhysPageDesc *p;
3406 while (len > 0) {
3407 page = addr & TARGET_PAGE_MASK;
3408 l = (page + TARGET_PAGE_SIZE) - addr;
3409 if (l > len)
3410 l = len;
3411 p = phys_page_find(page >> TARGET_PAGE_BITS);
3412 if (!p) {
3413 pd = IO_MEM_UNASSIGNED;
3414 } else {
3415 pd = p->phys_offset;
3418 if (is_write) {
3419 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3420 target_phys_addr_t addr1 = addr;
3421 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3422 if (p)
3423 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3424 /* XXX: could force cpu_single_env to NULL to avoid
3425 potential bugs */
3426 if (l >= 4 && ((addr1 & 3) == 0)) {
3427 /* 32 bit write access */
3428 val = ldl_p(buf);
3429 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3430 l = 4;
3431 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3432 /* 16 bit write access */
3433 val = lduw_p(buf);
3434 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3435 l = 2;
3436 } else {
3437 /* 8 bit write access */
3438 val = ldub_p(buf);
3439 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3440 l = 1;
3442 } else {
3443 unsigned long addr1;
3444 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3445 /* RAM case */
3446 ptr = qemu_get_ram_ptr(addr1);
3447 memcpy(ptr, buf, l);
3448 if (!cpu_physical_memory_is_dirty(addr1)) {
3449 /* invalidate code */
3450 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3451 /* set dirty bit */
3452 cpu_physical_memory_set_dirty_flags(
3453 addr1, (0xff & ~CODE_DIRTY_FLAG));
3456 } else {
3457 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3458 !(pd & IO_MEM_ROMD)) {
3459 target_phys_addr_t addr1 = addr;
3460 /* I/O case */
3461 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3462 if (p)
3463 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3464 if (l >= 4 && ((addr1 & 3) == 0)) {
3465 /* 32 bit read access */
3466 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3467 stl_p(buf, val);
3468 l = 4;
3469 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3470 /* 16 bit read access */
3471 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3472 stw_p(buf, val);
3473 l = 2;
3474 } else {
3475 /* 8 bit read access */
3476 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3477 stb_p(buf, val);
3478 l = 1;
3480 } else {
3481 /* RAM case */
3482 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3483 (addr & ~TARGET_PAGE_MASK);
3484 memcpy(buf, ptr, l);
3487 len -= l;
3488 buf += l;
3489 addr += l;
3493 /* used for ROM loading : can write in RAM and ROM */
3494 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3495 const uint8_t *buf, int len)
3497 int l;
3498 uint8_t *ptr;
3499 target_phys_addr_t page;
3500 unsigned long pd;
3501 PhysPageDesc *p;
3503 while (len > 0) {
3504 page = addr & TARGET_PAGE_MASK;
3505 l = (page + TARGET_PAGE_SIZE) - addr;
3506 if (l > len)
3507 l = len;
3508 p = phys_page_find(page >> TARGET_PAGE_BITS);
3509 if (!p) {
3510 pd = IO_MEM_UNASSIGNED;
3511 } else {
3512 pd = p->phys_offset;
3515 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3516 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3517 !(pd & IO_MEM_ROMD)) {
3518 /* do nothing */
3519 } else {
3520 unsigned long addr1;
3521 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3522 /* ROM/RAM case */
3523 ptr = qemu_get_ram_ptr(addr1);
3524 memcpy(ptr, buf, l);
3526 len -= l;
3527 buf += l;
3528 addr += l;
3532 typedef struct {
3533 void *buffer;
3534 target_phys_addr_t addr;
3535 target_phys_addr_t len;
3536 } BounceBuffer;
3538 static BounceBuffer bounce;
3540 typedef struct MapClient {
3541 void *opaque;
3542 void (*callback)(void *opaque);
3543 QLIST_ENTRY(MapClient) link;
3544 } MapClient;
3546 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3547 = QLIST_HEAD_INITIALIZER(map_client_list);
3549 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3551 MapClient *client = qemu_malloc(sizeof(*client));
3553 client->opaque = opaque;
3554 client->callback = callback;
3555 QLIST_INSERT_HEAD(&map_client_list, client, link);
3556 return client;
3559 void cpu_unregister_map_client(void *_client)
3561 MapClient *client = (MapClient *)_client;
3563 QLIST_REMOVE(client, link);
3564 qemu_free(client);
3567 static void cpu_notify_map_clients(void)
3569 MapClient *client;
3571 while (!QLIST_EMPTY(&map_client_list)) {
3572 client = QLIST_FIRST(&map_client_list);
3573 client->callback(client->opaque);
3574 cpu_unregister_map_client(client);
3578 /* Map a physical memory region into a host virtual address.
3579 * May map a subset of the requested range, given by and returned in *plen.
3580 * May return NULL if resources needed to perform the mapping are exhausted.
3581 * Use only for reads OR writes - not for read-modify-write operations.
3582 * Use cpu_register_map_client() to know when retrying the map operation is
3583 * likely to succeed.
3585 void *cpu_physical_memory_map(target_phys_addr_t addr,
3586 target_phys_addr_t *plen,
3587 int is_write)
3589 target_phys_addr_t len = *plen;
3590 target_phys_addr_t done = 0;
3591 int l;
3592 uint8_t *ret = NULL;
3593 uint8_t *ptr;
3594 target_phys_addr_t page;
3595 unsigned long pd;
3596 PhysPageDesc *p;
3597 unsigned long addr1;
3599 while (len > 0) {
3600 page = addr & TARGET_PAGE_MASK;
3601 l = (page + TARGET_PAGE_SIZE) - addr;
3602 if (l > len)
3603 l = len;
3604 p = phys_page_find(page >> TARGET_PAGE_BITS);
3605 if (!p) {
3606 pd = IO_MEM_UNASSIGNED;
3607 } else {
3608 pd = p->phys_offset;
3611 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3612 if (done || bounce.buffer) {
3613 break;
3615 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3616 bounce.addr = addr;
3617 bounce.len = l;
3618 if (!is_write) {
3619 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3621 ptr = bounce.buffer;
3622 } else {
3623 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3624 ptr = qemu_get_ram_ptr(addr1);
3626 if (!done) {
3627 ret = ptr;
3628 } else if (ret + done != ptr) {
3629 break;
3632 len -= l;
3633 addr += l;
3634 done += l;
3636 *plen = done;
3637 return ret;
3640 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3641 * Will also mark the memory as dirty if is_write == 1. access_len gives
3642 * the amount of memory that was actually read or written by the caller.
3644 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3645 int is_write, target_phys_addr_t access_len)
3647 if (buffer != bounce.buffer) {
3648 if (is_write) {
3649 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3650 while (access_len) {
3651 unsigned l;
3652 l = TARGET_PAGE_SIZE;
3653 if (l > access_len)
3654 l = access_len;
3655 if (!cpu_physical_memory_is_dirty(addr1)) {
3656 /* invalidate code */
3657 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3658 /* set dirty bit */
3659 cpu_physical_memory_set_dirty_flags(
3660 addr1, (0xff & ~CODE_DIRTY_FLAG));
3662 addr1 += l;
3663 access_len -= l;
3666 return;
3668 if (is_write) {
3669 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3671 qemu_vfree(bounce.buffer);
3672 bounce.buffer = NULL;
3673 cpu_notify_map_clients();
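/* Illustrative DMA-style usage (sketch; 'size', 's' and 'retry_cb' are
   hypothetical): map for write, produce the data, then unmap with the
   length actually written so the dirty bits and code invalidation above
   are applied:

       target_phys_addr_t len = size;
       void *buf = cpu_physical_memory_map(addr, &len, 1);
       if (buf) {
           (fill up to 'len' bytes of buf here)
           cpu_physical_memory_unmap(buf, len, 1, len);
       } else {
           cpu_register_map_client(s, retry_cb);   (try again later)
       }

   Only the bounce-buffer case can fail; registering a map client is how a
   caller learns when a retry is likely to succeed. */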
3676 /* warning: addr must be aligned */
3677 uint32_t ldl_phys(target_phys_addr_t addr)
3679 int io_index;
3680 uint8_t *ptr;
3681 uint32_t val;
3682 unsigned long pd;
3683 PhysPageDesc *p;
3685 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3686 if (!p) {
3687 pd = IO_MEM_UNASSIGNED;
3688 } else {
3689 pd = p->phys_offset;
3692 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3693 !(pd & IO_MEM_ROMD)) {
3694 /* I/O case */
3695 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3696 if (p)
3697 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3698 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3699 } else {
3700 /* RAM case */
3701 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3702 (addr & ~TARGET_PAGE_MASK);
3703 val = ldl_p(ptr);
3705 return val;
3708 /* warning: addr must be aligned */
3709 uint64_t ldq_phys(target_phys_addr_t addr)
3711 int io_index;
3712 uint8_t *ptr;
3713 uint64_t val;
3714 unsigned long pd;
3715 PhysPageDesc *p;
3717 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3718 if (!p) {
3719 pd = IO_MEM_UNASSIGNED;
3720 } else {
3721 pd = p->phys_offset;
3724 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3725 !(pd & IO_MEM_ROMD)) {
3726 /* I/O case */
3727 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3728 if (p)
3729 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3730 #ifdef TARGET_WORDS_BIGENDIAN
3731 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3732 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3733 #else
3734 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3735 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3736 #endif
3737 } else {
3738 /* RAM case */
3739 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3740 (addr & ~TARGET_PAGE_MASK);
3741 val = ldq_p(ptr);
3743 return val;
3746 /* XXX: optimize */
3747 uint32_t ldub_phys(target_phys_addr_t addr)
3749 uint8_t val;
3750 cpu_physical_memory_read(addr, &val, 1);
3751 return val;
3754 /* warning: addr must be aligned */
3755 uint32_t lduw_phys(target_phys_addr_t addr)
3757 int io_index;
3758 uint8_t *ptr;
3759 uint64_t val;
3760 unsigned long pd;
3761 PhysPageDesc *p;
3763 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3764 if (!p) {
3765 pd = IO_MEM_UNASSIGNED;
3766 } else {
3767 pd = p->phys_offset;
3770 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3771 !(pd & IO_MEM_ROMD)) {
3772 /* I/O case */
3773 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3774 if (p)
3775 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3776 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3777 } else {
3778 /* RAM case */
3779 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3780 (addr & ~TARGET_PAGE_MASK);
3781 val = lduw_p(ptr);
3783 return val;
3786 /* warning: addr must be aligned. The ram page is not marked as dirty
3787 and the code inside is not invalidated. It is useful if the dirty
3788 bits are used to track modified PTEs */
3789 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3791 int io_index;
3792 uint8_t *ptr;
3793 unsigned long pd;
3794 PhysPageDesc *p;
3796 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3797 if (!p) {
3798 pd = IO_MEM_UNASSIGNED;
3799 } else {
3800 pd = p->phys_offset;
3803 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3804 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3805 if (p)
3806 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3807 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3808 } else {
3809 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3810 ptr = qemu_get_ram_ptr(addr1);
3811 stl_p(ptr, val);
3813 if (unlikely(in_migration)) {
3814 if (!cpu_physical_memory_is_dirty(addr1)) {
3815 /* invalidate code */
3816 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3817 /* set dirty bit */
3818 cpu_physical_memory_set_dirty_flags(
3819 addr1, (0xff & ~CODE_DIRTY_FLAG));
3825 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3827 int io_index;
3828 uint8_t *ptr;
3829 unsigned long pd;
3830 PhysPageDesc *p;
3832 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3833 if (!p) {
3834 pd = IO_MEM_UNASSIGNED;
3835 } else {
3836 pd = p->phys_offset;
3839 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3840 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3841 if (p)
3842 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3843 #ifdef TARGET_WORDS_BIGENDIAN
3844 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3845 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3846 #else
3847 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3848 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3849 #endif
3850 } else {
3851 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3852 (addr & ~TARGET_PAGE_MASK);
3853 stq_p(ptr, val);
3857 /* warning: addr must be aligned */
3858 void stl_phys(target_phys_addr_t addr, uint32_t val)
3860 int io_index;
3861 uint8_t *ptr;
3862 unsigned long pd;
3863 PhysPageDesc *p;
3865 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3866 if (!p) {
3867 pd = IO_MEM_UNASSIGNED;
3868 } else {
3869 pd = p->phys_offset;
3872 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3873 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3874 if (p)
3875 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3876 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3877 } else {
3878 unsigned long addr1;
3879 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3880 /* RAM case */
3881 ptr = qemu_get_ram_ptr(addr1);
3882 stl_p(ptr, val);
3883 if (!cpu_physical_memory_is_dirty(addr1)) {
3884 /* invalidate code */
3885 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3886 /* set dirty bit */
3887 cpu_physical_memory_set_dirty_flags(addr1,
3888 (0xff & ~CODE_DIRTY_FLAG));
3893 /* XXX: optimize */
3894 void stb_phys(target_phys_addr_t addr, uint32_t val)
3896 uint8_t v = val;
3897 cpu_physical_memory_write(addr, &v, 1);
3900 /* warning: addr must be aligned */
3901 void stw_phys(target_phys_addr_t addr, uint32_t val)
3903 int io_index;
3904 uint8_t *ptr;
3905 unsigned long pd;
3906 PhysPageDesc *p;
3908 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3909 if (!p) {
3910 pd = IO_MEM_UNASSIGNED;
3911 } else {
3912 pd = p->phys_offset;
3915 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3916 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3917 if (p)
3918 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3919 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3920 } else {
3921 unsigned long addr1;
3922 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3923 /* RAM case */
3924 ptr = qemu_get_ram_ptr(addr1);
3925 stw_p(ptr, val);
3926 if (!cpu_physical_memory_is_dirty(addr1)) {
3927 /* invalidate code */
3928 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3929 /* set dirty bit */
3930 cpu_physical_memory_set_dirty_flags(addr1,
3931 (0xff & ~CODE_DIRTY_FLAG));
3936 /* XXX: optimize */
3937 void stq_phys(target_phys_addr_t addr, uint64_t val)
3939 val = tswap64(val);
3940 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3943 /* virtual memory access for debug (includes writing to ROM) */
3944 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3945 uint8_t *buf, int len, int is_write)
3947 int l;
3948 target_phys_addr_t phys_addr;
3949 target_ulong page;
3951 while (len > 0) {
3952 page = addr & TARGET_PAGE_MASK;
3953 phys_addr = cpu_get_phys_page_debug(env, page);
3954 /* if no physical page mapped, return an error */
3955 if (phys_addr == -1)
3956 return -1;
3957 l = (page + TARGET_PAGE_SIZE) - addr;
3958 if (l > len)
3959 l = len;
3960 phys_addr += (addr & ~TARGET_PAGE_MASK);
3961 if (is_write)
3962 cpu_physical_memory_write_rom(phys_addr, buf, l);
3963 else
3964 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3965 len -= l;
3966 buf += l;
3967 addr += l;
3969 return 0;
3971 #endif
3973 /* in deterministic execution mode, instructions doing device I/Os
3974 must be at the end of the TB */
3975 void cpu_io_recompile(CPUState *env, void *retaddr)
3977 TranslationBlock *tb;
3978 uint32_t n, cflags;
3979 target_ulong pc, cs_base;
3980 uint64_t flags;
3982 tb = tb_find_pc((unsigned long)retaddr);
3983 if (!tb) {
3984 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3985 retaddr);
3987 n = env->icount_decr.u16.low + tb->icount;
3988 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3989 /* Calculate how many instructions had been executed before the fault
3990 occurred. */
3991 n = n - env->icount_decr.u16.low;
3992 /* Generate a new TB ending on the I/O insn. */
3993 n++;
3994 /* On MIPS and SH, delay slot instructions can only be restarted if
3995 they were already the first instruction in the TB. If this is not
3996 the first instruction in a TB then re-execute the preceding
3997 branch. */
3998 #if defined(TARGET_MIPS)
3999 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4000 env->active_tc.PC -= 4;
4001 env->icount_decr.u16.low++;
4002 env->hflags &= ~MIPS_HFLAG_BMASK;
4004 #elif defined(TARGET_SH4)
4005 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4006 && n > 1) {
4007 env->pc -= 2;
4008 env->icount_decr.u16.low++;
4009 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4011 #endif
4012 /* This should never happen. */
4013 if (n > CF_COUNT_MASK)
4014 cpu_abort(env, "TB too big during recompile");
4016 cflags = n | CF_LAST_IO;
4017 pc = tb->pc;
4018 cs_base = tb->cs_base;
4019 flags = tb->flags;
4020 tb_phys_invalidate(tb, -1);
4021 /* FIXME: In theory this could raise an exception. In practice
4022 we have already translated the block once so it's probably ok. */
4023 tb_gen_code(env, pc, cs_base, flags, cflags);
4024 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4025 the first in the TB) then we end up generating a whole new TB and
4026 repeating the fault, which is horribly inefficient.
4027 Better would be to execute just this insn uncached, or generate a
4028 second new TB. */
4029 cpu_resume_from_signal(env, NULL);
4032 #if !defined(CONFIG_USER_ONLY)
4034 void dump_exec_info(FILE *f,
4035 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4037 int i, target_code_size, max_target_code_size;
4038 int direct_jmp_count, direct_jmp2_count, cross_page;
4039 TranslationBlock *tb;
4041 target_code_size = 0;
4042 max_target_code_size = 0;
4043 cross_page = 0;
4044 direct_jmp_count = 0;
4045 direct_jmp2_count = 0;
4046 for(i = 0; i < nb_tbs; i++) {
4047 tb = &tbs[i];
4048 target_code_size += tb->size;
4049 if (tb->size > max_target_code_size)
4050 max_target_code_size = tb->size;
4051 if (tb->page_addr[1] != -1)
4052 cross_page++;
4053 if (tb->tb_next_offset[0] != 0xffff) {
4054 direct_jmp_count++;
4055 if (tb->tb_next_offset[1] != 0xffff) {
4056 direct_jmp2_count++;
4060 /* XXX: avoid using doubles ? */
4061 cpu_fprintf(f, "Translation buffer state:\n");
4062 cpu_fprintf(f, "gen code size %ld/%ld\n",
4063 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4064 cpu_fprintf(f, "TB count %d/%d\n",
4065 nb_tbs, code_gen_max_blocks);
4066 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4067 nb_tbs ? target_code_size / nb_tbs : 0,
4068 max_target_code_size);
4069 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4070 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4071 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4072 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4073 cross_page,
4074 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4075 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4076 direct_jmp_count,
4077 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4078 direct_jmp2_count,
4079 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4080 cpu_fprintf(f, "\nStatistics:\n");
4081 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4082 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4083 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4084 tcg_dump_info(f, cpu_fprintf);
4087 #define MMUSUFFIX _cmmu
4088 #define GETPC() NULL
4089 #define env cpu_single_env
4090 #define SOFTMMU_CODE_ACCESS
4092 #define SHIFT 0
4093 #include "softmmu_template.h"
4095 #define SHIFT 1
4096 #include "softmmu_template.h"
4098 #define SHIFT 2
4099 #include "softmmu_template.h"
4101 #define SHIFT 3
4102 #include "softmmu_template.h"
4104 #undef env
4106 #endif