1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "cache-utils.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "osdep.h"
46 #include "kvm.h"
47 #include "qemu-timer.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #include <signal.h>
51 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
52 #include <sys/param.h>
53 #if __FreeBSD_version >= 700104
54 #define HAVE_KINFO_GETVMMAP
55 #define sigqueue sigqueue_freebsd /* avoid redefinition */
56 #include <sys/time.h>
57 #include <sys/proc.h>
58 #include <machine/profile.h>
59 #define _KERNEL
60 #include <sys/user.h>
61 #undef _KERNEL
62 #undef sigqueue
63 #include <libutil.h>
64 #endif
65 #endif
66 #endif
68 //#define DEBUG_TB_INVALIDATE
69 //#define DEBUG_FLUSH
70 //#define DEBUG_TLB
71 //#define DEBUG_UNASSIGNED
73 /* make various TB consistency checks */
74 //#define DEBUG_TB_CHECK
75 //#define DEBUG_TLB_CHECK
77 //#define DEBUG_IOPORT
78 //#define DEBUG_SUBPAGE
80 #if !defined(CONFIG_USER_ONLY)
81 /* TB consistency checks only implemented for usermode emulation. */
82 #undef DEBUG_TB_CHECK
83 #endif
85 #define SMC_BITMAP_USE_THRESHOLD 10
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #elif defined(_WIN32)
102 /* Maximum alignment for Win32 is 16. */
103 #define code_gen_section \
104 __attribute__((aligned (16)))
105 #else
106 #define code_gen_section \
107 __attribute__((aligned (32)))
108 #endif
110 uint8_t code_gen_prologue[1024] code_gen_section;
111 static uint8_t *code_gen_buffer;
112 static unsigned long code_gen_buffer_size;
113 /* threshold to flush the translated code buffer */
114 static unsigned long code_gen_buffer_max_size;
115 uint8_t *code_gen_ptr;
117 #if !defined(CONFIG_USER_ONLY)
118 int phys_ram_fd;
119 static int in_migration;
121 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
122 #endif
124 CPUState *first_cpu;
125 /* current CPU in the current thread. It is only valid inside
126 cpu_exec() */
127 CPUState *cpu_single_env;
128 /* 0 = Do not count executed instructions.
129 1 = Precise instruction counting.
130 2 = Adaptive rate instruction counting. */
131 int use_icount = 0;
132 /* Current instruction counter. While executing translated code this may
133 include some instructions that have not yet been executed. */
134 int64_t qemu_icount;
136 typedef struct PageDesc {
137 /* list of TBs intersecting this ram page */
138 TranslationBlock *first_tb;
139 /* to optimize self-modifying code handling, we count the number of write
140 accesses to a given page and switch to a code bitmap past a threshold */
141 unsigned int code_write_count;
142 uint8_t *code_bitmap;
143 #if defined(CONFIG_USER_ONLY)
144 unsigned long flags;
145 #endif
146 } PageDesc;
148 /* In system mode we want L1_MAP to be based on ram offsets,
149 while in user mode we want it to be based on virtual addresses. */
150 #if !defined(CONFIG_USER_ONLY)
151 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
152 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
153 #else
154 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
155 #endif
156 #else
157 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
158 #endif
160 /* Size of the L2 (and L3, etc) page tables. */
161 #define L2_BITS 10
162 #define L2_SIZE (1 << L2_BITS)
164 /* The bits remaining after N lower levels of page tables. */
165 #define P_L1_BITS_REM \
166 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
170 /* Size of the L1 page table. Avoid silly small sizes. */
171 #if P_L1_BITS_REM < 4
172 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
173 #else
174 #define P_L1_BITS P_L1_BITS_REM
175 #endif
177 #if V_L1_BITS_REM < 4
178 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
179 #else
180 #define V_L1_BITS V_L1_BITS_REM
181 #endif
183 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
184 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
186 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
187 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
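/* Illustrative sketch (hypothetical helper, kept under #if 0): how a page
   index is split across the map levels defined above.  Assuming, purely for
   illustration, TARGET_PAGE_BITS == 12, L2_BITS == 10 and
   L1_MAP_ADDR_SPACE_BITS == 64, then V_L1_BITS_REM == (64 - 12) % 10 == 2,
   so V_L1_BITS == 12, V_L1_SIZE == 4096 and V_L1_SHIFT == 40: one L1 table
   of 4096 entries followed by V_L1_SHIFT / L2_BITS == 4 lower levels of
   L2_SIZE entries each (12 + 4 * 10 + 12 page-offset bits == 64). */
#if 0
static void example_map_walk(unsigned long index)
{
    int level;
    /* slot in the statically allocated L1 table */
    printf("L1 slot %lu\n",
           (unsigned long)((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)));
    /* slots in the dynamically allocated lower levels, top-down */
    for (level = V_L1_SHIFT / L2_BITS - 1; level >= 0; level--) {
        printf("level %d slot %lu\n", level,
               (unsigned long)((index >> (level * L2_BITS)) & (L2_SIZE - 1)));
    }
}
#endif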
189 unsigned long qemu_real_host_page_size;
190 unsigned long qemu_host_page_bits;
191 unsigned long qemu_host_page_size;
192 unsigned long qemu_host_page_mask;
194 /* This is a multi-level map on the virtual address space.
195 The bottom level has pointers to PageDesc. */
196 static void *l1_map[V_L1_SIZE];
198 #if !defined(CONFIG_USER_ONLY)
199 typedef struct PhysPageDesc {
200 /* offset in host memory of the page + io_index in the low bits */
201 ram_addr_t phys_offset;
202 ram_addr_t region_offset;
203 } PhysPageDesc;
205 /* This is a multi-level map on the physical address space.
206 The bottom level has pointers to PhysPageDesc. */
207 static void *l1_phys_map[P_L1_SIZE];
209 static void io_mem_init(void);
211 /* io memory support */
212 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
213 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
214 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
215 static char io_mem_used[IO_MEM_NB_ENTRIES];
216 static int io_mem_watch;
217 #endif
219 /* log support */
220 #ifdef WIN32
221 static const char *logfilename = "qemu.log";
222 #else
223 static const char *logfilename = "/tmp/qemu.log";
224 #endif
225 FILE *logfile;
226 int loglevel;
227 static int log_append = 0;
229 /* statistics */
230 #if !defined(CONFIG_USER_ONLY)
231 static int tlb_flush_count;
232 #endif
233 static int tb_flush_count;
234 static int tb_phys_invalidate_count;
236 #ifdef _WIN32
237 static void map_exec(void *addr, long size)
239 DWORD old_protect;
240 VirtualProtect(addr, size,
241 PAGE_EXECUTE_READWRITE, &old_protect);
244 #else
245 static void map_exec(void *addr, long size)
247 unsigned long start, end, page_size;
249 page_size = getpagesize();
250 start = (unsigned long)addr;
251 start &= ~(page_size - 1);
253 end = (unsigned long)addr + size;
254 end += page_size - 1;
255 end &= ~(page_size - 1);
257 mprotect((void *)start, end - start,
258 PROT_READ | PROT_WRITE | PROT_EXEC);
260 #endif
262 static void page_init(void)
264 /* NOTE: we can always suppose that qemu_host_page_size >=
265 TARGET_PAGE_SIZE */
266 #ifdef _WIN32
268 SYSTEM_INFO system_info;
270 GetSystemInfo(&system_info);
271 qemu_real_host_page_size = system_info.dwPageSize;
273 #else
274 qemu_real_host_page_size = getpagesize();
275 #endif
276 if (qemu_host_page_size == 0)
277 qemu_host_page_size = qemu_real_host_page_size;
278 if (qemu_host_page_size < TARGET_PAGE_SIZE)
279 qemu_host_page_size = TARGET_PAGE_SIZE;
280 qemu_host_page_bits = 0;
281 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
282 qemu_host_page_bits++;
283 qemu_host_page_mask = ~(qemu_host_page_size - 1);
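    /* For example, with a 4096-byte host page (and TARGET_PAGE_SIZE no larger
       than that) this yields qemu_host_page_bits == 12 and
       qemu_host_page_mask == ~0xfffUL. */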
285 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
287 #ifdef HAVE_KINFO_GETVMMAP
288 struct kinfo_vmentry *freep;
289 int i, cnt;
291 freep = kinfo_getvmmap(getpid(), &cnt);
292 if (freep) {
293 mmap_lock();
294 for (i = 0; i < cnt; i++) {
295 unsigned long startaddr, endaddr;
297 startaddr = freep[i].kve_start;
298 endaddr = freep[i].kve_end;
299 if (h2g_valid(startaddr)) {
300 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
302 if (h2g_valid(endaddr)) {
303 endaddr = h2g(endaddr);
304 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
305 } else {
306 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
307 endaddr = ~0ul;
308 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
309 #endif
313 free(freep);
314 mmap_unlock();
316 #else
317 FILE *f;
319 last_brk = (unsigned long)sbrk(0);
321 f = fopen("/compat/linux/proc/self/maps", "r");
322 if (f) {
323 mmap_lock();
325 do {
326 unsigned long startaddr, endaddr;
327 int n;
329 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
331 if (n == 2 && h2g_valid(startaddr)) {
332 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
334 if (h2g_valid(endaddr)) {
335 endaddr = h2g(endaddr);
336 } else {
337 endaddr = ~0ul;
339 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
341 } while (!feof(f));
343 fclose(f);
344 mmap_unlock();
346 #endif
348 #endif
351 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
353 PageDesc *pd;
354 void **lp;
355 int i;
357 #if defined(CONFIG_USER_ONLY)
358 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
359 # define ALLOC(P, SIZE) \
360 do { \
361 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
362 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
363 } while (0)
364 #else
365 # define ALLOC(P, SIZE) \
366 do { P = qemu_mallocz(SIZE); } while (0)
367 #endif
369 /* Level 1. Always allocated. */
370 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
372 /* Level 2..N-1. */
373 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
374 void **p = *lp;
376 if (p == NULL) {
377 if (!alloc) {
378 return NULL;
380 ALLOC(p, sizeof(void *) * L2_SIZE);
381 *lp = p;
384 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
387 pd = *lp;
388 if (pd == NULL) {
389 if (!alloc) {
390 return NULL;
392 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
393 *lp = pd;
396 #undef ALLOC
398 return pd + (index & (L2_SIZE - 1));
401 static inline PageDesc *page_find(tb_page_addr_t index)
403 return page_find_alloc(index, 0);
406 #if !defined(CONFIG_USER_ONLY)
407 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
409 PhysPageDesc *pd;
410 void **lp;
411 int i;
413 /* Level 1. Always allocated. */
414 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
416 /* Level 2..N-1. */
417 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
418 void **p = *lp;
419 if (p == NULL) {
420 if (!alloc) {
421 return NULL;
423 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
425 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
428 pd = *lp;
429 if (pd == NULL) {
430 int i;
432 if (!alloc) {
433 return NULL;
436 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
438 for (i = 0; i < L2_SIZE; i++) {
439 pd[i].phys_offset = IO_MEM_UNASSIGNED;
440 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
444 return pd + (index & (L2_SIZE - 1));
447 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
449 return phys_page_find_alloc(index, 0);
452 static void tlb_protect_code(ram_addr_t ram_addr);
453 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
454 target_ulong vaddr);
455 #define mmap_lock() do { } while(0)
456 #define mmap_unlock() do { } while(0)
457 #endif
459 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
461 #if defined(CONFIG_USER_ONLY)
462 /* Currently it is not recommended to allocate big chunks of data in
463 user mode. This will change when a dedicated libc is used. */
464 #define USE_STATIC_CODE_GEN_BUFFER
465 #endif
467 #ifdef USE_STATIC_CODE_GEN_BUFFER
468 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
469 __attribute__((aligned (CODE_GEN_ALIGN)));
470 #endif
472 static void code_gen_alloc(unsigned long tb_size)
474 if (kvm_enabled())
475 return;
477 #ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer = static_code_gen_buffer;
479 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
480 map_exec(code_gen_buffer, code_gen_buffer_size);
481 #else
482 code_gen_buffer_size = tb_size;
483 if (code_gen_buffer_size == 0) {
484 #if defined(CONFIG_USER_ONLY)
485 /* in user mode, phys_ram_size is not meaningful */
486 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
487 #else
488 /* XXX: needs adjustments */
489 code_gen_buffer_size = (unsigned long)(ram_size / 4);
490 #endif
492 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
493 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
494 /* The code gen buffer location may have constraints depending on
495 the host cpu and OS */
496 #if defined(__linux__)
498 int flags;
499 void *start = NULL;
501 flags = MAP_PRIVATE | MAP_ANONYMOUS;
502 #if defined(__x86_64__)
503 flags |= MAP_32BIT;
504 /* Cannot map more than that */
505 if (code_gen_buffer_size > (800 * 1024 * 1024))
506 code_gen_buffer_size = (800 * 1024 * 1024);
507 #elif defined(__sparc_v9__)
508 // Map the buffer below 2G, so we can use direct calls and branches
509 flags |= MAP_FIXED;
510 start = (void *) 0x60000000UL;
511 if (code_gen_buffer_size > (512 * 1024 * 1024))
512 code_gen_buffer_size = (512 * 1024 * 1024);
513 #elif defined(__arm__)
514 /* Map the buffer below 32M, so we can use direct calls and branches */
515 flags |= MAP_FIXED;
516 start = (void *) 0x01000000UL;
517 if (code_gen_buffer_size > 16 * 1024 * 1024)
518 code_gen_buffer_size = 16 * 1024 * 1024;
519 #elif defined(__s390x__)
520 /* Map the buffer so that we can use direct calls and branches. */
521 /* We have a +- 4GB range on the branches; leave some slop. */
522 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
523 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
525 start = (void *)0x90000000UL;
526 #endif
527 code_gen_buffer = mmap(start, code_gen_buffer_size,
528 PROT_WRITE | PROT_READ | PROT_EXEC,
529 flags, -1, 0);
530 if (code_gen_buffer == MAP_FAILED) {
531 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
532 exit(1);
535 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
537 int flags;
538 void *addr = NULL;
539 flags = MAP_PRIVATE | MAP_ANONYMOUS;
540 #if defined(__x86_64__)
541 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
542 * 0x40000000 is free */
543 flags |= MAP_FIXED;
544 addr = (void *)0x40000000;
545 /* Cannot map more than that */
546 if (code_gen_buffer_size > (800 * 1024 * 1024))
547 code_gen_buffer_size = (800 * 1024 * 1024);
548 #endif
549 code_gen_buffer = mmap(addr, code_gen_buffer_size,
550 PROT_WRITE | PROT_READ | PROT_EXEC,
551 flags, -1, 0);
552 if (code_gen_buffer == MAP_FAILED) {
553 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
554 exit(1);
557 #else
558 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
559 map_exec(code_gen_buffer, code_gen_buffer_size);
560 #endif
561 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
562 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
563 code_gen_buffer_max_size = code_gen_buffer_size -
564 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
565 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
566 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
569 /* Must be called before using the QEMU cpus. 'tb_size' is the size
570 (in bytes) allocated to the translation buffer. Zero means default
571 size. */
572 void cpu_exec_init_all(unsigned long tb_size)
574 cpu_gen_init();
575 code_gen_alloc(tb_size);
576 code_gen_ptr = code_gen_buffer;
577 page_init();
578 #if !defined(CONFIG_USER_ONLY)
579 io_mem_init();
580 #endif
581 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
582 /* There's no guest base to take into account, so go ahead and
583 initialize the prologue now. */
584 tcg_prologue_init(&tcg_ctx);
585 #endif
588 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
590 static int cpu_common_post_load(void *opaque, int version_id)
592 CPUState *env = opaque;
594 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
595 version_id is increased. */
596 env->interrupt_request &= ~0x01;
597 tlb_flush(env, 1);
599 return 0;
602 static const VMStateDescription vmstate_cpu_common = {
603 .name = "cpu_common",
604 .version_id = 1,
605 .minimum_version_id = 1,
606 .minimum_version_id_old = 1,
607 .post_load = cpu_common_post_load,
608 .fields = (VMStateField []) {
609 VMSTATE_UINT32(halted, CPUState),
610 VMSTATE_UINT32(interrupt_request, CPUState),
611 VMSTATE_END_OF_LIST()
614 #endif
616 CPUState *qemu_get_cpu(int cpu)
618 CPUState *env = first_cpu;
620 while (env) {
621 if (env->cpu_index == cpu)
622 break;
623 env = env->next_cpu;
626 return env;
629 void cpu_exec_init(CPUState *env)
631 CPUState **penv;
632 int cpu_index;
634 #if defined(CONFIG_USER_ONLY)
635 cpu_list_lock();
636 #endif
637 env->next_cpu = NULL;
638 penv = &first_cpu;
639 cpu_index = 0;
640 while (*penv != NULL) {
641 penv = &(*penv)->next_cpu;
642 cpu_index++;
644 env->cpu_index = cpu_index;
645 env->numa_node = 0;
646 QTAILQ_INIT(&env->breakpoints);
647 QTAILQ_INIT(&env->watchpoints);
648 #ifdef _WIN32
649 env->thread_id = GetCurrentProcessId();
650 #else
651 env->thread_id = getpid();
652 #endif
653 *penv = env;
654 #if defined(CONFIG_USER_ONLY)
655 cpu_list_unlock();
656 #endif
657 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
658 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
659 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
660 cpu_save, cpu_load, env);
661 #endif
664 static inline void invalidate_page_bitmap(PageDesc *p)
666 if (p->code_bitmap) {
667 qemu_free(p->code_bitmap);
668 p->code_bitmap = NULL;
670 p->code_write_count = 0;
673 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
675 static void page_flush_tb_1 (int level, void **lp)
677 int i;
679 if (*lp == NULL) {
680 return;
682 if (level == 0) {
683 PageDesc *pd = *lp;
684 for (i = 0; i < L2_SIZE; ++i) {
685 pd[i].first_tb = NULL;
686 invalidate_page_bitmap(pd + i);
688 } else {
689 void **pp = *lp;
690 for (i = 0; i < L2_SIZE; ++i) {
691 page_flush_tb_1 (level - 1, pp + i);
696 static void page_flush_tb(void)
698 int i;
699 for (i = 0; i < V_L1_SIZE; i++) {
700 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
704 /* flush all the translation blocks */
705 /* XXX: tb_flush is currently not thread safe */
706 void tb_flush(CPUState *env1)
708 CPUState *env;
709 #if defined(DEBUG_FLUSH)
710 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
711 (unsigned long)(code_gen_ptr - code_gen_buffer),
712 nb_tbs, nb_tbs > 0 ?
713 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
714 #endif
715 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
716 cpu_abort(env1, "Internal error: code buffer overflow\n");
718 nb_tbs = 0;
720 for(env = first_cpu; env != NULL; env = env->next_cpu) {
721 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
724 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
725 page_flush_tb();
727 code_gen_ptr = code_gen_buffer;
728 /* XXX: flush processor icache at this point if cache flush is
729 expensive */
730 tb_flush_count++;
733 #ifdef DEBUG_TB_CHECK
735 static void tb_invalidate_check(target_ulong address)
737 TranslationBlock *tb;
738 int i;
739 address &= TARGET_PAGE_MASK;
740 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
741 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
742 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
743 address >= tb->pc + tb->size)) {
744 printf("ERROR invalidate: address=" TARGET_FMT_lx
745 " PC=%08lx size=%04x\n",
746 address, (long)tb->pc, tb->size);
752 /* verify that all the pages have correct rights for code */
753 static void tb_page_check(void)
755 TranslationBlock *tb;
756 int i, flags1, flags2;
758 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
759 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
760 flags1 = page_get_flags(tb->pc);
761 flags2 = page_get_flags(tb->pc + tb->size - 1);
762 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
763 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
764 (long)tb->pc, tb->size, flags1, flags2);
770 #endif
772 /* invalidate one TB */
773 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
774 int next_offset)
776 TranslationBlock *tb1;
777 for(;;) {
778 tb1 = *ptb;
779 if (tb1 == tb) {
780 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
781 break;
783 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
787 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
789 TranslationBlock *tb1;
790 unsigned int n1;
792 for(;;) {
793 tb1 = *ptb;
794 n1 = (long)tb1 & 3;
795 tb1 = (TranslationBlock *)((long)tb1 & ~3);
796 if (tb1 == tb) {
797 *ptb = tb1->page_next[n1];
798 break;
800 ptb = &tb1->page_next[n1];
804 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
806 TranslationBlock *tb1, **ptb;
807 unsigned int n1;
809 ptb = &tb->jmp_next[n];
810 tb1 = *ptb;
811 if (tb1) {
812 /* find tb(n) in circular list */
813 for(;;) {
814 tb1 = *ptb;
815 n1 = (long)tb1 & 3;
816 tb1 = (TranslationBlock *)((long)tb1 & ~3);
817 if (n1 == n && tb1 == tb)
818 break;
819 if (n1 == 2) {
820 ptb = &tb1->jmp_first;
821 } else {
822 ptb = &tb1->jmp_next[n1];
825 /* now we can suppress tb(n) from the list */
826 *ptb = tb->jmp_next[n];
828 tb->jmp_next[n] = NULL;
832 /* reset the jump entry 'n' of a TB so that it is not chained to
833 another TB */
834 static inline void tb_reset_jump(TranslationBlock *tb, int n)
836 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
839 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
841 CPUState *env;
842 PageDesc *p;
843 unsigned int h, n1;
844 tb_page_addr_t phys_pc;
845 TranslationBlock *tb1, *tb2;
847 /* remove the TB from the hash list */
848 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
849 h = tb_phys_hash_func(phys_pc);
850 tb_remove(&tb_phys_hash[h], tb,
851 offsetof(TranslationBlock, phys_hash_next));
853 /* remove the TB from the page list */
854 if (tb->page_addr[0] != page_addr) {
855 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
856 tb_page_remove(&p->first_tb, tb);
857 invalidate_page_bitmap(p);
859 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
860 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
861 tb_page_remove(&p->first_tb, tb);
862 invalidate_page_bitmap(p);
865 tb_invalidated_flag = 1;
867 /* remove the TB from the hash list */
868 h = tb_jmp_cache_hash_func(tb->pc);
869 for(env = first_cpu; env != NULL; env = env->next_cpu) {
870 if (env->tb_jmp_cache[h] == tb)
871 env->tb_jmp_cache[h] = NULL;
874 /* suppress this TB from the two jump lists */
875 tb_jmp_remove(tb, 0);
876 tb_jmp_remove(tb, 1);
878 /* suppress any remaining jumps to this TB */
879 tb1 = tb->jmp_first;
880 for(;;) {
881 n1 = (long)tb1 & 3;
882 if (n1 == 2)
883 break;
884 tb1 = (TranslationBlock *)((long)tb1 & ~3);
885 tb2 = tb1->jmp_next[n1];
886 tb_reset_jump(tb1, n1);
887 tb1->jmp_next[n1] = NULL;
888 tb1 = tb2;
890 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
892 tb_phys_invalidate_count++;
895 static inline void set_bits(uint8_t *tab, int start, int len)
897 int end, mask, end1;
899 end = start + len;
900 tab += start >> 3;
901 mask = 0xff << (start & 7);
902 if ((start & ~7) == (end & ~7)) {
903 if (start < end) {
904 mask &= ~(0xff << (end & 7));
905 *tab |= mask;
907 } else {
908 *tab++ |= mask;
909 start = (start + 8) & ~7;
910 end1 = end & ~7;
911 while (start < end1) {
912 *tab++ = 0xff;
913 start += 8;
915 if (start < end) {
916 mask = ~(0xff << (end & 7));
917 *tab |= mask;
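/* Worked example: set_bits(tab, 5, 7) marks bits 5..11, i.e. it ORs 0xe0
   (bits 5-7) into tab[0] and 0x0f (bits 8-11) into tab[1]. */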
922 static void build_page_bitmap(PageDesc *p)
924 int n, tb_start, tb_end;
925 TranslationBlock *tb;
927 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
929 tb = p->first_tb;
930 while (tb != NULL) {
931 n = (long)tb & 3;
932 tb = (TranslationBlock *)((long)tb & ~3);
933 /* NOTE: this is subtle as a TB may span two physical pages */
934 if (n == 0) {
935 /* NOTE: tb_end may be after the end of the page, but
936 it is not a problem */
937 tb_start = tb->pc & ~TARGET_PAGE_MASK;
938 tb_end = tb_start + tb->size;
939 if (tb_end > TARGET_PAGE_SIZE)
940 tb_end = TARGET_PAGE_SIZE;
941 } else {
942 tb_start = 0;
943 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
945 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
946 tb = tb->page_next[n];
950 TranslationBlock *tb_gen_code(CPUState *env,
951 target_ulong pc, target_ulong cs_base,
952 int flags, int cflags)
954 TranslationBlock *tb;
955 uint8_t *tc_ptr;
956 tb_page_addr_t phys_pc, phys_page2;
957 target_ulong virt_page2;
958 int code_gen_size;
960 phys_pc = get_page_addr_code(env, pc);
961 tb = tb_alloc(pc);
962 if (!tb) {
963 /* flush must be done */
964 tb_flush(env);
965 /* cannot fail at this point */
966 tb = tb_alloc(pc);
967 /* Don't forget to invalidate previous TB info. */
968 tb_invalidated_flag = 1;
970 tc_ptr = code_gen_ptr;
971 tb->tc_ptr = tc_ptr;
972 tb->cs_base = cs_base;
973 tb->flags = flags;
974 tb->cflags = cflags;
975 cpu_gen_code(env, tb, &code_gen_size);
976 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
978 /* check next page if needed */
979 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
980 phys_page2 = -1;
981 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
982 phys_page2 = get_page_addr_code(env, virt_page2);
984 tb_link_page(tb, phys_pc, phys_page2);
985 return tb;
988 /* invalidate all TBs which intersect with the target physical page
989 starting in range [start;end[. NOTE: start and end must refer to
990 the same physical page. 'is_cpu_write_access' should be true if called
991 from a real cpu write access: the virtual CPU will exit the current
992 TB if code is modified inside this TB. */
993 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
994 int is_cpu_write_access)
996 TranslationBlock *tb, *tb_next, *saved_tb;
997 CPUState *env = cpu_single_env;
998 tb_page_addr_t tb_start, tb_end;
999 PageDesc *p;
1000 int n;
1001 #ifdef TARGET_HAS_PRECISE_SMC
1002 int current_tb_not_found = is_cpu_write_access;
1003 TranslationBlock *current_tb = NULL;
1004 int current_tb_modified = 0;
1005 target_ulong current_pc = 0;
1006 target_ulong current_cs_base = 0;
1007 int current_flags = 0;
1008 #endif /* TARGET_HAS_PRECISE_SMC */
1010 p = page_find(start >> TARGET_PAGE_BITS);
1011 if (!p)
1012 return;
1013 if (!p->code_bitmap &&
1014 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1015 is_cpu_write_access) {
1016 /* build code bitmap */
1017 build_page_bitmap(p);
1020 /* we remove all the TBs in the range [start, end[ */
1021 /* XXX: see if in some cases it could be faster to invalidate all the code */
1022 tb = p->first_tb;
1023 while (tb != NULL) {
1024 n = (long)tb & 3;
1025 tb = (TranslationBlock *)((long)tb & ~3);
1026 tb_next = tb->page_next[n];
1027 /* NOTE: this is subtle as a TB may span two physical pages */
1028 if (n == 0) {
1029 /* NOTE: tb_end may be after the end of the page, but
1030 it is not a problem */
1031 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1032 tb_end = tb_start + tb->size;
1033 } else {
1034 tb_start = tb->page_addr[1];
1035 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1037 if (!(tb_end <= start || tb_start >= end)) {
1038 #ifdef TARGET_HAS_PRECISE_SMC
1039 if (current_tb_not_found) {
1040 current_tb_not_found = 0;
1041 current_tb = NULL;
1042 if (env->mem_io_pc) {
1043 /* now we have a real cpu fault */
1044 current_tb = tb_find_pc(env->mem_io_pc);
1047 if (current_tb == tb &&
1048 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1049 /* If we are modifying the current TB, we must stop
1050 its execution. We could be more precise by checking
1051 that the modification is after the current PC, but it
1052 would require a specialized function to partially
1053 restore the CPU state */
1055 current_tb_modified = 1;
1056 cpu_restore_state(current_tb, env,
1057 env->mem_io_pc, NULL);
1058 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1059 &current_flags);
1061 #endif /* TARGET_HAS_PRECISE_SMC */
1062 /* we need to do that to handle the case where a signal
1063 occurs while doing tb_phys_invalidate() */
1064 saved_tb = NULL;
1065 if (env) {
1066 saved_tb = env->current_tb;
1067 env->current_tb = NULL;
1069 tb_phys_invalidate(tb, -1);
1070 if (env) {
1071 env->current_tb = saved_tb;
1072 if (env->interrupt_request && env->current_tb)
1073 cpu_interrupt(env, env->interrupt_request);
1076 tb = tb_next;
1078 #if !defined(CONFIG_USER_ONLY)
1079 /* if no code remaining, no need to continue to use slow writes */
1080 if (!p->first_tb) {
1081 invalidate_page_bitmap(p);
1082 if (is_cpu_write_access) {
1083 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1086 #endif
1087 #ifdef TARGET_HAS_PRECISE_SMC
1088 if (current_tb_modified) {
1089 /* we generate a block containing just the instruction
1090 modifying the memory. It will ensure that it cannot modify
1091 itself */
1092 env->current_tb = NULL;
1093 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1094 cpu_resume_from_signal(env, NULL);
1096 #endif
1099 /* len must be <= 8 and start must be a multiple of len */
1100 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1102 PageDesc *p;
1103 int offset, b;
1104 #if 0
1105 if (1) {
1106 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1107 cpu_single_env->mem_io_vaddr, len,
1108 cpu_single_env->eip,
1109 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1111 #endif
1112 p = page_find(start >> TARGET_PAGE_BITS);
1113 if (!p)
1114 return;
1115 if (p->code_bitmap) {
1116 offset = start & ~TARGET_PAGE_MASK;
1117 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1118 if (b & ((1 << len) - 1))
1119 goto do_invalidate;
1120 } else {
1121 do_invalidate:
1122 tb_invalidate_phys_page_range(start, start + len, 1);
1126 #if !defined(CONFIG_SOFTMMU)
1127 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1128 unsigned long pc, void *puc)
1130 TranslationBlock *tb;
1131 PageDesc *p;
1132 int n;
1133 #ifdef TARGET_HAS_PRECISE_SMC
1134 TranslationBlock *current_tb = NULL;
1135 CPUState *env = cpu_single_env;
1136 int current_tb_modified = 0;
1137 target_ulong current_pc = 0;
1138 target_ulong current_cs_base = 0;
1139 int current_flags = 0;
1140 #endif
1142 addr &= TARGET_PAGE_MASK;
1143 p = page_find(addr >> TARGET_PAGE_BITS);
1144 if (!p)
1145 return;
1146 tb = p->first_tb;
1147 #ifdef TARGET_HAS_PRECISE_SMC
1148 if (tb && pc != 0) {
1149 current_tb = tb_find_pc(pc);
1151 #endif
1152 while (tb != NULL) {
1153 n = (long)tb & 3;
1154 tb = (TranslationBlock *)((long)tb & ~3);
1155 #ifdef TARGET_HAS_PRECISE_SMC
1156 if (current_tb == tb &&
1157 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1158 /* If we are modifying the current TB, we must stop
1159 its execution. We could be more precise by checking
1160 that the modification is after the current PC, but it
1161 would require a specialized function to partially
1162 restore the CPU state */
1164 current_tb_modified = 1;
1165 cpu_restore_state(current_tb, env, pc, puc);
1166 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1167 &current_flags);
1169 #endif /* TARGET_HAS_PRECISE_SMC */
1170 tb_phys_invalidate(tb, addr);
1171 tb = tb->page_next[n];
1173 p->first_tb = NULL;
1174 #ifdef TARGET_HAS_PRECISE_SMC
1175 if (current_tb_modified) {
1176 /* we generate a block containing just the instruction
1177 modifying the memory. It will ensure that it cannot modify
1178 itself */
1179 env->current_tb = NULL;
1180 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1181 cpu_resume_from_signal(env, puc);
1183 #endif
1185 #endif
1187 /* add the tb in the target page and protect it if necessary */
1188 static inline void tb_alloc_page(TranslationBlock *tb,
1189 unsigned int n, tb_page_addr_t page_addr)
1191 PageDesc *p;
1192 TranslationBlock *last_first_tb;
1194 tb->page_addr[n] = page_addr;
1195 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1196 tb->page_next[n] = p->first_tb;
1197 last_first_tb = p->first_tb;
1198 p->first_tb = (TranslationBlock *)((long)tb | n);
1199 invalidate_page_bitmap(p);
1201 #if defined(TARGET_HAS_SMC) || 1
1203 #if defined(CONFIG_USER_ONLY)
1204 if (p->flags & PAGE_WRITE) {
1205 target_ulong addr;
1206 PageDesc *p2;
1207 int prot;
1209 /* force the host page as non writable (writes will have a
1210 page fault + mprotect overhead) */
1211 page_addr &= qemu_host_page_mask;
1212 prot = 0;
1213 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1214 addr += TARGET_PAGE_SIZE) {
1216 p2 = page_find (addr >> TARGET_PAGE_BITS);
1217 if (!p2)
1218 continue;
1219 prot |= p2->flags;
1220 p2->flags &= ~PAGE_WRITE;
1222 mprotect(g2h(page_addr), qemu_host_page_size,
1223 (prot & PAGE_BITS) & ~PAGE_WRITE);
1224 #ifdef DEBUG_TB_INVALIDATE
1225 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1226 page_addr);
1227 #endif
1229 #else
1230 /* if some code is already present, then the pages are already
1231 protected. So we handle the case where only the first TB is
1232 allocated in a physical page */
1233 if (!last_first_tb) {
1234 tlb_protect_code(page_addr);
1236 #endif
1238 #endif /* TARGET_HAS_SMC */
1241 /* Allocate a new translation block. Flush the translation buffer if
1242 too many translation blocks or too much generated code. */
1243 TranslationBlock *tb_alloc(target_ulong pc)
1245 TranslationBlock *tb;
1247 if (nb_tbs >= code_gen_max_blocks ||
1248 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1249 return NULL;
1250 tb = &tbs[nb_tbs++];
1251 tb->pc = pc;
1252 tb->cflags = 0;
1253 return tb;
1256 void tb_free(TranslationBlock *tb)
1258 /* In practice this is mostly used for single-use temporary TBs.
1259 Ignore the hard cases and just back up if this TB happens to
1260 be the last one generated. */
1261 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1262 code_gen_ptr = tb->tc_ptr;
1263 nb_tbs--;
1267 /* add a new TB and link it to the physical page tables. phys_page2 is
1268 (-1) to indicate that only one page contains the TB. */
1269 void tb_link_page(TranslationBlock *tb,
1270 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1272 unsigned int h;
1273 TranslationBlock **ptb;
1275 /* Grab the mmap lock to stop another thread invalidating this TB
1276 before we are done. */
1277 mmap_lock();
1278 /* add in the physical hash table */
1279 h = tb_phys_hash_func(phys_pc);
1280 ptb = &tb_phys_hash[h];
1281 tb->phys_hash_next = *ptb;
1282 *ptb = tb;
1284 /* add in the page list */
1285 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1286 if (phys_page2 != -1)
1287 tb_alloc_page(tb, 1, phys_page2);
1288 else
1289 tb->page_addr[1] = -1;
1291 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1292 tb->jmp_next[0] = NULL;
1293 tb->jmp_next[1] = NULL;
1295 /* init original jump addresses */
1296 if (tb->tb_next_offset[0] != 0xffff)
1297 tb_reset_jump(tb, 0);
1298 if (tb->tb_next_offset[1] != 0xffff)
1299 tb_reset_jump(tb, 1);
1301 #ifdef DEBUG_TB_CHECK
1302 tb_page_check();
1303 #endif
1304 mmap_unlock();
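/* Note on the lists linked above: the two low bits of each stored
   TranslationBlock pointer carry a tag.  For the page lists (first_tb,
   page_next) the tag is the page slot (0 or 1) the link belongs to, while
   the value 2 written into jmp_first marks the owning TB itself and
   terminates the circular jump list (see tb_jmp_remove and
   tb_phys_invalidate). */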
1307 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1308 tb[1].tc_ptr. Return NULL if not found */
1309 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1311 int m_min, m_max, m;
1312 unsigned long v;
1313 TranslationBlock *tb;
1315 if (nb_tbs <= 0)
1316 return NULL;
1317 if (tc_ptr < (unsigned long)code_gen_buffer ||
1318 tc_ptr >= (unsigned long)code_gen_ptr)
1319 return NULL;
1320 /* binary search (cf Knuth) */
1321 m_min = 0;
1322 m_max = nb_tbs - 1;
1323 while (m_min <= m_max) {
1324 m = (m_min + m_max) >> 1;
1325 tb = &tbs[m];
1326 v = (unsigned long)tb->tc_ptr;
1327 if (v == tc_ptr)
1328 return tb;
1329 else if (tc_ptr < v) {
1330 m_max = m - 1;
1331 } else {
1332 m_min = m + 1;
1335 return &tbs[m_max];
1338 static void tb_reset_jump_recursive(TranslationBlock *tb);
1340 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1342 TranslationBlock *tb1, *tb_next, **ptb;
1343 unsigned int n1;
1345 tb1 = tb->jmp_next[n];
1346 if (tb1 != NULL) {
1347 /* find head of list */
1348 for(;;) {
1349 n1 = (long)tb1 & 3;
1350 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1351 if (n1 == 2)
1352 break;
1353 tb1 = tb1->jmp_next[n1];
1355 /* we are now sure that tb jumps to tb1 */
1356 tb_next = tb1;
1358 /* remove tb from the jmp_first list */
1359 ptb = &tb_next->jmp_first;
1360 for(;;) {
1361 tb1 = *ptb;
1362 n1 = (long)tb1 & 3;
1363 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1364 if (n1 == n && tb1 == tb)
1365 break;
1366 ptb = &tb1->jmp_next[n1];
1368 *ptb = tb->jmp_next[n];
1369 tb->jmp_next[n] = NULL;
1371 /* suppress the jump to next tb in generated code */
1372 tb_reset_jump(tb, n);
1374 /* suppress jumps in the tb on which we could have jumped */
1375 tb_reset_jump_recursive(tb_next);
1379 static void tb_reset_jump_recursive(TranslationBlock *tb)
1381 tb_reset_jump_recursive2(tb, 0);
1382 tb_reset_jump_recursive2(tb, 1);
1385 #if defined(TARGET_HAS_ICE)
1386 #if defined(CONFIG_USER_ONLY)
1387 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1389 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1391 #else
1392 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1394 target_phys_addr_t addr;
1395 target_ulong pd;
1396 ram_addr_t ram_addr;
1397 PhysPageDesc *p;
1399 addr = cpu_get_phys_page_debug(env, pc);
1400 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1401 if (!p) {
1402 pd = IO_MEM_UNASSIGNED;
1403 } else {
1404 pd = p->phys_offset;
1406 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1407 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1409 #endif
1410 #endif /* TARGET_HAS_ICE */
1412 #if defined(CONFIG_USER_ONLY)
1413 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1418 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1419 int flags, CPUWatchpoint **watchpoint)
1421 return -ENOSYS;
1423 #else
1424 /* Add a watchpoint. */
1425 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1426 int flags, CPUWatchpoint **watchpoint)
1428 target_ulong len_mask = ~(len - 1);
1429 CPUWatchpoint *wp;
1431 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1432 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1433 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1434 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1435 return -EINVAL;
1437 wp = qemu_malloc(sizeof(*wp));
1439 wp->vaddr = addr;
1440 wp->len_mask = len_mask;
1441 wp->flags = flags;
1443 /* keep all GDB-injected watchpoints in front */
1444 if (flags & BP_GDB)
1445 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1446 else
1447 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1449 tlb_flush_page(env, addr);
1451 if (watchpoint)
1452 *watchpoint = wp;
1453 return 0;
1456 /* Remove a specific watchpoint. */
1457 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1458 int flags)
1460 target_ulong len_mask = ~(len - 1);
1461 CPUWatchpoint *wp;
1463 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1464 if (addr == wp->vaddr && len_mask == wp->len_mask
1465 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1466 cpu_watchpoint_remove_by_ref(env, wp);
1467 return 0;
1470 return -ENOENT;
1473 /* Remove a specific watchpoint by reference. */
1474 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1476 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1478 tlb_flush_page(env, watchpoint->vaddr);
1480 qemu_free(watchpoint);
1483 /* Remove all matching watchpoints. */
1484 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1486 CPUWatchpoint *wp, *next;
1488 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1489 if (wp->flags & mask)
1490 cpu_watchpoint_remove_by_ref(env, wp);
1493 #endif
1495 /* Add a breakpoint. */
1496 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1497 CPUBreakpoint **breakpoint)
1499 #if defined(TARGET_HAS_ICE)
1500 CPUBreakpoint *bp;
1502 bp = qemu_malloc(sizeof(*bp));
1504 bp->pc = pc;
1505 bp->flags = flags;
1507 /* keep all GDB-injected breakpoints in front */
1508 if (flags & BP_GDB)
1509 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1510 else
1511 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1513 breakpoint_invalidate(env, pc);
1515 if (breakpoint)
1516 *breakpoint = bp;
1517 return 0;
1518 #else
1519 return -ENOSYS;
1520 #endif
1523 /* Remove a specific breakpoint. */
1524 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1526 #if defined(TARGET_HAS_ICE)
1527 CPUBreakpoint *bp;
1529 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1530 if (bp->pc == pc && bp->flags == flags) {
1531 cpu_breakpoint_remove_by_ref(env, bp);
1532 return 0;
1535 return -ENOENT;
1536 #else
1537 return -ENOSYS;
1538 #endif
1541 /* Remove a specific breakpoint by reference. */
1542 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1544 #if defined(TARGET_HAS_ICE)
1545 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1547 breakpoint_invalidate(env, breakpoint->pc);
1549 qemu_free(breakpoint);
1550 #endif
1553 /* Remove all matching breakpoints. */
1554 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1556 #if defined(TARGET_HAS_ICE)
1557 CPUBreakpoint *bp, *next;
1559 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1560 if (bp->flags & mask)
1561 cpu_breakpoint_remove_by_ref(env, bp);
1563 #endif
1566 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1567 CPU loop after each instruction */
1568 void cpu_single_step(CPUState *env, int enabled)
1570 #if defined(TARGET_HAS_ICE)
1571 if (env->singlestep_enabled != enabled) {
1572 env->singlestep_enabled = enabled;
1573 if (kvm_enabled())
1574 kvm_update_guest_debug(env, 0);
1575 else {
1576 /* must flush all the translated code to avoid inconsistencies */
1577 /* XXX: only flush what is necessary */
1578 tb_flush(env);
1581 #endif
1584 /* enable or disable low-level logging */
1585 void cpu_set_log(int log_flags)
1587 loglevel = log_flags;
1588 if (loglevel && !logfile) {
1589 logfile = fopen(logfilename, log_append ? "a" : "w");
1590 if (!logfile) {
1591 perror(logfilename);
1592 _exit(1);
1594 #if !defined(CONFIG_SOFTMMU)
1595 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1597 static char logfile_buf[4096];
1598 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1600 #elif !defined(_WIN32)
1601 /* Win32 doesn't support line-buffering and requires size >= 2 */
1602 setvbuf(logfile, NULL, _IOLBF, 0);
1603 #endif
1604 log_append = 1;
1606 if (!loglevel && logfile) {
1607 fclose(logfile);
1608 logfile = NULL;
1612 void cpu_set_log_filename(const char *filename)
1614 logfilename = strdup(filename);
1615 if (logfile) {
1616 fclose(logfile);
1617 logfile = NULL;
1619 cpu_set_log(loglevel);
1622 static void cpu_unlink_tb(CPUState *env)
1624 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1625 problem and hope the cpu will stop of its own accord. For userspace
1626 emulation this often isn't actually as bad as it sounds. Often
1627 signals are used primarily to interrupt blocking syscalls. */
1628 TranslationBlock *tb;
1629 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1631 spin_lock(&interrupt_lock);
1632 tb = env->current_tb;
1633 /* if the cpu is currently executing code, we must unlink it and
1634 all the potentially executing TB */
1635 if (tb) {
1636 env->current_tb = NULL;
1637 tb_reset_jump_recursive(tb);
1639 spin_unlock(&interrupt_lock);
1642 /* mask must never be zero, except for A20 change call */
1643 void cpu_interrupt(CPUState *env, int mask)
1645 int old_mask;
1647 old_mask = env->interrupt_request;
1648 env->interrupt_request |= mask;
1649 if (kvm_enabled() && !kvm_irqchip_in_kernel())
1650 kvm_update_interrupt_request(env);
1652 #ifndef CONFIG_USER_ONLY
1654 * If called from iothread context, wake the target cpu in
1655 * case it's halted.
1657 if (!qemu_cpu_self(env)) {
1658 qemu_cpu_kick(env);
1659 return;
1661 #endif
1663 if (use_icount) {
1664 env->icount_decr.u16.high = 0xffff;
1665 #ifndef CONFIG_USER_ONLY
1666 if (!can_do_io(env)
1667 && (mask & ~old_mask) != 0) {
1668 cpu_abort(env, "Raised interrupt while not in I/O function");
1670 #endif
1671 } else {
1672 cpu_unlink_tb(env);
1676 void cpu_reset_interrupt(CPUState *env, int mask)
1678 env->interrupt_request &= ~mask;
1681 void cpu_exit(CPUState *env)
1683 env->exit_request = 1;
1684 cpu_unlink_tb(env);
1687 const CPULogItem cpu_log_items[] = {
1688 { CPU_LOG_TB_OUT_ASM, "out_asm",
1689 "show generated host assembly code for each compiled TB" },
1690 { CPU_LOG_TB_IN_ASM, "in_asm",
1691 "show target assembly code for each compiled TB" },
1692 { CPU_LOG_TB_OP, "op",
1693 "show micro ops for each compiled TB" },
1694 { CPU_LOG_TB_OP_OPT, "op_opt",
1695 "show micro ops "
1696 #ifdef TARGET_I386
1697 "before eflags optimization and "
1698 #endif
1699 "after liveness analysis" },
1700 { CPU_LOG_INT, "int",
1701 "show interrupts/exceptions in short format" },
1702 { CPU_LOG_EXEC, "exec",
1703 "show trace before each executed TB (lots of logs)" },
1704 { CPU_LOG_TB_CPU, "cpu",
1705 "show CPU state before block translation" },
1706 #ifdef TARGET_I386
1707 { CPU_LOG_PCALL, "pcall",
1708 "show protected mode far calls/returns/exceptions" },
1709 { CPU_LOG_RESET, "cpu_reset",
1710 "show CPU state before CPU resets" },
1711 #endif
1712 #ifdef DEBUG_IOPORT
1713 { CPU_LOG_IOPORT, "ioport",
1714 "show all i/o ports accesses" },
1715 #endif
1716 { 0, NULL, NULL },
1719 #ifndef CONFIG_USER_ONLY
1720 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1721 = QLIST_HEAD_INITIALIZER(memory_client_list);
1723 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1724 ram_addr_t size,
1725 ram_addr_t phys_offset)
1727 CPUPhysMemoryClient *client;
1728 QLIST_FOREACH(client, &memory_client_list, list) {
1729 client->set_memory(client, start_addr, size, phys_offset);
1733 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1734 target_phys_addr_t end)
1736 CPUPhysMemoryClient *client;
1737 QLIST_FOREACH(client, &memory_client_list, list) {
1738 int r = client->sync_dirty_bitmap(client, start, end);
1739 if (r < 0)
1740 return r;
1742 return 0;
1745 static int cpu_notify_migration_log(int enable)
1747 CPUPhysMemoryClient *client;
1748 QLIST_FOREACH(client, &memory_client_list, list) {
1749 int r = client->migration_log(client, enable);
1750 if (r < 0)
1751 return r;
1753 return 0;
1756 static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1757 int level, void **lp)
1759 int i;
1761 if (*lp == NULL) {
1762 return;
1764 if (level == 0) {
1765 PhysPageDesc *pd = *lp;
1766 for (i = 0; i < L2_SIZE; ++i) {
1767 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1768 client->set_memory(client, pd[i].region_offset,
1769 TARGET_PAGE_SIZE, pd[i].phys_offset);
1772 } else {
1773 void **pp = *lp;
1774 for (i = 0; i < L2_SIZE; ++i) {
1775 phys_page_for_each_1(client, level - 1, pp + i);
1780 static void phys_page_for_each(CPUPhysMemoryClient *client)
1782 int i;
1783 for (i = 0; i < P_L1_SIZE; ++i) {
1784 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1785 l1_phys_map + i);
1789 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1791 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1792 phys_page_for_each(client);
1795 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1797 QLIST_REMOVE(client, list);
1799 #endif
1801 static int cmp1(const char *s1, int n, const char *s2)
1803 if (strlen(s2) != n)
1804 return 0;
1805 return memcmp(s1, s2, n) == 0;
1808 /* takes a comma separated list of log masks. Return 0 if error. */
1809 int cpu_str_to_log_mask(const char *str)
1811 const CPULogItem *item;
1812 int mask;
1813 const char *p, *p1;
1815 p = str;
1816 mask = 0;
1817 for(;;) {
1818 p1 = strchr(p, ',');
1819 if (!p1)
1820 p1 = p + strlen(p);
1821 if(cmp1(p,p1-p,"all")) {
1822 for(item = cpu_log_items; item->mask != 0; item++) {
1823 mask |= item->mask;
1825 } else {
1826 for(item = cpu_log_items; item->mask != 0; item++) {
1827 if (cmp1(p, p1 - p, item->name))
1828 goto found;
1830 return 0;
1832 found:
1833 mask |= item->mask;
1834 if (*p1 != ',')
1835 break;
1836 p = p1 + 1;
1838 return mask;
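/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, while any unknown item, e.g.
   cpu_str_to_log_mask("in_asm,bogus"), makes the whole call return 0. */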
1841 void cpu_abort(CPUState *env, const char *fmt, ...)
1843 va_list ap;
1844 va_list ap2;
1846 va_start(ap, fmt);
1847 va_copy(ap2, ap);
1848 fprintf(stderr, "qemu: fatal: ");
1849 vfprintf(stderr, fmt, ap);
1850 fprintf(stderr, "\n");
1851 #ifdef TARGET_I386
1852 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1853 #else
1854 cpu_dump_state(env, stderr, fprintf, 0);
1855 #endif
1856 if (qemu_log_enabled()) {
1857 qemu_log("qemu: fatal: ");
1858 qemu_log_vprintf(fmt, ap2);
1859 qemu_log("\n");
1860 #ifdef TARGET_I386
1861 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1862 #else
1863 log_cpu_state(env, 0);
1864 #endif
1865 qemu_log_flush();
1866 qemu_log_close();
1868 va_end(ap2);
1869 va_end(ap);
1870 #if defined(CONFIG_USER_ONLY)
1872 struct sigaction act;
1873 sigfillset(&act.sa_mask);
1874 act.sa_handler = SIG_DFL;
1875 sigaction(SIGABRT, &act, NULL);
1877 #endif
1878 abort();
1881 CPUState *cpu_copy(CPUState *env)
1883 CPUState *new_env = cpu_init(env->cpu_model_str);
1884 CPUState *next_cpu = new_env->next_cpu;
1885 int cpu_index = new_env->cpu_index;
1886 #if defined(TARGET_HAS_ICE)
1887 CPUBreakpoint *bp;
1888 CPUWatchpoint *wp;
1889 #endif
1891 memcpy(new_env, env, sizeof(CPUState));
1893 /* Preserve chaining and index. */
1894 new_env->next_cpu = next_cpu;
1895 new_env->cpu_index = cpu_index;
1897 /* Clone all break/watchpoints.
1898 Note: Once we support ptrace with hw-debug register access, make sure
1899 BP_CPU break/watchpoints are handled correctly on clone. */
1900 QTAILQ_INIT(&env->breakpoints);
1901 QTAILQ_INIT(&env->watchpoints);
1902 #if defined(TARGET_HAS_ICE)
1903 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1904 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1906 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1907 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1908 wp->flags, NULL);
1910 #endif
1912 return new_env;
1915 #if !defined(CONFIG_USER_ONLY)
1917 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1919 unsigned int i;
1921 /* Discard jump cache entries for any tb which might potentially
1922 overlap the flushed page. */
1923 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1924 memset (&env->tb_jmp_cache[i], 0,
1925 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1927 i = tb_jmp_cache_hash_page(addr);
1928 memset (&env->tb_jmp_cache[i], 0,
1929 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1932 static CPUTLBEntry s_cputlb_empty_entry = {
1933 .addr_read = -1,
1934 .addr_write = -1,
1935 .addr_code = -1,
1936 .addend = -1,
1939 /* NOTE: if flush_global is true, also flush global entries (not
1940 implemented yet) */
1941 void tlb_flush(CPUState *env, int flush_global)
1943 int i;
1945 #if defined(DEBUG_TLB)
1946 printf("tlb_flush:\n");
1947 #endif
1948 /* must reset current TB so that interrupts cannot modify the
1949 links while we are modifying them */
1950 env->current_tb = NULL;
1952 for(i = 0; i < CPU_TLB_SIZE; i++) {
1953 int mmu_idx;
1954 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1955 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1959 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1961 env->tlb_flush_addr = -1;
1962 env->tlb_flush_mask = 0;
1963 tlb_flush_count++;
1966 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1968 if (addr == (tlb_entry->addr_read &
1969 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1970 addr == (tlb_entry->addr_write &
1971 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1972 addr == (tlb_entry->addr_code &
1973 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1974 *tlb_entry = s_cputlb_empty_entry;
1978 void tlb_flush_page(CPUState *env, target_ulong addr)
1980 int i;
1981 int mmu_idx;
1983 #if defined(DEBUG_TLB)
1984 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1985 #endif
1986 /* Check if we need to flush due to large pages. */
1987 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1988 #if defined(DEBUG_TLB)
1989 printf("tlb_flush_page: forced full flush ("
1990 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1991 env->tlb_flush_addr, env->tlb_flush_mask);
1992 #endif
1993 tlb_flush(env, 1);
1994 return;
1996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env->current_tb = NULL;
2000 addr &= TARGET_PAGE_MASK;
2001 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2002 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2003 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2005 tlb_flush_jmp_cache(env, addr);
2008 /* update the TLBs so that writes to code in the virtual page 'addr'
2009 can be detected */
2010 static void tlb_protect_code(ram_addr_t ram_addr)
2012 cpu_physical_memory_reset_dirty(ram_addr,
2013 ram_addr + TARGET_PAGE_SIZE,
2014 CODE_DIRTY_FLAG);
2017 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2018 tested for self modifying code */
2019 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2020 target_ulong vaddr)
2022 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2025 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2026 unsigned long start, unsigned long length)
2028 unsigned long addr;
2029 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2030 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2031 if ((addr - start) < length) {
2032 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2037 /* Note: start and end must be within the same ram block. */
2038 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2039 int dirty_flags)
2041 CPUState *env;
2042 unsigned long length, start1;
2043 int i;
2045 start &= TARGET_PAGE_MASK;
2046 end = TARGET_PAGE_ALIGN(end);
2048 length = end - start;
2049 if (length == 0)
2050 return;
2051 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2053 /* we modify the TLB cache so that the dirty bit will be set again
2054 when accessing the range */
2055 start1 = (unsigned long)qemu_get_ram_ptr(start);
2056 /* Check that we don't span multiple blocks - this breaks the
2057 address comparisons below. */
2058 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2059 != (end - 1) - start) {
2060 abort();
2063 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2064 int mmu_idx;
2065 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2066 for(i = 0; i < CPU_TLB_SIZE; i++)
2067 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2068 start1, length);
2073 int cpu_physical_memory_set_dirty_tracking(int enable)
2075 int ret = 0;
2076 in_migration = enable;
2077 ret = cpu_notify_migration_log(!!enable);
2078 return ret;
2081 int cpu_physical_memory_get_dirty_tracking(void)
2083 return in_migration;
2086 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2087 target_phys_addr_t end_addr)
2089 int ret;
2091 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2092 return ret;
2095 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2097 ram_addr_t ram_addr;
2098 void *p;
2100 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2101 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2102 + tlb_entry->addend);
2103 ram_addr = qemu_ram_addr_from_host(p);
2104 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2105 tlb_entry->addr_write |= TLB_NOTDIRTY;
2110 /* update the TLB according to the current state of the dirty bits */
2111 void cpu_tlb_update_dirty(CPUState *env)
2113 int i;
2114 int mmu_idx;
2115 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2116 for(i = 0; i < CPU_TLB_SIZE; i++)
2117 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2121 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2123 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2124 tlb_entry->addr_write = vaddr;
2127 /* update the TLB corresponding to virtual page vaddr
2128 so that it is no longer dirty */
2129 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2131 int i;
2132 int mmu_idx;
2134 vaddr &= TARGET_PAGE_MASK;
2135 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2136 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2137 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2140 /* Our TLB does not support large pages, so remember the area covered by
2141 large pages and trigger a full TLB flush if these are invalidated. */
2142 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2143 target_ulong size)
2145 target_ulong mask = ~(size - 1);
2147 if (env->tlb_flush_addr == (target_ulong)-1) {
2148 env->tlb_flush_addr = vaddr & mask;
2149 env->tlb_flush_mask = mask;
2150 return;
2152 /* Extend the existing region to include the new page.
2153 This is a compromise between unnecessary flushes and the cost
2154 of maintaining a full variable size TLB. */
2155 mask &= env->tlb_flush_mask;
2156 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2157 mask <<= 1;
2159 env->tlb_flush_addr &= mask;
2160 env->tlb_flush_mask = mask;
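/* Worked example (editor's illustration, hypothetical addresses): suppose a
   2MB page was recorded at 0x40000000, i.e. tlb_flush_addr = 0x40000000 and
   tlb_flush_mask = 0xffe00000, and another 2MB page is now added at
   0x40600000.  The loop above shifts the mask left until both addresses
   agree under it: 0xffe00000 -> 0xffc00000 -> 0xff800000.  The recorded
   region becomes 0x40000000-0x407fffff (8MB); invalidating any page inside
   it forces a full TLB flush in tlb_flush_page(). */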
2163 /* Add a new TLB entry. At most one entry for a given virtual address
2164    is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2165 supplied size is only used by tlb_flush_page. */
2166 void tlb_set_page(CPUState *env, target_ulong vaddr,
2167 target_phys_addr_t paddr, int prot,
2168 int mmu_idx, target_ulong size)
2170 PhysPageDesc *p;
2171 unsigned long pd;
2172 unsigned int index;
2173 target_ulong address;
2174 target_ulong code_address;
2175 unsigned long addend;
2176 CPUTLBEntry *te;
2177 CPUWatchpoint *wp;
2178 target_phys_addr_t iotlb;
2180 assert(size >= TARGET_PAGE_SIZE);
2181 if (size != TARGET_PAGE_SIZE) {
2182 tlb_add_large_page(env, vaddr, size);
2184 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2185 if (!p) {
2186 pd = IO_MEM_UNASSIGNED;
2187 } else {
2188 pd = p->phys_offset;
2190 #if defined(DEBUG_TLB)
2191     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d size=" TARGET_FMT_lx " pd=0x%08lx\n",
2192            vaddr, (int)paddr, prot, mmu_idx, size, pd);
2193 #endif
2195 address = vaddr;
2196 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2197 /* IO memory case (romd handled later) */
2198 address |= TLB_MMIO;
2200 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2201 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2202 /* Normal RAM. */
2203 iotlb = pd & TARGET_PAGE_MASK;
2204 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2205 iotlb |= IO_MEM_NOTDIRTY;
2206 else
2207 iotlb |= IO_MEM_ROM;
2208 } else {
2209 /* IO handlers are currently passed a physical address.
2210 It would be nice to pass an offset from the base address
2211 of that region. This would avoid having to special case RAM,
2212 and avoid full address decoding in every device.
2213 We can't use the high bits of pd for this because
2214 IO_MEM_ROMD uses these as a ram address. */
2215 iotlb = (pd & ~TARGET_PAGE_MASK);
2216 if (p) {
2217 iotlb += p->region_offset;
2218 } else {
2219 iotlb += paddr;
2223 code_address = address;
2224 /* Make accesses to pages with watchpoints go via the
2225 watchpoint trap routines. */
2226 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2227 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2228 /* Avoid trapping reads of pages with a write breakpoint. */
2229 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2230 iotlb = io_mem_watch + paddr;
2231 address |= TLB_MMIO;
2232 break;
2237 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2238 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2239 te = &env->tlb_table[mmu_idx][index];
2240 te->addend = addend - vaddr;
2241 if (prot & PAGE_READ) {
2242 te->addr_read = address;
2243 } else {
2244 te->addr_read = -1;
2247 if (prot & PAGE_EXEC) {
2248 te->addr_code = code_address;
2249 } else {
2250 te->addr_code = -1;
2252 if (prot & PAGE_WRITE) {
2253 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2254 (pd & IO_MEM_ROMD)) {
2255 /* Write access calls the I/O callback. */
2256 te->addr_write = address | TLB_MMIO;
2257 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2258 !cpu_physical_memory_is_dirty(pd)) {
2259 te->addr_write = address | TLB_NOTDIRTY;
2260 } else {
2261 te->addr_write = address;
2263 } else {
2264 te->addr_write = -1;
2268 #else
2270 void tlb_flush(CPUState *env, int flush_global)
2274 void tlb_flush_page(CPUState *env, target_ulong addr)
2279 * Walks guest process memory "regions" one by one
2280 * and calls callback function 'fn' for each region.
2283 struct walk_memory_regions_data
2285 walk_memory_regions_fn fn;
2286 void *priv;
2287 unsigned long start;
2288 int prot;
2291 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2292 abi_ulong end, int new_prot)
2294 if (data->start != -1ul) {
2295 int rc = data->fn(data->priv, data->start, end, data->prot);
2296 if (rc != 0) {
2297 return rc;
2301 data->start = (new_prot ? end : -1ul);
2302 data->prot = new_prot;
2304 return 0;
2307 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2308 abi_ulong base, int level, void **lp)
2310 abi_ulong pa;
2311 int i, rc;
2313 if (*lp == NULL) {
2314 return walk_memory_regions_end(data, base, 0);
2317 if (level == 0) {
2318 PageDesc *pd = *lp;
2319 for (i = 0; i < L2_SIZE; ++i) {
2320 int prot = pd[i].flags;
2322 pa = base | (i << TARGET_PAGE_BITS);
2323 if (prot != data->prot) {
2324 rc = walk_memory_regions_end(data, pa, prot);
2325 if (rc != 0) {
2326 return rc;
2330 } else {
2331 void **pp = *lp;
2332 for (i = 0; i < L2_SIZE; ++i) {
2333 pa = base | ((abi_ulong)i <<
2334 (TARGET_PAGE_BITS + L2_BITS * level));
2335 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2336 if (rc != 0) {
2337 return rc;
2342 return 0;
2345 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2347 struct walk_memory_regions_data data;
2348 unsigned long i;
2350 data.fn = fn;
2351 data.priv = priv;
2352 data.start = -1ul;
2353 data.prot = 0;
2355 for (i = 0; i < V_L1_SIZE; i++) {
2356 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2357 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2358 if (rc != 0) {
2359 return rc;
2363 return walk_memory_regions_end(&data, 0, 0);
2366 static int dump_region(void *priv, abi_ulong start,
2367 abi_ulong end, unsigned long prot)
2369 FILE *f = (FILE *)priv;
2371 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2372 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2373 start, end, end - start,
2374 ((prot & PAGE_READ) ? 'r' : '-'),
2375 ((prot & PAGE_WRITE) ? 'w' : '-'),
2376 ((prot & PAGE_EXEC) ? 'x' : '-'));
2378 return (0);
2381 /* dump memory mappings */
2382 void page_dump(FILE *f)
2384 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2385 "start", "end", "size", "prot");
2386 walk_memory_regions(f, dump_region);
2389 int page_get_flags(target_ulong address)
2391 PageDesc *p;
2393 p = page_find(address >> TARGET_PAGE_BITS);
2394 if (!p)
2395 return 0;
2396 return p->flags;
2399 /* Modify the flags of a page and invalidate the code if necessary.
2400 The flag PAGE_WRITE_ORG is positioned automatically depending
2401 on PAGE_WRITE. The mmap_lock should already be held. */
2402 void page_set_flags(target_ulong start, target_ulong end, int flags)
2404 target_ulong addr, len;
2406 /* This function should never be called with addresses outside the
2407 guest address space. If this assert fires, it probably indicates
2408 a missing call to h2g_valid. */
2409 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2410 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2411 #endif
2412 assert(start < end);
2414 start = start & TARGET_PAGE_MASK;
2415 end = TARGET_PAGE_ALIGN(end);
2417 if (flags & PAGE_WRITE) {
2418 flags |= PAGE_WRITE_ORG;
2421 for (addr = start, len = end - start;
2422 len != 0;
2423 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2424 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2426 /* If the write protection bit is set, then we invalidate
2427 the code inside. */
2428 if (!(p->flags & PAGE_WRITE) &&
2429 (flags & PAGE_WRITE) &&
2430 p->first_tb) {
2431 tb_invalidate_phys_page(addr, 0, NULL);
2433 p->flags = flags;
2437 int page_check_range(target_ulong start, target_ulong len, int flags)
2439 PageDesc *p;
2440 target_ulong end;
2441 target_ulong addr;
2443 /* This function should never be called with addresses outside the
2444 guest address space. If this assert fires, it probably indicates
2445 a missing call to h2g_valid. */
2446 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2447 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2448 #endif
2450 if (len == 0) {
2451 return 0;
2453 if (start + len - 1 < start) {
2454 /* We've wrapped around. */
2455 return -1;
2458     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2459 start = start & TARGET_PAGE_MASK;
2461 for (addr = start, len = end - start;
2462 len != 0;
2463 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2464 p = page_find(addr >> TARGET_PAGE_BITS);
2465         if (!p)
2466             return -1;
2467         if (!(p->flags & PAGE_VALID))
2468 return -1;
2470 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2471 return -1;
2472 if (flags & PAGE_WRITE) {
2473 if (!(p->flags & PAGE_WRITE_ORG))
2474 return -1;
2475 /* unprotect the page if it was put read-only because it
2476 contains translated code */
2477 if (!(p->flags & PAGE_WRITE)) {
2478 if (!page_unprotect(addr, 0, NULL))
2479 return -1;
2481 return 0;
2484 return 0;
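/* Usage sketch (editor's illustration; guest_addr and size are hypothetical):
   the user-mode emulators use this check to validate guest pointers before
   dereferencing them, e.g. in an access_ok()-style helper:

       int access_ok = page_check_range(guest_addr, size,
                                        PAGE_READ | PAGE_WRITE) == 0;

   A zero return means every page in the range is valid and has the requested
   permissions; -1 means the access should be rejected (typically with an
   EFAULT-style error in the syscall layer). */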
2487 /* called from signal handler: invalidate the code and unprotect the
2488 page. Return TRUE if the fault was successfully handled. */
2489 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2491 unsigned int prot;
2492 PageDesc *p;
2493 target_ulong host_start, host_end, addr;
2495 /* Technically this isn't safe inside a signal handler. However we
2496 know this only ever happens in a synchronous SEGV handler, so in
2497 practice it seems to be ok. */
2498 mmap_lock();
2500 p = page_find(address >> TARGET_PAGE_BITS);
2501 if (!p) {
2502 mmap_unlock();
2503 return 0;
2506 /* if the page was really writable, then we change its
2507 protection back to writable */
2508 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2509 host_start = address & qemu_host_page_mask;
2510 host_end = host_start + qemu_host_page_size;
2512 prot = 0;
2513 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2514 p = page_find(addr >> TARGET_PAGE_BITS);
2515 p->flags |= PAGE_WRITE;
2516 prot |= p->flags;
2518 /* and since the content will be modified, we must invalidate
2519 the corresponding translated code. */
2520 tb_invalidate_phys_page(addr, pc, puc);
2521 #ifdef DEBUG_TB_CHECK
2522 tb_invalidate_check(addr);
2523 #endif
2525 mprotect((void *)g2h(host_start), qemu_host_page_size,
2526 prot & PAGE_BITS);
2528 mmap_unlock();
2529 return 1;
2531 mmap_unlock();
2532 return 0;
2535 static inline void tlb_set_dirty(CPUState *env,
2536 unsigned long addr, target_ulong vaddr)
2539 #endif /* defined(CONFIG_USER_ONLY) */
2541 #if !defined(CONFIG_USER_ONLY)
2543 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2544 typedef struct subpage_t {
2545 target_phys_addr_t base;
2546 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2547 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2548 } subpage_t;
2550 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2551 ram_addr_t memory, ram_addr_t region_offset);
2552 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2553 ram_addr_t orig_memory,
2554 ram_addr_t region_offset);
2555 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2556 need_subpage) \
2557 do { \
2558 if (addr > start_addr) \
2559 start_addr2 = 0; \
2560 else { \
2561 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2562 if (start_addr2 > 0) \
2563 need_subpage = 1; \
2566 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2567 end_addr2 = TARGET_PAGE_SIZE - 1; \
2568 else { \
2569 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2570 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2571 need_subpage = 1; \
2573 } while (0)
2575 /* register physical memory.
2576 For RAM, 'size' must be a multiple of the target page size.
2577 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2578 io memory page. The address used when calling the IO function is
2579 the offset from the start of the region, plus region_offset. Both
2580 start_addr and region_offset are rounded down to a page boundary
2581 before calculating this offset. This should not be a problem unless
2582 the low bits of start_addr and region_offset differ. */
2583 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2584 ram_addr_t size,
2585 ram_addr_t phys_offset,
2586 ram_addr_t region_offset)
2588 target_phys_addr_t addr, end_addr;
2589 PhysPageDesc *p;
2590 CPUState *env;
2591 ram_addr_t orig_size = size;
2592 subpage_t *subpage;
2594 cpu_notify_set_memory(start_addr, size, phys_offset);
2596 if (phys_offset == IO_MEM_UNASSIGNED) {
2597 region_offset = start_addr;
2599 region_offset &= TARGET_PAGE_MASK;
2600 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2601 end_addr = start_addr + (target_phys_addr_t)size;
2602 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2603 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2604 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2605 ram_addr_t orig_memory = p->phys_offset;
2606 target_phys_addr_t start_addr2, end_addr2;
2607 int need_subpage = 0;
2609 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2610 need_subpage);
2611 if (need_subpage) {
2612 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2613 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2614 &p->phys_offset, orig_memory,
2615 p->region_offset);
2616 } else {
2617 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2618 >> IO_MEM_SHIFT];
2620 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2621 region_offset);
2622 p->region_offset = 0;
2623 } else {
2624 p->phys_offset = phys_offset;
2625 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2626 (phys_offset & IO_MEM_ROMD))
2627 phys_offset += TARGET_PAGE_SIZE;
2629 } else {
2630 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2631 p->phys_offset = phys_offset;
2632 p->region_offset = region_offset;
2633 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2634 (phys_offset & IO_MEM_ROMD)) {
2635 phys_offset += TARGET_PAGE_SIZE;
2636 } else {
2637 target_phys_addr_t start_addr2, end_addr2;
2638 int need_subpage = 0;
2640 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2641 end_addr2, need_subpage);
2643 if (need_subpage) {
2644 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2645 &p->phys_offset, IO_MEM_UNASSIGNED,
2646 addr & TARGET_PAGE_MASK);
2647 subpage_register(subpage, start_addr2, end_addr2,
2648 phys_offset, region_offset);
2649 p->region_offset = 0;
2653 region_offset += TARGET_PAGE_SIZE;
2656 /* since each CPU stores ram addresses in its TLB cache, we must
2657 reset the modified entries */
2658 /* XXX: slow ! */
2659 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2660 tlb_flush(env, 1);
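/* Usage sketch (editor's illustration; the base address and size are made
   up).  A board model typically allocates guest RAM and then maps it at a
   guest-physical address; most callers go through the
   cpu_register_physical_memory() wrapper, which passes region_offset = 0:

       ram_addr_t ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
       cpu_register_physical_memory_offset(0x08000000, 64 * 1024 * 1024,
                                           ram_offset | IO_MEM_RAM, 0);

   IO_MEM_RAM marks the region as RAM in the low bits; for MMIO the low bits
   instead carry the index returned by cpu_register_io_memory(). */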
2664 /* XXX: temporary until new memory mapping API */
2665 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2667 PhysPageDesc *p;
2669 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2670 if (!p)
2671 return IO_MEM_UNASSIGNED;
2672 return p->phys_offset;
2675 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2677 if (kvm_enabled())
2678 kvm_coalesce_mmio_region(addr, size);
2681 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2683 if (kvm_enabled())
2684 kvm_uncoalesce_mmio_region(addr, size);
2687 void qemu_flush_coalesced_mmio_buffer(void)
2689 if (kvm_enabled())
2690 kvm_flush_coalesced_mmio_buffer();
2693 #if defined(__linux__) && !defined(TARGET_S390X)
2695 #include <sys/vfs.h>
2697 #define HUGETLBFS_MAGIC 0x958458f6
2699 static long gethugepagesize(const char *path)
2701 struct statfs fs;
2702 int ret;
2704 do {
2705 ret = statfs(path, &fs);
2706 } while (ret != 0 && errno == EINTR);
2708 if (ret != 0) {
2709 perror(path);
2710 return 0;
2713 if (fs.f_type != HUGETLBFS_MAGIC)
2714 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2716 return fs.f_bsize;
2719 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2721 char *filename;
2722 void *area;
2723 int fd;
2724 #ifdef MAP_POPULATE
2725 int flags;
2726 #endif
2727 unsigned long hpagesize;
2729 hpagesize = gethugepagesize(path);
2730 if (!hpagesize) {
2731 return NULL;
2734 if (memory < hpagesize) {
2735 return NULL;
2738 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2739 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2740 return NULL;
2743 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2744 return NULL;
2747 fd = mkstemp(filename);
2748 if (fd < 0) {
2749 perror("unable to create backing store for hugepages");
2750 free(filename);
2751 return NULL;
2753 unlink(filename);
2754 free(filename);
2756 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2759 * ftruncate is not supported by hugetlbfs in older
2760 * hosts, so don't bother bailing out on errors.
2761 * If anything goes wrong with it under other filesystems,
2762 * mmap will fail.
2764 if (ftruncate(fd, memory))
2765 perror("ftruncate");
2767 #ifdef MAP_POPULATE
2768     /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2769      * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2770 * to sidestep this quirk.
2772 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2773 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2774 #else
2775 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2776 #endif
2777 if (area == MAP_FAILED) {
2778 perror("file_ram_alloc: can't mmap RAM pages");
2779 close(fd);
2780 return (NULL);
2782 return area;
2784 #endif
2786 static ram_addr_t find_ram_offset(ram_addr_t size)
2788 RAMBlock *block;
2789 ram_addr_t last = 0;
2791 QLIST_FOREACH(block, &ram_list.blocks, next)
2792 last = MAX(last, block->offset + block->length);
2794 return last;
2797 ram_addr_t qemu_ram_map(ram_addr_t size, void *host)
2799 RAMBlock *new_block;
2801 size = TARGET_PAGE_ALIGN(size);
2802 new_block = qemu_malloc(sizeof(*new_block));
2804 new_block->host = host;
2806 new_block->offset = find_ram_offset(size);
2807 new_block->length = size;
2809 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2811 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2812 (new_block->offset + size) >> TARGET_PAGE_BITS);
2813 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2814 0xff, size >> TARGET_PAGE_BITS);
2816 if (kvm_enabled())
2817 kvm_setup_guest_memory(new_block->host, size);
2819 return new_block->offset;
2822 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2824 RAMBlock *new_block;
2826 size = TARGET_PAGE_ALIGN(size);
2827 new_block = qemu_malloc(sizeof(*new_block));
2829 if (mem_path) {
2830 #if defined (__linux__) && !defined(TARGET_S390X)
2831 new_block->host = file_ram_alloc(size, mem_path);
2832 if (!new_block->host) {
2833 new_block->host = qemu_vmalloc(size);
2834 #ifdef MADV_MERGEABLE
2835 madvise(new_block->host, size, MADV_MERGEABLE);
2836 #endif
2838 #else
2839 fprintf(stderr, "-mem-path option unsupported\n");
2840 exit(1);
2841 #endif
2842 } else {
2843 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2844 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2845 new_block->host = mmap((void*)0x1000000, size,
2846 PROT_EXEC|PROT_READ|PROT_WRITE,
2847 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2848 #else
2849 new_block->host = qemu_vmalloc(size);
2850 #endif
2851 #ifdef MADV_MERGEABLE
2852 madvise(new_block->host, size, MADV_MERGEABLE);
2853 #endif
2855 new_block->offset = find_ram_offset(size);
2856 new_block->length = size;
2858 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2860 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2861 (new_block->offset + size) >> TARGET_PAGE_BITS);
2862 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2863 0xff, size >> TARGET_PAGE_BITS);
2865 if (kvm_enabled())
2866 kvm_setup_guest_memory(new_block->host, size);
2868 return new_block->offset;
2871 void qemu_ram_free(ram_addr_t addr)
2873 /* TODO: implement this. */
2876 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2877 With the exception of the softmmu code in this file, this should
2878 only be used for local memory (e.g. video ram) that the device owns,
2879 and knows it isn't going to access beyond the end of the block.
2881 It should not be used for general purpose DMA.
2882 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2884 void *qemu_get_ram_ptr(ram_addr_t addr)
2886 RAMBlock *block;
2888 QLIST_FOREACH(block, &ram_list.blocks, next) {
2889 if (addr - block->offset < block->length) {
2890 QLIST_REMOVE(block, next);
2891 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2892 return block->host + (addr - block->offset);
2896 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2897 abort();
2899 return NULL;
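/* Usage sketch (editor's illustration; the vga_ram_size variable is
   hypothetical): a device that owns a RAM block, such as video RAM, can keep
   a host pointer to it for its own accesses:

       ram_addr_t vram_offset = qemu_ram_alloc(vga_ram_size);
       uint8_t *vram_ptr = qemu_get_ram_ptr(vram_offset);

   Guest-visible DMA should instead go through cpu_physical_memory_rw() or
   cpu_physical_memory_map(), as the comment above notes. */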
2902 int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2904 RAMBlock *block;
2905 uint8_t *host = ptr;
2907 QLIST_FOREACH(block, &ram_list.blocks, next) {
2908 if (host - block->host < block->length) {
2909 *ram_addr = block->offset + (host - block->host);
2910 return 0;
2913 return -1;
2916 /* Some of the softmmu routines need to translate from a host pointer
2917 (typically a TLB entry) back to a ram offset. */
2918 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2920 ram_addr_t ram_addr;
2922 if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
2923 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2924 abort();
2926 return ram_addr;
2929 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2931 #ifdef DEBUG_UNASSIGNED
2932 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2933 #endif
2934 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2935 do_unassigned_access(addr, 0, 0, 0, 1);
2936 #endif
2937 return 0;
2940 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2942 #ifdef DEBUG_UNASSIGNED
2943 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2944 #endif
2945 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2946 do_unassigned_access(addr, 0, 0, 0, 2);
2947 #endif
2948 return 0;
2951 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2953 #ifdef DEBUG_UNASSIGNED
2954 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2955 #endif
2956 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2957 do_unassigned_access(addr, 0, 0, 0, 4);
2958 #endif
2959 return 0;
2962 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2964 #ifdef DEBUG_UNASSIGNED
2965 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2966 #endif
2967 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2968 do_unassigned_access(addr, 1, 0, 0, 1);
2969 #endif
2972 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2974 #ifdef DEBUG_UNASSIGNED
2975 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2976 #endif
2977 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2978 do_unassigned_access(addr, 1, 0, 0, 2);
2979 #endif
2982 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2984 #ifdef DEBUG_UNASSIGNED
2985 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2986 #endif
2987 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2988 do_unassigned_access(addr, 1, 0, 0, 4);
2989 #endif
2992 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2993 unassigned_mem_readb,
2994 unassigned_mem_readw,
2995 unassigned_mem_readl,
2998 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2999 unassigned_mem_writeb,
3000 unassigned_mem_writew,
3001 unassigned_mem_writel,
3004 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3005 uint32_t val)
3007 int dirty_flags;
3008 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3009 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3010 #if !defined(CONFIG_USER_ONLY)
3011 tb_invalidate_phys_page_fast(ram_addr, 1);
3012 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3013 #endif
3015 stb_p(qemu_get_ram_ptr(ram_addr), val);
3016 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3017 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3018 /* we remove the notdirty callback only if the code has been
3019 flushed */
3020 if (dirty_flags == 0xff)
3021 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3024 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3025 uint32_t val)
3027 int dirty_flags;
3028 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3029 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3030 #if !defined(CONFIG_USER_ONLY)
3031 tb_invalidate_phys_page_fast(ram_addr, 2);
3032 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3033 #endif
3035 stw_p(qemu_get_ram_ptr(ram_addr), val);
3036 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3037 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3038 /* we remove the notdirty callback only if the code has been
3039 flushed */
3040 if (dirty_flags == 0xff)
3041 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3044 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3045 uint32_t val)
3047 int dirty_flags;
3048 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3049 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3050 #if !defined(CONFIG_USER_ONLY)
3051 tb_invalidate_phys_page_fast(ram_addr, 4);
3052 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3053 #endif
3055 stl_p(qemu_get_ram_ptr(ram_addr), val);
3056 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3057 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3058 /* we remove the notdirty callback only if the code has been
3059 flushed */
3060 if (dirty_flags == 0xff)
3061 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3064 static CPUReadMemoryFunc * const error_mem_read[3] = {
3065 NULL, /* never used */
3066 NULL, /* never used */
3067 NULL, /* never used */
3070 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3071 notdirty_mem_writeb,
3072 notdirty_mem_writew,
3073 notdirty_mem_writel,
3076 /* Generate a debug exception if a watchpoint has been hit. */
3077 static void check_watchpoint(int offset, int len_mask, int flags)
3079 CPUState *env = cpu_single_env;
3080 target_ulong pc, cs_base;
3081 TranslationBlock *tb;
3082 target_ulong vaddr;
3083 CPUWatchpoint *wp;
3084 int cpu_flags;
3086 if (env->watchpoint_hit) {
3087 /* We re-entered the check after replacing the TB. Now raise
3089          * the debug interrupt so that it will trigger after the
3089 * current instruction. */
3090 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3091 return;
3093 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3094 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3095 if ((vaddr == (wp->vaddr & len_mask) ||
3096 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3097 wp->flags |= BP_WATCHPOINT_HIT;
3098 if (!env->watchpoint_hit) {
3099 env->watchpoint_hit = wp;
3100 tb = tb_find_pc(env->mem_io_pc);
3101 if (!tb) {
3102 cpu_abort(env, "check_watchpoint: could not find TB for "
3103 "pc=%p", (void *)env->mem_io_pc);
3105 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3106 tb_phys_invalidate(tb, -1);
3107 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3108 env->exception_index = EXCP_DEBUG;
3109 } else {
3110 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3111 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3113 cpu_resume_from_signal(env, NULL);
3115 } else {
3116 wp->flags &= ~BP_WATCHPOINT_HIT;
3121 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3122 so these check for a hit then pass through to the normal out-of-line
3123 phys routines. */
3124 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3126 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3127 return ldub_phys(addr);
3130 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3132 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3133 return lduw_phys(addr);
3136 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3138 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3139 return ldl_phys(addr);
3142 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3143 uint32_t val)
3145 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3146 stb_phys(addr, val);
3149 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3150 uint32_t val)
3152 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3153 stw_phys(addr, val);
3156 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3157 uint32_t val)
3159 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3160 stl_phys(addr, val);
3163 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3164 watch_mem_readb,
3165 watch_mem_readw,
3166 watch_mem_readl,
3169 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3170 watch_mem_writeb,
3171 watch_mem_writew,
3172 watch_mem_writel,
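/* Usage sketch (editor's illustration): these handlers are reached for pages
   containing a watchpoint registered with cpu_watchpoint_insert(), e.g. from
   the gdb stub:

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, len, BP_MEM_WRITE | BP_GDB, &wp);

   tlb_set_page() then marks the watched page TLB_MMIO and points its iotlb
   entry at io_mem_watch, so every access funnels through check_watchpoint()
   above. */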
3175 static inline uint32_t subpage_readlen (subpage_t *mmio,
3176 target_phys_addr_t addr,
3177 unsigned int len)
3179 unsigned int idx = SUBPAGE_IDX(addr);
3180 #if defined(DEBUG_SUBPAGE)
3181 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3182 mmio, len, addr, idx);
3183 #endif
3185 addr += mmio->region_offset[idx];
3186 idx = mmio->sub_io_index[idx];
3187 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3190 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3191 uint32_t value, unsigned int len)
3193 unsigned int idx = SUBPAGE_IDX(addr);
3194 #if defined(DEBUG_SUBPAGE)
3195 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3196 __func__, mmio, len, addr, idx, value);
3197 #endif
3199 addr += mmio->region_offset[idx];
3200 idx = mmio->sub_io_index[idx];
3201 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3204 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3206 return subpage_readlen(opaque, addr, 0);
3209 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3210 uint32_t value)
3212 subpage_writelen(opaque, addr, value, 0);
3215 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3217 return subpage_readlen(opaque, addr, 1);
3220 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3221 uint32_t value)
3223 subpage_writelen(opaque, addr, value, 1);
3226 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3228 return subpage_readlen(opaque, addr, 2);
3231 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3232 uint32_t value)
3234 subpage_writelen(opaque, addr, value, 2);
3237 static CPUReadMemoryFunc * const subpage_read[] = {
3238 &subpage_readb,
3239 &subpage_readw,
3240 &subpage_readl,
3243 static CPUWriteMemoryFunc * const subpage_write[] = {
3244 &subpage_writeb,
3245 &subpage_writew,
3246 &subpage_writel,
3249 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3250 ram_addr_t memory, ram_addr_t region_offset)
3252 int idx, eidx;
3254 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3255 return -1;
3256 idx = SUBPAGE_IDX(start);
3257 eidx = SUBPAGE_IDX(end);
3258 #if defined(DEBUG_SUBPAGE)
3259 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3260 mmio, start, end, idx, eidx, memory);
3261 #endif
3262 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3263 for (; idx <= eidx; idx++) {
3264 mmio->sub_io_index[idx] = memory;
3265 mmio->region_offset[idx] = region_offset;
3268 return 0;
3271 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3272 ram_addr_t orig_memory,
3273 ram_addr_t region_offset)
3275 subpage_t *mmio;
3276 int subpage_memory;
3278 mmio = qemu_mallocz(sizeof(subpage_t));
3280 mmio->base = base;
3281 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3282 #if defined(DEBUG_SUBPAGE)
3283 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3284 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3285 #endif
3286 *phys = subpage_memory | IO_MEM_SUBPAGE;
3287 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3289 return mmio;
3292 static int get_free_io_mem_idx(void)
3294 int i;
3296 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3297 if (!io_mem_used[i]) {
3298 io_mem_used[i] = 1;
3299 return i;
3301     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3302 return -1;
3305 /* mem_read and mem_write are arrays of functions containing the
3306 function to access byte (index 0), word (index 1) and dword (index
3307 2). Functions can be omitted with a NULL function pointer.
3308    If io_index is non-zero, the corresponding io zone is
3309 modified. If it is zero, a new io zone is allocated. The return
3310 value can be used with cpu_register_physical_memory(). (-1) is
3311    returned on error. */
3312 static int cpu_register_io_memory_fixed(int io_index,
3313 CPUReadMemoryFunc * const *mem_read,
3314 CPUWriteMemoryFunc * const *mem_write,
3315 void *opaque)
3317 int i;
3319 if (io_index <= 0) {
3320 io_index = get_free_io_mem_idx();
3321 if (io_index == -1)
3322 return io_index;
3323 } else {
3324 io_index >>= IO_MEM_SHIFT;
3325 if (io_index >= IO_MEM_NB_ENTRIES)
3326 return -1;
3329 for (i = 0; i < 3; ++i) {
3330 io_mem_read[io_index][i]
3331 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3333 for (i = 0; i < 3; ++i) {
3334 io_mem_write[io_index][i]
3335 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3337 io_mem_opaque[io_index] = opaque;
3339 return (io_index << IO_MEM_SHIFT);
3342 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3343 CPUWriteMemoryFunc * const *mem_write,
3344 void *opaque)
3346 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
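/* Usage sketch (editor's illustration; the foo_* handlers, the device state
   s and the 0x10000000 base are hypothetical).  A device registers its
   callbacks and then maps the returned index over a physical range:

       static CPUReadMemoryFunc * const foo_read[3] = {
           foo_readb, foo_readw, foo_readl,
       };
       static CPUWriteMemoryFunc * const foo_write[3] = {
           foo_writeb, foo_writew, foo_writel,
       };

       int io = cpu_register_io_memory(foo_read, foo_write, s);
       cpu_register_physical_memory_offset(0x10000000, 0x1000, io, 0);

   The return value already has the io index shifted into the low bits of a
   phys_offset, which is why it can be passed straight to
   cpu_register_physical_memory(). */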
3349 void cpu_unregister_io_memory(int io_table_address)
3351 int i;
3352 int io_index = io_table_address >> IO_MEM_SHIFT;
3354     for (i = 0; i < 3; i++) {
3355 io_mem_read[io_index][i] = unassigned_mem_read[i];
3356 io_mem_write[io_index][i] = unassigned_mem_write[i];
3358 io_mem_opaque[io_index] = NULL;
3359 io_mem_used[io_index] = 0;
3362 static void io_mem_init(void)
3364 int i;
3366 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3367 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3368 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3369 for (i=0; i<5; i++)
3370 io_mem_used[i] = 1;
3372 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3373 watch_mem_write, NULL);
3376 #endif /* !defined(CONFIG_USER_ONLY) */
3378 /* physical memory access (slow version, mainly for debug) */
3379 #if defined(CONFIG_USER_ONLY)
3380 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3381 uint8_t *buf, int len, int is_write)
3383 int l, flags;
3384 target_ulong page;
3385 void * p;
3387 while (len > 0) {
3388 page = addr & TARGET_PAGE_MASK;
3389 l = (page + TARGET_PAGE_SIZE) - addr;
3390 if (l > len)
3391 l = len;
3392 flags = page_get_flags(page);
3393 if (!(flags & PAGE_VALID))
3394 return -1;
3395 if (is_write) {
3396 if (!(flags & PAGE_WRITE))
3397 return -1;
3398 /* XXX: this code should not depend on lock_user */
3399 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3400 return -1;
3401 memcpy(p, buf, l);
3402 unlock_user(p, addr, l);
3403 } else {
3404 if (!(flags & PAGE_READ))
3405 return -1;
3406 /* XXX: this code should not depend on lock_user */
3407 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3408 return -1;
3409 memcpy(buf, p, l);
3410 unlock_user(p, addr, 0);
3412 len -= l;
3413 buf += l;
3414 addr += l;
3416 return 0;
3419 #else
3420 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3421 int len, int is_write)
3423 int l, io_index;
3424 uint8_t *ptr;
3425 uint32_t val;
3426 target_phys_addr_t page;
3427 unsigned long pd;
3428 PhysPageDesc *p;
3430 while (len > 0) {
3431 page = addr & TARGET_PAGE_MASK;
3432 l = (page + TARGET_PAGE_SIZE) - addr;
3433 if (l > len)
3434 l = len;
3435 p = phys_page_find(page >> TARGET_PAGE_BITS);
3436 if (!p) {
3437 pd = IO_MEM_UNASSIGNED;
3438 } else {
3439 pd = p->phys_offset;
3442 if (is_write) {
3443 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3444 target_phys_addr_t addr1 = addr;
3445 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3446 if (p)
3447 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3448 /* XXX: could force cpu_single_env to NULL to avoid
3449 potential bugs */
3450 if (l >= 4 && ((addr1 & 3) == 0)) {
3451 /* 32 bit write access */
3452 val = ldl_p(buf);
3453 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3454 l = 4;
3455 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3456 /* 16 bit write access */
3457 val = lduw_p(buf);
3458 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3459 l = 2;
3460 } else {
3461 /* 8 bit write access */
3462 val = ldub_p(buf);
3463 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3464 l = 1;
3466 } else {
3467 unsigned long addr1;
3468 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3469 /* RAM case */
3470 ptr = qemu_get_ram_ptr(addr1);
3471 memcpy(ptr, buf, l);
3472 if (!cpu_physical_memory_is_dirty(addr1)) {
3473 /* invalidate code */
3474 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3475 /* set dirty bit */
3476 cpu_physical_memory_set_dirty_flags(
3477 addr1, (0xff & ~CODE_DIRTY_FLAG));
3479             /* qemu doesn't execute guest code directly, but kvm does,
3480                so flush the instruction caches */
3481 if (kvm_enabled())
3482 flush_icache_range((unsigned long)ptr,
3483 ((unsigned long)ptr)+l);
3485 } else {
3486 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3487 !(pd & IO_MEM_ROMD)) {
3488 target_phys_addr_t addr1 = addr;
3489 /* I/O case */
3490 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3491 if (p)
3492 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3493 if (l >= 4 && ((addr1 & 3) == 0)) {
3494 /* 32 bit read access */
3495 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3496 stl_p(buf, val);
3497 l = 4;
3498 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3499 /* 16 bit read access */
3500 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3501 stw_p(buf, val);
3502 l = 2;
3503 } else {
3504 /* 8 bit read access */
3505 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3506 stb_p(buf, val);
3507 l = 1;
3509 } else {
3510 /* RAM case */
3511 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3512 (addr & ~TARGET_PAGE_MASK);
3513 memcpy(buf, ptr, l);
3516 len -= l;
3517 buf += l;
3518 addr += l;
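/* Usage sketch (editor's illustration; desc_addr stands for a guest-physical
   address the device was programmed with): device models normally call the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, which end
   up here with is_write set to 0 or 1:

       uint8_t desc[16];
       cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
       cpu_physical_memory_write(desc_addr, desc, sizeof(desc));
*/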
3522 /* used for ROM loading : can write in RAM and ROM */
3523 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3524 const uint8_t *buf, int len)
3526 int l;
3527 uint8_t *ptr;
3528 target_phys_addr_t page;
3529 unsigned long pd;
3530 PhysPageDesc *p;
3532 while (len > 0) {
3533 page = addr & TARGET_PAGE_MASK;
3534 l = (page + TARGET_PAGE_SIZE) - addr;
3535 if (l > len)
3536 l = len;
3537 p = phys_page_find(page >> TARGET_PAGE_BITS);
3538 if (!p) {
3539 pd = IO_MEM_UNASSIGNED;
3540 } else {
3541 pd = p->phys_offset;
3544 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3545 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3546 !(pd & IO_MEM_ROMD)) {
3547 /* do nothing */
3548 } else {
3549 unsigned long addr1;
3550 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3551 /* ROM/RAM case */
3552 ptr = qemu_get_ram_ptr(addr1);
3553 memcpy(ptr, buf, l);
3555 len -= l;
3556 buf += l;
3557 addr += l;
3561 typedef struct {
3562 void *buffer;
3563 target_phys_addr_t addr;
3564 target_phys_addr_t len;
3565 } BounceBuffer;
3567 static BounceBuffer bounce;
3569 typedef struct MapClient {
3570 void *opaque;
3571 void (*callback)(void *opaque);
3572 QLIST_ENTRY(MapClient) link;
3573 } MapClient;
3575 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3576 = QLIST_HEAD_INITIALIZER(map_client_list);
3578 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3580 MapClient *client = qemu_malloc(sizeof(*client));
3582 client->opaque = opaque;
3583 client->callback = callback;
3584 QLIST_INSERT_HEAD(&map_client_list, client, link);
3585 return client;
3588 void cpu_unregister_map_client(void *_client)
3590 MapClient *client = (MapClient *)_client;
3592 QLIST_REMOVE(client, link);
3593 qemu_free(client);
3596 static void cpu_notify_map_clients(void)
3598 MapClient *client;
3600 while (!QLIST_EMPTY(&map_client_list)) {
3601 client = QLIST_FIRST(&map_client_list);
3602 client->callback(client->opaque);
3603 cpu_unregister_map_client(client);
3607 /* Map a physical memory region into a host virtual address.
3608 * May map a subset of the requested range, given by and returned in *plen.
3609 * May return NULL if resources needed to perform the mapping are exhausted.
3610 * Use only for reads OR writes - not for read-modify-write operations.
3611 * Use cpu_register_map_client() to know when retrying the map operation is
3612 * likely to succeed.
3614 void *cpu_physical_memory_map(target_phys_addr_t addr,
3615 target_phys_addr_t *plen,
3616 int is_write)
3618 target_phys_addr_t len = *plen;
3619 target_phys_addr_t done = 0;
3620 int l;
3621 uint8_t *ret = NULL;
3622 uint8_t *ptr;
3623 target_phys_addr_t page;
3624 unsigned long pd;
3625 PhysPageDesc *p;
3626 unsigned long addr1;
3628 while (len > 0) {
3629 page = addr & TARGET_PAGE_MASK;
3630 l = (page + TARGET_PAGE_SIZE) - addr;
3631 if (l > len)
3632 l = len;
3633 p = phys_page_find(page >> TARGET_PAGE_BITS);
3634 if (!p) {
3635 pd = IO_MEM_UNASSIGNED;
3636 } else {
3637 pd = p->phys_offset;
3640 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3641 if (done || bounce.buffer) {
3642 break;
3644 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3645 bounce.addr = addr;
3646 bounce.len = l;
3647 if (!is_write) {
3648 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3650 ptr = bounce.buffer;
3651 } else {
3652 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3653 ptr = qemu_get_ram_ptr(addr1);
3655 if (!done) {
3656 ret = ptr;
3657 } else if (ret + done != ptr) {
3658 break;
3661 len -= l;
3662 addr += l;
3663 done += l;
3665 *plen = done;
3666 return ret;
3669 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3670 * Will also mark the memory as dirty if is_write == 1. access_len gives
3671 * the amount of memory that was actually read or written by the caller.
3673 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3674 int is_write, target_phys_addr_t access_len)
3676 unsigned long flush_len = (unsigned long)access_len;
3678 if (buffer != bounce.buffer) {
3679 if (is_write) {
3680 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3681 while (access_len) {
3682 unsigned l;
3683 l = TARGET_PAGE_SIZE;
3684 if (l > access_len)
3685 l = access_len;
3686 if (!cpu_physical_memory_is_dirty(addr1)) {
3687 /* invalidate code */
3688 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3689 /* set dirty bit */
3690 cpu_physical_memory_set_dirty_flags(
3691 addr1, (0xff & ~CODE_DIRTY_FLAG));
3693 addr1 += l;
3694 access_len -= l;
3696 dma_flush_range((unsigned long)buffer,
3697 (unsigned long)buffer + flush_len);
3699 return;
3701 if (is_write) {
3702 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3704 qemu_vfree(bounce.buffer);
3705 bounce.buffer = NULL;
3706 cpu_notify_map_clients();
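/* Usage sketch (editor's illustration; addr, len, is_write and
   do_device_transfer() stand in for the caller's context): a zero-copy DMA
   path maps the guest buffer, accesses it through the host pointer and
   unmaps it afterwards; if the mapping comes back truncated or NULL the
   caller falls back to a bounce buffer or retries via
   cpu_register_map_client():

       target_phys_addr_t plen = len;
       void *buf = cpu_physical_memory_map(addr, &plen, is_write);
       if (buf && plen == len) {
           do_device_transfer(buf, len);
           cpu_physical_memory_unmap(buf, plen, is_write, plen);
       } else if (buf) {
           cpu_physical_memory_unmap(buf, plen, is_write, 0);
       }
*/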
3709 /* warning: addr must be aligned */
3710 uint32_t ldl_phys(target_phys_addr_t addr)
3712 int io_index;
3713 uint8_t *ptr;
3714 uint32_t val;
3715 unsigned long pd;
3716 PhysPageDesc *p;
3718 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3719 if (!p) {
3720 pd = IO_MEM_UNASSIGNED;
3721 } else {
3722 pd = p->phys_offset;
3725 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3726 !(pd & IO_MEM_ROMD)) {
3727 /* I/O case */
3728 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3729 if (p)
3730 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3731 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3732 } else {
3733 /* RAM case */
3734 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3735 (addr & ~TARGET_PAGE_MASK);
3736 val = ldl_p(ptr);
3738 return val;
3741 /* warning: addr must be aligned */
3742 uint64_t ldq_phys(target_phys_addr_t addr)
3744 int io_index;
3745 uint8_t *ptr;
3746 uint64_t val;
3747 unsigned long pd;
3748 PhysPageDesc *p;
3750 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3751 if (!p) {
3752 pd = IO_MEM_UNASSIGNED;
3753 } else {
3754 pd = p->phys_offset;
3757 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3758 !(pd & IO_MEM_ROMD)) {
3759 /* I/O case */
3760 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3761 if (p)
3762 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3763 #ifdef TARGET_WORDS_BIGENDIAN
3764 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3765 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3766 #else
3767 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3768 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3769 #endif
3770 } else {
3771 /* RAM case */
3772 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3773 (addr & ~TARGET_PAGE_MASK);
3774 val = ldq_p(ptr);
3776 return val;
3779 /* XXX: optimize */
3780 uint32_t ldub_phys(target_phys_addr_t addr)
3782 uint8_t val;
3783 cpu_physical_memory_read(addr, &val, 1);
3784 return val;
3787 /* warning: addr must be aligned */
3788 uint32_t lduw_phys(target_phys_addr_t addr)
3790 int io_index;
3791 uint8_t *ptr;
3792 uint64_t val;
3793 unsigned long pd;
3794 PhysPageDesc *p;
3796 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3797 if (!p) {
3798 pd = IO_MEM_UNASSIGNED;
3799 } else {
3800 pd = p->phys_offset;
3803 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3804 !(pd & IO_MEM_ROMD)) {
3805 /* I/O case */
3806 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3807 if (p)
3808 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3809 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3810 } else {
3811 /* RAM case */
3812 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3813 (addr & ~TARGET_PAGE_MASK);
3814 val = lduw_p(ptr);
3816 return val;
3819 /* warning: addr must be aligned. The ram page is not marked as dirty
3820 and the code inside is not invalidated. It is useful if the dirty
3821 bits are used to track modified PTEs */
3822 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3824 int io_index;
3825 uint8_t *ptr;
3826 unsigned long pd;
3827 PhysPageDesc *p;
3829 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3830 if (!p) {
3831 pd = IO_MEM_UNASSIGNED;
3832 } else {
3833 pd = p->phys_offset;
3836 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3837 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3838 if (p)
3839 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3840 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3841 } else {
3842 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3843 ptr = qemu_get_ram_ptr(addr1);
3844 stl_p(ptr, val);
3846 if (unlikely(in_migration)) {
3847 if (!cpu_physical_memory_is_dirty(addr1)) {
3848 /* invalidate code */
3849 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3850 /* set dirty bit */
3851 cpu_physical_memory_set_dirty_flags(
3852 addr1, (0xff & ~CODE_DIRTY_FLAG));
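/* Usage sketch (editor's illustration; PG_ACCESSED_MASK and pte_addr are
   borrowed from the i386 page-table helpers purely as an example): a target
   MMU fill routine that sets accessed/dirty bits in a guest PTE can use this
   variant so that the update itself does not dirty the RAM page, leaving the
   dirty bitmap to track only guest-initiated PTE modifications:

       pte |= PG_ACCESSED_MASK;
       stl_phys_notdirty(pte_addr, pte);
*/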
3858 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3860 int io_index;
3861 uint8_t *ptr;
3862 unsigned long pd;
3863 PhysPageDesc *p;
3865 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3866 if (!p) {
3867 pd = IO_MEM_UNASSIGNED;
3868 } else {
3869 pd = p->phys_offset;
3872 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3873 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3874 if (p)
3875 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3876 #ifdef TARGET_WORDS_BIGENDIAN
3877 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3878 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3879 #else
3880 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3881 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3882 #endif
3883 } else {
3884 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3885 (addr & ~TARGET_PAGE_MASK);
3886 stq_p(ptr, val);
3890 /* warning: addr must be aligned */
3891 void stl_phys(target_phys_addr_t addr, uint32_t val)
3893 int io_index;
3894 uint8_t *ptr;
3895 unsigned long pd;
3896 PhysPageDesc *p;
3898 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3899 if (!p) {
3900 pd = IO_MEM_UNASSIGNED;
3901 } else {
3902 pd = p->phys_offset;
3905 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3906 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3907 if (p)
3908 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3909 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3910 } else {
3911 unsigned long addr1;
3912 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3913 /* RAM case */
3914 ptr = qemu_get_ram_ptr(addr1);
3915 stl_p(ptr, val);
3916 if (!cpu_physical_memory_is_dirty(addr1)) {
3917 /* invalidate code */
3918 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3919 /* set dirty bit */
3920 cpu_physical_memory_set_dirty_flags(addr1,
3921 (0xff & ~CODE_DIRTY_FLAG));
3926 /* XXX: optimize */
3927 void stb_phys(target_phys_addr_t addr, uint32_t val)
3929 uint8_t v = val;
3930 cpu_physical_memory_write(addr, &v, 1);
3933 /* warning: addr must be aligned */
3934 void stw_phys(target_phys_addr_t addr, uint32_t val)
3936 int io_index;
3937 uint8_t *ptr;
3938 unsigned long pd;
3939 PhysPageDesc *p;
3941 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3942 if (!p) {
3943 pd = IO_MEM_UNASSIGNED;
3944 } else {
3945 pd = p->phys_offset;
3948 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3949 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3950 if (p)
3951 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3952 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3953 } else {
3954 unsigned long addr1;
3955 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3956 /* RAM case */
3957 ptr = qemu_get_ram_ptr(addr1);
3958 stw_p(ptr, val);
3959 if (!cpu_physical_memory_is_dirty(addr1)) {
3960 /* invalidate code */
3961 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3962 /* set dirty bit */
3963 cpu_physical_memory_set_dirty_flags(addr1,
3964 (0xff & ~CODE_DIRTY_FLAG));
3969 /* XXX: optimize */
3970 void stq_phys(target_phys_addr_t addr, uint64_t val)
3972 val = tswap64(val);
3973 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3976 /* virtual memory access for debug (includes writing to ROM) */
3977 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3978 uint8_t *buf, int len, int is_write)
3980 int l;
3981 target_phys_addr_t phys_addr;
3982 target_ulong page;
3984 while (len > 0) {
3985 page = addr & TARGET_PAGE_MASK;
3986 phys_addr = cpu_get_phys_page_debug(env, page);
3987 /* if no physical page mapped, return an error */
3988 if (phys_addr == -1)
3989 return -1;
3990 l = (page + TARGET_PAGE_SIZE) - addr;
3991 if (l > len)
3992 l = len;
3993 phys_addr += (addr & ~TARGET_PAGE_MASK);
3994 if (is_write)
3995 cpu_physical_memory_write_rom(phys_addr, buf, l);
3996 else
3997 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3998 len -= l;
3999 buf += l;
4000 addr += l;
4002 return 0;
4004 #endif
4006 /* in deterministic execution mode, instructions doing device I/Os
4007 must be at the end of the TB */
4008 void cpu_io_recompile(CPUState *env, void *retaddr)
4010 TranslationBlock *tb;
4011 uint32_t n, cflags;
4012 target_ulong pc, cs_base;
4013 uint64_t flags;
4015 tb = tb_find_pc((unsigned long)retaddr);
4016 if (!tb) {
4017 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4018 retaddr);
4020 n = env->icount_decr.u16.low + tb->icount;
4021 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4022 /* Calculate how many instructions had been executed before the fault
4023 occurred. */
4024 n = n - env->icount_decr.u16.low;
4025 /* Generate a new TB ending on the I/O insn. */
4026 n++;
4027 /* On MIPS and SH, delay slot instructions can only be restarted if
4028 they were already the first instruction in the TB. If this is not
4029 the first instruction in a TB then re-execute the preceding
4030 branch. */
4031 #if defined(TARGET_MIPS)
4032 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4033 env->active_tc.PC -= 4;
4034 env->icount_decr.u16.low++;
4035 env->hflags &= ~MIPS_HFLAG_BMASK;
4037 #elif defined(TARGET_SH4)
4038 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4039 && n > 1) {
4040 env->pc -= 2;
4041 env->icount_decr.u16.low++;
4042 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4044 #endif
4045 /* This should never happen. */
4046 if (n > CF_COUNT_MASK)
4047 cpu_abort(env, "TB too big during recompile");
4049 cflags = n | CF_LAST_IO;
4050 pc = tb->pc;
4051 cs_base = tb->cs_base;
4052 flags = tb->flags;
4053 tb_phys_invalidate(tb, -1);
4054 /* FIXME: In theory this could raise an exception. In practice
4055 we have already translated the block once so it's probably ok. */
4056 tb_gen_code(env, pc, cs_base, flags, cflags);
4057 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4058 the first in the TB) then we end up generating a whole new TB and
4059 repeating the fault, which is horribly inefficient.
4060 Better would be to execute just this insn uncached, or generate a
4061 second new TB. */
4062 cpu_resume_from_signal(env, NULL);
4065 #if !defined(CONFIG_USER_ONLY)
4067 void dump_exec_info(FILE *f,
4068 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4070 int i, target_code_size, max_target_code_size;
4071 int direct_jmp_count, direct_jmp2_count, cross_page;
4072 TranslationBlock *tb;
4074 target_code_size = 0;
4075 max_target_code_size = 0;
4076 cross_page = 0;
4077 direct_jmp_count = 0;
4078 direct_jmp2_count = 0;
4079 for(i = 0; i < nb_tbs; i++) {
4080 tb = &tbs[i];
4081 target_code_size += tb->size;
4082 if (tb->size > max_target_code_size)
4083 max_target_code_size = tb->size;
4084 if (tb->page_addr[1] != -1)
4085 cross_page++;
4086 if (tb->tb_next_offset[0] != 0xffff) {
4087 direct_jmp_count++;
4088 if (tb->tb_next_offset[1] != 0xffff) {
4089 direct_jmp2_count++;
4093 /* XXX: avoid using doubles ? */
4094 cpu_fprintf(f, "Translation buffer state:\n");
4095 cpu_fprintf(f, "gen code size %ld/%ld\n",
4096 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4097 cpu_fprintf(f, "TB count %d/%d\n",
4098 nb_tbs, code_gen_max_blocks);
4099 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4100 nb_tbs ? target_code_size / nb_tbs : 0,
4101 max_target_code_size);
4102 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4103 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4104 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4105 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4106 cross_page,
4107 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4108 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4109 direct_jmp_count,
4110 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4111 direct_jmp2_count,
4112 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4113 cpu_fprintf(f, "\nStatistics:\n");
4114 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4115 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4116 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4117 #ifdef CONFIG_PROFILER
4118 tcg_dump_info(f, cpu_fprintf);
4119 #endif
4122 #define MMUSUFFIX _cmmu
4123 #define GETPC() NULL
4124 #define env cpu_single_env
4125 #define SOFTMMU_CODE_ACCESS
4127 #define SHIFT 0
4128 #include "softmmu_template.h"
4130 #define SHIFT 1
4131 #include "softmmu_template.h"
4133 #define SHIFT 2
4134 #include "softmmu_template.h"
4136 #define SHIFT 3
4137 #include "softmmu_template.h"
4139 #undef env
4141 #endif