exec.c (qemu-kvm/stefanha.git)
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "cache-utils.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "osdep.h"
46 #include "kvm.h"
47 #include "qemu-timer.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #include <signal.h>
51 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
52 #include <sys/param.h>
53 #if __FreeBSD_version >= 700104
54 #define HAVE_KINFO_GETVMMAP
55 #define sigqueue sigqueue_freebsd /* avoid redefinition */
56 #include <sys/time.h>
57 #include <sys/proc.h>
58 #include <machine/profile.h>
59 #define _KERNEL
60 #include <sys/user.h>
61 #undef _KERNEL
62 #undef sigqueue
63 #include <libutil.h>
64 #endif
65 #endif
66 #endif
68 //#define DEBUG_TB_INVALIDATE
69 //#define DEBUG_FLUSH
70 //#define DEBUG_TLB
71 //#define DEBUG_UNASSIGNED
73 /* make various TB consistency checks */
74 //#define DEBUG_TB_CHECK
75 //#define DEBUG_TLB_CHECK
77 //#define DEBUG_IOPORT
78 //#define DEBUG_SUBPAGE
80 #if !defined(CONFIG_USER_ONLY)
81 /* TB consistency checks only implemented for usermode emulation. */
82 #undef DEBUG_TB_CHECK
83 #endif
85 #define SMC_BITMAP_USE_THRESHOLD 10
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97    section close to the code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #elif defined(_WIN32)
102 /* Maximum alignment for Win32 is 16. */
103 #define code_gen_section \
104 __attribute__((aligned (16)))
105 #else
106 #define code_gen_section \
107 __attribute__((aligned (32)))
108 #endif
110 uint8_t code_gen_prologue[1024] code_gen_section;
111 static uint8_t *code_gen_buffer;
112 static unsigned long code_gen_buffer_size;
113 /* threshold to flush the translated code buffer */
114 static unsigned long code_gen_buffer_max_size;
115 uint8_t *code_gen_ptr;
117 #if !defined(CONFIG_USER_ONLY)
118 int phys_ram_fd;
119 static int in_migration;
121 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
122 #endif
124 CPUState *first_cpu;
125 /* current CPU in the current thread. It is only valid inside
126 cpu_exec() */
127 CPUState *cpu_single_env;
128 /* 0 = Do not count executed instructions.
129 1 = Precise instruction counting.
130 2 = Adaptive rate instruction counting. */
131 int use_icount = 0;
132 /* Current instruction counter. While executing translated code this may
133 include some instructions that have not yet been executed. */
134 int64_t qemu_icount;
136 typedef struct PageDesc {
137 /* list of TBs intersecting this ram page */
138 TranslationBlock *first_tb;
139     /* in order to optimize self-modifying code handling, we count the
140        number of writes to a given page; past a threshold a bitmap is used */
141 unsigned int code_write_count;
142 uint8_t *code_bitmap;
143 #if defined(CONFIG_USER_ONLY)
144 unsigned long flags;
145 #endif
146 } PageDesc;
148 /* In system mode we want L1_MAP to be based on ram offsets,
149 while in user mode we want it to be based on virtual addresses. */
150 #if !defined(CONFIG_USER_ONLY)
151 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
152 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
153 #else
154 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
155 #endif
156 #else
157 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
158 #endif
160 /* Size of the L2 (and L3, etc) page tables. */
161 #define L2_BITS 10
162 #define L2_SIZE (1 << L2_BITS)
164 /* The bits remaining after N lower levels of page tables. */
165 #define P_L1_BITS_REM \
166 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
170 /* Size of the L1 page table. Avoid silly small sizes. */
171 #if P_L1_BITS_REM < 4
172 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
173 #else
174 #define P_L1_BITS P_L1_BITS_REM
175 #endif
177 #if V_L1_BITS_REM < 4
178 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
179 #else
180 #define V_L1_BITS V_L1_BITS_REM
181 #endif
183 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
184 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
186 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
187 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
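/* A page index is decomposed top-down: the upper V_L1_BITS (P_L1_BITS for
   the physical map) select a slot in the statically allocated L1 table,
   each intermediate level consumes a further L2_BITS, and the low L2_BITS
   index the leaf array, i.e. roughly
       l1_map[(index >> V_L1_SHIFT) & (V_L1_SIZE - 1)] -> ... ->
       leaf[index & (L2_SIZE - 1)]
   as implemented by page_find_alloc()/phys_page_find_alloc() below. */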
189 unsigned long qemu_real_host_page_size;
190 unsigned long qemu_host_page_bits;
191 unsigned long qemu_host_page_size;
192 unsigned long qemu_host_page_mask;
194 /* This is a multi-level map on the virtual address space.
195 The bottom level has pointers to PageDesc. */
196 static void *l1_map[V_L1_SIZE];
198 #if !defined(CONFIG_USER_ONLY)
199 typedef struct PhysPageDesc {
200 /* offset in host memory of the page + io_index in the low bits */
201 ram_addr_t phys_offset;
202 ram_addr_t region_offset;
203 } PhysPageDesc;
205 /* This is a multi-level map on the physical address space.
206 The bottom level has pointers to PhysPageDesc. */
207 static void *l1_phys_map[P_L1_SIZE];
209 static void io_mem_init(void);
211 /* io memory support */
212 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
213 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
214 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
215 static char io_mem_used[IO_MEM_NB_ENTRIES];
216 static int io_mem_watch;
217 #endif
219 /* log support */
220 #ifdef WIN32
221 static const char *logfilename = "qemu.log";
222 #else
223 static const char *logfilename = "/tmp/qemu.log";
224 #endif
225 FILE *logfile;
226 int loglevel;
227 static int log_append = 0;
229 /* statistics */
230 #if !defined(CONFIG_USER_ONLY)
231 static int tlb_flush_count;
232 #endif
233 static int tb_flush_count;
234 static int tb_phys_invalidate_count;
236 #ifdef _WIN32
237 static void map_exec(void *addr, long size)
239 DWORD old_protect;
240 VirtualProtect(addr, size,
241 PAGE_EXECUTE_READWRITE, &old_protect);
244 #else
245 static void map_exec(void *addr, long size)
247 unsigned long start, end, page_size;
249 page_size = getpagesize();
250 start = (unsigned long)addr;
251 start &= ~(page_size - 1);
253 end = (unsigned long)addr + size;
254 end += page_size - 1;
255 end &= ~(page_size - 1);
257 mprotect((void *)start, end - start,
258 PROT_READ | PROT_WRITE | PROT_EXEC);
260 #endif
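/* Determine the host page size, derive qemu_host_page_size/bits/mask from
   it (never smaller than TARGET_PAGE_SIZE) and, for BSD user-mode
   emulation, walk the already existing host mappings so that they can be
   marked PAGE_RESERVED and are never handed out to the guest. */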
262 static void page_init(void)
264 /* NOTE: we can always suppose that qemu_host_page_size >=
265 TARGET_PAGE_SIZE */
266 #ifdef _WIN32
268 SYSTEM_INFO system_info;
270 GetSystemInfo(&system_info);
271 qemu_real_host_page_size = system_info.dwPageSize;
273 #else
274 qemu_real_host_page_size = getpagesize();
275 #endif
276 if (qemu_host_page_size == 0)
277 qemu_host_page_size = qemu_real_host_page_size;
278 if (qemu_host_page_size < TARGET_PAGE_SIZE)
279 qemu_host_page_size = TARGET_PAGE_SIZE;
280 qemu_host_page_bits = 0;
281 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
282 qemu_host_page_bits++;
283 qemu_host_page_mask = ~(qemu_host_page_size - 1);
285 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
287 #ifdef HAVE_KINFO_GETVMMAP
288 struct kinfo_vmentry *freep;
289 int i, cnt;
291 freep = kinfo_getvmmap(getpid(), &cnt);
292 if (freep) {
293 mmap_lock();
294 for (i = 0; i < cnt; i++) {
295 unsigned long startaddr, endaddr;
297 startaddr = freep[i].kve_start;
298 endaddr = freep[i].kve_end;
299 if (h2g_valid(startaddr)) {
300 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
302 if (h2g_valid(endaddr)) {
303 endaddr = h2g(endaddr);
304 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
305 } else {
306 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
307 endaddr = ~0ul;
308 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
309 #endif
313 free(freep);
314 mmap_unlock();
316 #else
317 FILE *f;
319 last_brk = (unsigned long)sbrk(0);
321 f = fopen("/compat/linux/proc/self/maps", "r");
322 if (f) {
323 mmap_lock();
325 do {
326 unsigned long startaddr, endaddr;
327 int n;
329 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
331 if (n == 2 && h2g_valid(startaddr)) {
332 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
334 if (h2g_valid(endaddr)) {
335 endaddr = h2g(endaddr);
336 } else {
337 endaddr = ~0ul;
339 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
341 } while (!feof(f));
343 fclose(f);
344 mmap_unlock();
346 #endif
348 #endif
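/* Walk (and optionally populate) the multi-level l1_map radix tree and
   return the PageDesc for a virtual page index.  With alloc == 0 the walk
   returns NULL as soon as a missing level is found; with alloc != 0 the
   intermediate tables and the leaf array are created on demand.  In user
   mode the allocations are done with mmap() because qemu_malloc() may
   recurse into a locked mutex. */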
351 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
353 PageDesc *pd;
354 void **lp;
355 int i;
357 #if defined(CONFIG_USER_ONLY)
358 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
359 # define ALLOC(P, SIZE) \
360 do { \
361 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
362 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
363 } while (0)
364 #else
365 # define ALLOC(P, SIZE) \
366 do { P = qemu_mallocz(SIZE); } while (0)
367 #endif
369 /* Level 1. Always allocated. */
370 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
372 /* Level 2..N-1. */
373 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
374 void **p = *lp;
376 if (p == NULL) {
377 if (!alloc) {
378 return NULL;
380 ALLOC(p, sizeof(void *) * L2_SIZE);
381 *lp = p;
384 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
387 pd = *lp;
388 if (pd == NULL) {
389 if (!alloc) {
390 return NULL;
392 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
393 *lp = pd;
396 #undef ALLOC
398 return pd + (index & (L2_SIZE - 1));
401 static inline PageDesc *page_find(tb_page_addr_t index)
403 return page_find_alloc(index, 0);
406 #if !defined(CONFIG_USER_ONLY)
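/* Same walk as page_find_alloc(), but over l1_phys_map and keyed by the
   physical page index.  Freshly allocated leaf entries start out as
   IO_MEM_UNASSIGNED with an identity region_offset, so unassigned memory
   is routed to the unassigned I/O handlers. */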
407 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
409 PhysPageDesc *pd;
410 void **lp;
411 int i;
413 /* Level 1. Always allocated. */
414 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
416 /* Level 2..N-1. */
417 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
418 void **p = *lp;
419 if (p == NULL) {
420 if (!alloc) {
421 return NULL;
423 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
425 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
428 pd = *lp;
429 if (pd == NULL) {
430 int i;
432 if (!alloc) {
433 return NULL;
436 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
438 for (i = 0; i < L2_SIZE; i++) {
439 pd[i].phys_offset = IO_MEM_UNASSIGNED;
440 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
444 return pd + (index & (L2_SIZE - 1));
447 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
449 return phys_page_find_alloc(index, 0);
452 static void tlb_protect_code(ram_addr_t ram_addr);
453 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
454 target_ulong vaddr);
455 #define mmap_lock() do { } while(0)
456 #define mmap_unlock() do { } while(0)
457 #endif
459 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
461 #if defined(CONFIG_USER_ONLY)
462 /* Currently it is not recommended to allocate big chunks of data in
463    user mode. This will change when a dedicated libc is used. */
464 #define USE_STATIC_CODE_GEN_BUFFER
465 #endif
467 #ifdef USE_STATIC_CODE_GEN_BUFFER
468 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
469 __attribute__((aligned (CODE_GEN_ALIGN)));
470 #endif
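/* Allocate the buffer that holds the translated code.  Unless the static
   buffer is used, the mapping is constrained so that generated code can
   reach the prologue and other TBs with direct jumps or calls (MAP_32BIT
   on x86_64 Linux, fixed addresses on sparc64/arm).  When KVM is enabled
   no TCG buffer is needed, hence the early return. */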
472 static void code_gen_alloc(unsigned long tb_size)
474 if (kvm_enabled())
475 return;
477 #ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer = static_code_gen_buffer;
479 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
480 map_exec(code_gen_buffer, code_gen_buffer_size);
481 #else
482 code_gen_buffer_size = tb_size;
483 if (code_gen_buffer_size == 0) {
484 #if defined(CONFIG_USER_ONLY)
485 /* in user mode, phys_ram_size is not meaningful */
486 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
487 #else
488 /* XXX: needs adjustments */
489 code_gen_buffer_size = (unsigned long)(ram_size / 4);
490 #endif
492 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
493 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
494 /* The code gen buffer location may have constraints depending on
495 the host cpu and OS */
496 #if defined(__linux__)
498 int flags;
499 void *start = NULL;
501 flags = MAP_PRIVATE | MAP_ANONYMOUS;
502 #if defined(__x86_64__)
503 flags |= MAP_32BIT;
504 /* Cannot map more than that */
505 if (code_gen_buffer_size > (800 * 1024 * 1024))
506 code_gen_buffer_size = (800 * 1024 * 1024);
507 #elif defined(__sparc_v9__)
508 // Map the buffer below 2G, so we can use direct calls and branches
509 flags |= MAP_FIXED;
510 start = (void *) 0x60000000UL;
511 if (code_gen_buffer_size > (512 * 1024 * 1024))
512 code_gen_buffer_size = (512 * 1024 * 1024);
513 #elif defined(__arm__)
514 /* Map the buffer below 32M, so we can use direct calls and branches */
515 flags |= MAP_FIXED;
516 start = (void *) 0x01000000UL;
517 if (code_gen_buffer_size > 16 * 1024 * 1024)
518 code_gen_buffer_size = 16 * 1024 * 1024;
519 #elif defined(__s390x__)
520 /* Map the buffer so that we can use direct calls and branches. */
521 /* We have a +- 4GB range on the branches; leave some slop. */
522 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
523 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
525 start = (void *)0x90000000UL;
526 #endif
527 code_gen_buffer = mmap(start, code_gen_buffer_size,
528 PROT_WRITE | PROT_READ | PROT_EXEC,
529 flags, -1, 0);
530 if (code_gen_buffer == MAP_FAILED) {
531 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
532 exit(1);
535 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
537 int flags;
538 void *addr = NULL;
539 flags = MAP_PRIVATE | MAP_ANONYMOUS;
540 #if defined(__x86_64__)
541 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
542 * 0x40000000 is free */
543 flags |= MAP_FIXED;
544 addr = (void *)0x40000000;
545 /* Cannot map more than that */
546 if (code_gen_buffer_size > (800 * 1024 * 1024))
547 code_gen_buffer_size = (800 * 1024 * 1024);
548 #endif
549 code_gen_buffer = mmap(addr, code_gen_buffer_size,
550 PROT_WRITE | PROT_READ | PROT_EXEC,
551 flags, -1, 0);
552 if (code_gen_buffer == MAP_FAILED) {
553 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
554 exit(1);
557 #else
558 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
559 map_exec(code_gen_buffer, code_gen_buffer_size);
560 #endif
561 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
562 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
563 code_gen_buffer_max_size = code_gen_buffer_size -
564 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
565 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
566 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
569 /* Must be called before using the QEMU cpus. 'tb_size' is the size
570 (in bytes) allocated to the translation buffer. Zero means default
571 size. */
572 void cpu_exec_init_all(unsigned long tb_size)
574 cpu_gen_init();
575 code_gen_alloc(tb_size);
576 code_gen_ptr = code_gen_buffer;
577 page_init();
578 #if !defined(CONFIG_USER_ONLY)
579 io_mem_init();
580 #endif
581 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
582 /* There's no guest base to take into account, so go ahead and
583 initialize the prologue now. */
584 tcg_prologue_init(&tcg_ctx);
585 #endif
588 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
590 static int cpu_common_post_load(void *opaque, int version_id)
592 CPUState *env = opaque;
594 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
595 version_id is increased. */
596 env->interrupt_request &= ~0x01;
597 tlb_flush(env, 1);
599 return 0;
602 static const VMStateDescription vmstate_cpu_common = {
603 .name = "cpu_common",
604 .version_id = 1,
605 .minimum_version_id = 1,
606 .minimum_version_id_old = 1,
607 .post_load = cpu_common_post_load,
608 .fields = (VMStateField []) {
609 VMSTATE_UINT32(halted, CPUState),
610 VMSTATE_UINT32(interrupt_request, CPUState),
611 VMSTATE_END_OF_LIST()
614 #endif
616 CPUState *qemu_get_cpu(int cpu)
618 CPUState *env = first_cpu;
620 while (env) {
621 if (env->cpu_index == cpu)
622 break;
623 env = env->next_cpu;
626 return env;
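/* Register a new CPU: append env to the global first_cpu list, assign the
   next free cpu_index and, in system mode, register the common VMState
   and savevm handlers for it. */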
629 void cpu_exec_init(CPUState *env)
631 CPUState **penv;
632 int cpu_index;
634 #if defined(CONFIG_USER_ONLY)
635 cpu_list_lock();
636 #endif
637 env->next_cpu = NULL;
638 penv = &first_cpu;
639 cpu_index = 0;
640 while (*penv != NULL) {
641 penv = &(*penv)->next_cpu;
642 cpu_index++;
644 env->cpu_index = cpu_index;
645 env->numa_node = 0;
646 QTAILQ_INIT(&env->breakpoints);
647 QTAILQ_INIT(&env->watchpoints);
648 #ifdef _WIN32
649 env->thread_id = GetCurrentProcessId();
650 #else
651 env->thread_id = getpid();
652 #endif
653 *penv = env;
654 #if defined(CONFIG_USER_ONLY)
655 cpu_list_unlock();
656 #endif
657 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
658 vmstate_register(cpu_index, &vmstate_cpu_common, env);
659 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
660 cpu_save, cpu_load, env);
661 #endif
664 static inline void invalidate_page_bitmap(PageDesc *p)
666 if (p->code_bitmap) {
667 qemu_free(p->code_bitmap);
668 p->code_bitmap = NULL;
670 p->code_write_count = 0;
673 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
675 static void page_flush_tb_1 (int level, void **lp)
677 int i;
679 if (*lp == NULL) {
680 return;
682 if (level == 0) {
683 PageDesc *pd = *lp;
684 for (i = 0; i < L2_SIZE; ++i) {
685 pd[i].first_tb = NULL;
686 invalidate_page_bitmap(pd + i);
688 } else {
689 void **pp = *lp;
690 for (i = 0; i < L2_SIZE; ++i) {
691 page_flush_tb_1 (level - 1, pp + i);
696 static void page_flush_tb(void)
698 int i;
699 for (i = 0; i < V_L1_SIZE; i++) {
700 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
704 /* flush all the translation blocks */
705 /* XXX: tb_flush is currently not thread safe */
706 void tb_flush(CPUState *env1)
708 CPUState *env;
709 #if defined(DEBUG_FLUSH)
710 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
711 (unsigned long)(code_gen_ptr - code_gen_buffer),
712 nb_tbs, nb_tbs > 0 ?
713 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
714 #endif
715 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
716 cpu_abort(env1, "Internal error: code buffer overflow\n");
718 nb_tbs = 0;
720 for(env = first_cpu; env != NULL; env = env->next_cpu) {
721 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
724 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
725 page_flush_tb();
727 code_gen_ptr = code_gen_buffer;
728 /* XXX: flush processor icache at this point if cache flush is
729 expensive */
730 tb_flush_count++;
733 #ifdef DEBUG_TB_CHECK
735 static void tb_invalidate_check(target_ulong address)
737 TranslationBlock *tb;
738 int i;
739 address &= TARGET_PAGE_MASK;
740 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
741 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
742 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
743 address >= tb->pc + tb->size)) {
744 printf("ERROR invalidate: address=" TARGET_FMT_lx
745 " PC=%08lx size=%04x\n",
746 address, (long)tb->pc, tb->size);
752 /* verify that all the pages have correct rights for code */
753 static void tb_page_check(void)
755 TranslationBlock *tb;
756 int i, flags1, flags2;
758 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
759 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
760 flags1 = page_get_flags(tb->pc);
761 flags2 = page_get_flags(tb->pc + tb->size - 1);
762 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
763 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
764 (long)tb->pc, tb->size, flags1, flags2);
770 #endif
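/* Several lists below tag pointers with their low two bits: in the
   per-page TB lists (PageDesc.first_tb / tb->page_next) the tag is the
   page slot (0 or 1) through which the TB is linked, while in the jump
   lists a tag of 2 marks the head of the circular list.  This is why the
   code keeps masking with "(long)tb & 3" and "& ~3". */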
772 /* invalidate one TB */
773 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
774 int next_offset)
776 TranslationBlock *tb1;
777 for(;;) {
778 tb1 = *ptb;
779 if (tb1 == tb) {
780 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
781 break;
783 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
787 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
789 TranslationBlock *tb1;
790 unsigned int n1;
792 for(;;) {
793 tb1 = *ptb;
794 n1 = (long)tb1 & 3;
795 tb1 = (TranslationBlock *)((long)tb1 & ~3);
796 if (tb1 == tb) {
797 *ptb = tb1->page_next[n1];
798 break;
800 ptb = &tb1->page_next[n1];
804 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
806 TranslationBlock *tb1, **ptb;
807 unsigned int n1;
809 ptb = &tb->jmp_next[n];
810 tb1 = *ptb;
811 if (tb1) {
812 /* find tb(n) in circular list */
813 for(;;) {
814 tb1 = *ptb;
815 n1 = (long)tb1 & 3;
816 tb1 = (TranslationBlock *)((long)tb1 & ~3);
817 if (n1 == n && tb1 == tb)
818 break;
819 if (n1 == 2) {
820 ptb = &tb1->jmp_first;
821 } else {
822 ptb = &tb1->jmp_next[n1];
825 /* now we can suppress tb(n) from the list */
826 *ptb = tb->jmp_next[n];
828 tb->jmp_next[n] = NULL;
832 /* reset the jump entry 'n' of a TB so that it is not chained to
833 another TB */
834 static inline void tb_reset_jump(TranslationBlock *tb, int n)
836 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
839 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
841 CPUState *env;
842 PageDesc *p;
843 unsigned int h, n1;
844 tb_page_addr_t phys_pc;
845 TranslationBlock *tb1, *tb2;
847 /* remove the TB from the hash list */
848 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
849 h = tb_phys_hash_func(phys_pc);
850 tb_remove(&tb_phys_hash[h], tb,
851 offsetof(TranslationBlock, phys_hash_next));
853 /* remove the TB from the page list */
854 if (tb->page_addr[0] != page_addr) {
855 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
856 tb_page_remove(&p->first_tb, tb);
857 invalidate_page_bitmap(p);
859 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
860 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
861 tb_page_remove(&p->first_tb, tb);
862 invalidate_page_bitmap(p);
865 tb_invalidated_flag = 1;
867     /* remove the TB from the per-CPU tb_jmp_cache */
868 h = tb_jmp_cache_hash_func(tb->pc);
869 for(env = first_cpu; env != NULL; env = env->next_cpu) {
870 if (env->tb_jmp_cache[h] == tb)
871 env->tb_jmp_cache[h] = NULL;
874 /* suppress this TB from the two jump lists */
875 tb_jmp_remove(tb, 0);
876 tb_jmp_remove(tb, 1);
878 /* suppress any remaining jumps to this TB */
879 tb1 = tb->jmp_first;
880 for(;;) {
881 n1 = (long)tb1 & 3;
882 if (n1 == 2)
883 break;
884 tb1 = (TranslationBlock *)((long)tb1 & ~3);
885 tb2 = tb1->jmp_next[n1];
886 tb_reset_jump(tb1, n1);
887 tb1->jmp_next[n1] = NULL;
888 tb1 = tb2;
890 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
892 tb_phys_invalidate_count++;
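/* Set bits [start, start + len) in the byte-array bitmap 'tab'.  Used by
   build_page_bitmap() to record which parts of a page contain translated
   code. */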
895 static inline void set_bits(uint8_t *tab, int start, int len)
897 int end, mask, end1;
899 end = start + len;
900 tab += start >> 3;
901 mask = 0xff << (start & 7);
902 if ((start & ~7) == (end & ~7)) {
903 if (start < end) {
904 mask &= ~(0xff << (end & 7));
905 *tab |= mask;
907 } else {
908 *tab++ |= mask;
909 start = (start + 8) & ~7;
910 end1 = end & ~7;
911 while (start < end1) {
912 *tab++ = 0xff;
913 start += 8;
915 if (start < end) {
916 mask = ~(0xff << (end & 7));
917 *tab |= mask;
922 static void build_page_bitmap(PageDesc *p)
924 int n, tb_start, tb_end;
925 TranslationBlock *tb;
927 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
929 tb = p->first_tb;
930 while (tb != NULL) {
931 n = (long)tb & 3;
932 tb = (TranslationBlock *)((long)tb & ~3);
933 /* NOTE: this is subtle as a TB may span two physical pages */
934 if (n == 0) {
935 /* NOTE: tb_end may be after the end of the page, but
936 it is not a problem */
937 tb_start = tb->pc & ~TARGET_PAGE_MASK;
938 tb_end = tb_start + tb->size;
939 if (tb_end > TARGET_PAGE_SIZE)
940 tb_end = TARGET_PAGE_SIZE;
941 } else {
942 tb_start = 0;
943 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
945 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
946 tb = tb->page_next[n];
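/* Translate one block starting at 'pc' for the current CPU state.  If the
   TB cache is full, everything is flushed and the allocation retried.  The
   resulting TB is linked into the physical hash and page lists, taking
   into account that a block may straddle two guest pages (phys_page2). */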
950 TranslationBlock *tb_gen_code(CPUState *env,
951 target_ulong pc, target_ulong cs_base,
952 int flags, int cflags)
954 TranslationBlock *tb;
955 uint8_t *tc_ptr;
956 tb_page_addr_t phys_pc, phys_page2;
957 target_ulong virt_page2;
958 int code_gen_size;
960 phys_pc = get_page_addr_code(env, pc);
961 tb = tb_alloc(pc);
962 if (!tb) {
963 /* flush must be done */
964 tb_flush(env);
965 /* cannot fail at this point */
966 tb = tb_alloc(pc);
967 /* Don't forget to invalidate previous TB info. */
968 tb_invalidated_flag = 1;
970 tc_ptr = code_gen_ptr;
971 tb->tc_ptr = tc_ptr;
972 tb->cs_base = cs_base;
973 tb->flags = flags;
974 tb->cflags = cflags;
975 cpu_gen_code(env, tb, &code_gen_size);
976 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
978 /* check next page if needed */
979 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
980 phys_page2 = -1;
981 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
982 phys_page2 = get_page_addr_code(env, virt_page2);
984 tb_link_page(tb, phys_pc, phys_page2);
985 return tb;
988 /* invalidate all TBs which intersect with the target physical page
989 starting in range [start;end[. NOTE: start and end must refer to
990 the same physical page. 'is_cpu_write_access' should be true if called
991 from a real cpu write access: the virtual CPU will exit the current
992 TB if code is modified inside this TB. */
993 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
994 int is_cpu_write_access)
996 TranslationBlock *tb, *tb_next, *saved_tb;
997 CPUState *env = cpu_single_env;
998 tb_page_addr_t tb_start, tb_end;
999 PageDesc *p;
1000 int n;
1001 #ifdef TARGET_HAS_PRECISE_SMC
1002 int current_tb_not_found = is_cpu_write_access;
1003 TranslationBlock *current_tb = NULL;
1004 int current_tb_modified = 0;
1005 target_ulong current_pc = 0;
1006 target_ulong current_cs_base = 0;
1007 int current_flags = 0;
1008 #endif /* TARGET_HAS_PRECISE_SMC */
1010 p = page_find(start >> TARGET_PAGE_BITS);
1011 if (!p)
1012 return;
1013 if (!p->code_bitmap &&
1014 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1015 is_cpu_write_access) {
1016 /* build code bitmap */
1017 build_page_bitmap(p);
1020 /* we remove all the TBs in the range [start, end[ */
1021 /* XXX: see if in some cases it could be faster to invalidate all the code */
1022 tb = p->first_tb;
1023 while (tb != NULL) {
1024 n = (long)tb & 3;
1025 tb = (TranslationBlock *)((long)tb & ~3);
1026 tb_next = tb->page_next[n];
1027 /* NOTE: this is subtle as a TB may span two physical pages */
1028 if (n == 0) {
1029 /* NOTE: tb_end may be after the end of the page, but
1030 it is not a problem */
1031 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1032 tb_end = tb_start + tb->size;
1033 } else {
1034 tb_start = tb->page_addr[1];
1035 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1037 if (!(tb_end <= start || tb_start >= end)) {
1038 #ifdef TARGET_HAS_PRECISE_SMC
1039 if (current_tb_not_found) {
1040 current_tb_not_found = 0;
1041 current_tb = NULL;
1042 if (env->mem_io_pc) {
1043 /* now we have a real cpu fault */
1044 current_tb = tb_find_pc(env->mem_io_pc);
1047 if (current_tb == tb &&
1048 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1049 /* If we are modifying the current TB, we must stop
1050 its execution. We could be more precise by checking
1051 that the modification is after the current PC, but it
1052 would require a specialized function to partially
1053 restore the CPU state */
1055 current_tb_modified = 1;
1056 cpu_restore_state(current_tb, env,
1057 env->mem_io_pc, NULL);
1058 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1059 &current_flags);
1061 #endif /* TARGET_HAS_PRECISE_SMC */
1062 /* we need to do that to handle the case where a signal
1063 occurs while doing tb_phys_invalidate() */
1064 saved_tb = NULL;
1065 if (env) {
1066 saved_tb = env->current_tb;
1067 env->current_tb = NULL;
1069 tb_phys_invalidate(tb, -1);
1070 if (env) {
1071 env->current_tb = saved_tb;
1072 if (env->interrupt_request && env->current_tb)
1073 cpu_interrupt(env, env->interrupt_request);
1076 tb = tb_next;
1078 #if !defined(CONFIG_USER_ONLY)
1079 /* if no code remaining, no need to continue to use slow writes */
1080 if (!p->first_tb) {
1081 invalidate_page_bitmap(p);
1082 if (is_cpu_write_access) {
1083 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1086 #endif
1087 #ifdef TARGET_HAS_PRECISE_SMC
1088 if (current_tb_modified) {
1089 /* we generate a block containing just the instruction
1090 modifying the memory. It will ensure that it cannot modify
1091 itself */
1092 env->current_tb = NULL;
1093 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1094 cpu_resume_from_signal(env, NULL);
1096 #endif
1099 /* len must be <= 8 and start must be a multiple of len */
1100 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1102 PageDesc *p;
1103 int offset, b;
1104 #if 0
1105 if (1) {
1106 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1107 cpu_single_env->mem_io_vaddr, len,
1108 cpu_single_env->eip,
1109 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1111 #endif
1112 p = page_find(start >> TARGET_PAGE_BITS);
1113 if (!p)
1114 return;
1115 if (p->code_bitmap) {
1116 offset = start & ~TARGET_PAGE_MASK;
1117 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1118 if (b & ((1 << len) - 1))
1119 goto do_invalidate;
1120 } else {
1121 do_invalidate:
1122 tb_invalidate_phys_page_range(start, start + len, 1);
1126 #if !defined(CONFIG_SOFTMMU)
1127 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1128 unsigned long pc, void *puc)
1130 TranslationBlock *tb;
1131 PageDesc *p;
1132 int n;
1133 #ifdef TARGET_HAS_PRECISE_SMC
1134 TranslationBlock *current_tb = NULL;
1135 CPUState *env = cpu_single_env;
1136 int current_tb_modified = 0;
1137 target_ulong current_pc = 0;
1138 target_ulong current_cs_base = 0;
1139 int current_flags = 0;
1140 #endif
1142 addr &= TARGET_PAGE_MASK;
1143 p = page_find(addr >> TARGET_PAGE_BITS);
1144 if (!p)
1145 return;
1146 tb = p->first_tb;
1147 #ifdef TARGET_HAS_PRECISE_SMC
1148 if (tb && pc != 0) {
1149 current_tb = tb_find_pc(pc);
1151 #endif
1152 while (tb != NULL) {
1153 n = (long)tb & 3;
1154 tb = (TranslationBlock *)((long)tb & ~3);
1155 #ifdef TARGET_HAS_PRECISE_SMC
1156 if (current_tb == tb &&
1157 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1158 /* If we are modifying the current TB, we must stop
1159 its execution. We could be more precise by checking
1160 that the modification is after the current PC, but it
1161 would require a specialized function to partially
1162 restore the CPU state */
1164 current_tb_modified = 1;
1165 cpu_restore_state(current_tb, env, pc, puc);
1166 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1167 &current_flags);
1169 #endif /* TARGET_HAS_PRECISE_SMC */
1170 tb_phys_invalidate(tb, addr);
1171 tb = tb->page_next[n];
1173 p->first_tb = NULL;
1174 #ifdef TARGET_HAS_PRECISE_SMC
1175 if (current_tb_modified) {
1176 /* we generate a block containing just the instruction
1177 modifying the memory. It will ensure that it cannot modify
1178 itself */
1179 env->current_tb = NULL;
1180 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1181 cpu_resume_from_signal(env, puc);
1183 #endif
1185 #endif
1187 /* add the tb in the target page and protect it if necessary */
1188 static inline void tb_alloc_page(TranslationBlock *tb,
1189 unsigned int n, tb_page_addr_t page_addr)
1191 PageDesc *p;
1192 TranslationBlock *last_first_tb;
1194 tb->page_addr[n] = page_addr;
1195 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1196 tb->page_next[n] = p->first_tb;
1197 last_first_tb = p->first_tb;
1198 p->first_tb = (TranslationBlock *)((long)tb | n);
1199 invalidate_page_bitmap(p);
1201 #if defined(TARGET_HAS_SMC) || 1
1203 #if defined(CONFIG_USER_ONLY)
1204 if (p->flags & PAGE_WRITE) {
1205 target_ulong addr;
1206 PageDesc *p2;
1207 int prot;
1209 /* force the host page as non writable (writes will have a
1210 page fault + mprotect overhead) */
1211 page_addr &= qemu_host_page_mask;
1212 prot = 0;
1213 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1214 addr += TARGET_PAGE_SIZE) {
1216 p2 = page_find (addr >> TARGET_PAGE_BITS);
1217 if (!p2)
1218 continue;
1219 prot |= p2->flags;
1220 p2->flags &= ~PAGE_WRITE;
1222 mprotect(g2h(page_addr), qemu_host_page_size,
1223 (prot & PAGE_BITS) & ~PAGE_WRITE);
1224 #ifdef DEBUG_TB_INVALIDATE
1225 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1226 page_addr);
1227 #endif
1229 #else
1230 /* if some code is already present, then the pages are already
1231 protected. So we handle the case where only the first TB is
1232 allocated in a physical page */
1233 if (!last_first_tb) {
1234 tlb_protect_code(page_addr);
1236 #endif
1238 #endif /* TARGET_HAS_SMC */
1241 /* Allocate a new translation block. Flush the translation buffer if
1242 too many translation blocks or too much generated code. */
1243 TranslationBlock *tb_alloc(target_ulong pc)
1245 TranslationBlock *tb;
1247 if (nb_tbs >= code_gen_max_blocks ||
1248 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1249 return NULL;
1250 tb = &tbs[nb_tbs++];
1251 tb->pc = pc;
1252 tb->cflags = 0;
1253 return tb;
1256 void tb_free(TranslationBlock *tb)
1258     /* In practice this is mostly used for single-use temporary TBs.
1259        Ignore the hard cases and just back up if this TB happens to
1260 be the last one generated. */
1261 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1262 code_gen_ptr = tb->tc_ptr;
1263 nb_tbs--;
1267 /* add a new TB and link it to the physical page tables. phys_page2 is
1268 (-1) to indicate that only one page contains the TB. */
1269 void tb_link_page(TranslationBlock *tb,
1270 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1272 unsigned int h;
1273 TranslationBlock **ptb;
1275 /* Grab the mmap lock to stop another thread invalidating this TB
1276 before we are done. */
1277 mmap_lock();
1278 /* add in the physical hash table */
1279 h = tb_phys_hash_func(phys_pc);
1280 ptb = &tb_phys_hash[h];
1281 tb->phys_hash_next = *ptb;
1282 *ptb = tb;
1284 /* add in the page list */
1285 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1286 if (phys_page2 != -1)
1287 tb_alloc_page(tb, 1, phys_page2);
1288 else
1289 tb->page_addr[1] = -1;
1291 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1292 tb->jmp_next[0] = NULL;
1293 tb->jmp_next[1] = NULL;
1295 /* init original jump addresses */
1296 if (tb->tb_next_offset[0] != 0xffff)
1297 tb_reset_jump(tb, 0);
1298 if (tb->tb_next_offset[1] != 0xffff)
1299 tb_reset_jump(tb, 1);
1301 #ifdef DEBUG_TB_CHECK
1302 tb_page_check();
1303 #endif
1304 mmap_unlock();
1307 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1308 tb[1].tc_ptr. Return NULL if not found */
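/* TBs are allocated sequentially from tbs[] and their generated code is
   laid out sequentially in code_gen_buffer, so tc_ptr grows with the
   index and a binary search over the nb_tbs entries is sufficient. */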
1309 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1311 int m_min, m_max, m;
1312 unsigned long v;
1313 TranslationBlock *tb;
1315 if (nb_tbs <= 0)
1316 return NULL;
1317 if (tc_ptr < (unsigned long)code_gen_buffer ||
1318 tc_ptr >= (unsigned long)code_gen_ptr)
1319 return NULL;
1320 /* binary search (cf Knuth) */
1321 m_min = 0;
1322 m_max = nb_tbs - 1;
1323 while (m_min <= m_max) {
1324 m = (m_min + m_max) >> 1;
1325 tb = &tbs[m];
1326 v = (unsigned long)tb->tc_ptr;
1327 if (v == tc_ptr)
1328 return tb;
1329 else if (tc_ptr < v) {
1330 m_max = m - 1;
1331 } else {
1332 m_min = m + 1;
1335 return &tbs[m_max];
1338 static void tb_reset_jump_recursive(TranslationBlock *tb);
1340 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1342 TranslationBlock *tb1, *tb_next, **ptb;
1343 unsigned int n1;
1345 tb1 = tb->jmp_next[n];
1346 if (tb1 != NULL) {
1347 /* find head of list */
1348 for(;;) {
1349 n1 = (long)tb1 & 3;
1350 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1351 if (n1 == 2)
1352 break;
1353 tb1 = tb1->jmp_next[n1];
1355         /* we are now sure that tb jumps to tb1 */
1356 tb_next = tb1;
1358 /* remove tb from the jmp_first list */
1359 ptb = &tb_next->jmp_first;
1360 for(;;) {
1361 tb1 = *ptb;
1362 n1 = (long)tb1 & 3;
1363 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1364 if (n1 == n && tb1 == tb)
1365 break;
1366 ptb = &tb1->jmp_next[n1];
1368 *ptb = tb->jmp_next[n];
1369 tb->jmp_next[n] = NULL;
1371 /* suppress the jump to next tb in generated code */
1372 tb_reset_jump(tb, n);
1374 /* suppress jumps in the tb on which we could have jumped */
1375 tb_reset_jump_recursive(tb_next);
1379 static void tb_reset_jump_recursive(TranslationBlock *tb)
1381 tb_reset_jump_recursive2(tb, 0);
1382 tb_reset_jump_recursive2(tb, 1);
1385 #if defined(TARGET_HAS_ICE)
1386 #if defined(CONFIG_USER_ONLY)
1387 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1389 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1391 #else
1392 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1394 target_phys_addr_t addr;
1395 target_ulong pd;
1396 ram_addr_t ram_addr;
1397 PhysPageDesc *p;
1399 addr = cpu_get_phys_page_debug(env, pc);
1400 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1401 if (!p) {
1402 pd = IO_MEM_UNASSIGNED;
1403 } else {
1404 pd = p->phys_offset;
1406 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1407 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1409 #endif
1410 #endif /* TARGET_HAS_ICE */
1412 #if defined(CONFIG_USER_ONLY)
1413 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1418 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1419 int flags, CPUWatchpoint **watchpoint)
1421 return -ENOSYS;
1423 #else
1424 /* Add a watchpoint. */
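/* The length must be a power of two (1, 2, 4 or 8 bytes) and the address
   aligned to it; only len_mask = ~(len - 1) is stored and later used to
   match accesses against the watchpoint. */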
1425 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1426 int flags, CPUWatchpoint **watchpoint)
1428 target_ulong len_mask = ~(len - 1);
1429 CPUWatchpoint *wp;
1431 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1432 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1433 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1434 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1435 return -EINVAL;
1437 wp = qemu_malloc(sizeof(*wp));
1439 wp->vaddr = addr;
1440 wp->len_mask = len_mask;
1441 wp->flags = flags;
1443 /* keep all GDB-injected watchpoints in front */
1444 if (flags & BP_GDB)
1445 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1446 else
1447 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1449 tlb_flush_page(env, addr);
1451 if (watchpoint)
1452 *watchpoint = wp;
1453 return 0;
1456 /* Remove a specific watchpoint. */
1457 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1458 int flags)
1460 target_ulong len_mask = ~(len - 1);
1461 CPUWatchpoint *wp;
1463 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1464 if (addr == wp->vaddr && len_mask == wp->len_mask
1465 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1466 cpu_watchpoint_remove_by_ref(env, wp);
1467 return 0;
1470 return -ENOENT;
1473 /* Remove a specific watchpoint by reference. */
1474 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1476 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1478 tlb_flush_page(env, watchpoint->vaddr);
1480 qemu_free(watchpoint);
1483 /* Remove all matching watchpoints. */
1484 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1486 CPUWatchpoint *wp, *next;
1488 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1489 if (wp->flags & mask)
1490 cpu_watchpoint_remove_by_ref(env, wp);
1493 #endif
1495 /* Add a breakpoint. */
1496 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1497 CPUBreakpoint **breakpoint)
1499 #if defined(TARGET_HAS_ICE)
1500 CPUBreakpoint *bp;
1502 bp = qemu_malloc(sizeof(*bp));
1504 bp->pc = pc;
1505 bp->flags = flags;
1507 /* keep all GDB-injected breakpoints in front */
1508 if (flags & BP_GDB)
1509 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1510 else
1511 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1513 breakpoint_invalidate(env, pc);
1515 if (breakpoint)
1516 *breakpoint = bp;
1517 return 0;
1518 #else
1519 return -ENOSYS;
1520 #endif
1523 /* Remove a specific breakpoint. */
1524 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1526 #if defined(TARGET_HAS_ICE)
1527 CPUBreakpoint *bp;
1529 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1530 if (bp->pc == pc && bp->flags == flags) {
1531 cpu_breakpoint_remove_by_ref(env, bp);
1532 return 0;
1535 return -ENOENT;
1536 #else
1537 return -ENOSYS;
1538 #endif
1541 /* Remove a specific breakpoint by reference. */
1542 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1544 #if defined(TARGET_HAS_ICE)
1545 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1547 breakpoint_invalidate(env, breakpoint->pc);
1549 qemu_free(breakpoint);
1550 #endif
1553 /* Remove all matching breakpoints. */
1554 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1556 #if defined(TARGET_HAS_ICE)
1557 CPUBreakpoint *bp, *next;
1559 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1560 if (bp->flags & mask)
1561 cpu_breakpoint_remove_by_ref(env, bp);
1563 #endif
1566 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1567 CPU loop after each instruction */
1568 void cpu_single_step(CPUState *env, int enabled)
1570 #if defined(TARGET_HAS_ICE)
1571 if (env->singlestep_enabled != enabled) {
1572 env->singlestep_enabled = enabled;
1573 if (kvm_enabled())
1574 kvm_update_guest_debug(env, 0);
1575 else {
1576 /* must flush all the translated code to avoid inconsistencies */
1577 /* XXX: only flush what is necessary */
1578 tb_flush(env);
1581 #endif
1584 /* enable or disable low-level logging */
1585 void cpu_set_log(int log_flags)
1587 loglevel = log_flags;
1588 if (loglevel && !logfile) {
1589 logfile = fopen(logfilename, log_append ? "a" : "w");
1590 if (!logfile) {
1591 perror(logfilename);
1592 _exit(1);
1594 #if !defined(CONFIG_SOFTMMU)
1595 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1597 static char logfile_buf[4096];
1598 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1600 #elif !defined(_WIN32)
1601 /* Win32 doesn't support line-buffering and requires size >= 2 */
1602 setvbuf(logfile, NULL, _IOLBF, 0);
1603 #endif
1604 log_append = 1;
1606 if (!loglevel && logfile) {
1607 fclose(logfile);
1608 logfile = NULL;
1612 void cpu_set_log_filename(const char *filename)
1614 logfilename = strdup(filename);
1615 if (logfile) {
1616 fclose(logfile);
1617 logfile = NULL;
1619 cpu_set_log(loglevel);
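/* Break the direct-jump chain of the TB the CPU is currently executing so
   that control returns to the main loop at the next block boundary;
   interrupt_lock serializes concurrent unchaining attempts. */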
1622 static void cpu_unlink_tb(CPUState *env)
1624 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1625 problem and hope the cpu will stop of its own accord. For userspace
1626 emulation this often isn't actually as bad as it sounds. Often
1627 signals are used primarily to interrupt blocking syscalls. */
1628 TranslationBlock *tb;
1629 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1631 spin_lock(&interrupt_lock);
1632 tb = env->current_tb;
1633 /* if the cpu is currently executing code, we must unlink it and
1634 all the potentially executing TB */
1635 if (tb) {
1636 env->current_tb = NULL;
1637 tb_reset_jump_recursive(tb);
1639 spin_unlock(&interrupt_lock);
1642 /* mask must never be zero, except for A20 change call */
1643 void cpu_interrupt(CPUState *env, int mask)
1645 int old_mask;
1647 old_mask = env->interrupt_request;
1648 env->interrupt_request |= mask;
1649 if (kvm_enabled() && !kvm_irqchip_in_kernel())
1650 kvm_update_interrupt_request(env);
1652 #ifndef CONFIG_USER_ONLY
1654 * If called from iothread context, wake the target cpu in
1655      * case it's halted.
1657 if (!qemu_cpu_self(env)) {
1658 qemu_cpu_kick(env);
1659 return;
1661 #endif
1663 if (use_icount) {
1664 env->icount_decr.u16.high = 0xffff;
1665 #ifndef CONFIG_USER_ONLY
1666 if (!can_do_io(env)
1667 && (mask & ~old_mask) != 0) {
1668 cpu_abort(env, "Raised interrupt while not in I/O function");
1670 #endif
1671 } else {
1672 cpu_unlink_tb(env);
1676 void cpu_reset_interrupt(CPUState *env, int mask)
1678 env->interrupt_request &= ~mask;
1681 void cpu_exit(CPUState *env)
1683 env->exit_request = 1;
1684 cpu_unlink_tb(env);
1687 const CPULogItem cpu_log_items[] = {
1688 { CPU_LOG_TB_OUT_ASM, "out_asm",
1689 "show generated host assembly code for each compiled TB" },
1690 { CPU_LOG_TB_IN_ASM, "in_asm",
1691 "show target assembly code for each compiled TB" },
1692 { CPU_LOG_TB_OP, "op",
1693 "show micro ops for each compiled TB" },
1694 { CPU_LOG_TB_OP_OPT, "op_opt",
1695 "show micro ops "
1696 #ifdef TARGET_I386
1697 "before eflags optimization and "
1698 #endif
1699 "after liveness analysis" },
1700 { CPU_LOG_INT, "int",
1701 "show interrupts/exceptions in short format" },
1702 { CPU_LOG_EXEC, "exec",
1703 "show trace before each executed TB (lots of logs)" },
1704 { CPU_LOG_TB_CPU, "cpu",
1705 "show CPU state before block translation" },
1706 #ifdef TARGET_I386
1707 { CPU_LOG_PCALL, "pcall",
1708 "show protected mode far calls/returns/exceptions" },
1709 { CPU_LOG_RESET, "cpu_reset",
1710 "show CPU state before CPU resets" },
1711 #endif
1712 #ifdef DEBUG_IOPORT
1713 { CPU_LOG_IOPORT, "ioport",
1714 "show all i/o ports accesses" },
1715 #endif
1716 { 0, NULL, NULL },
1719 #ifndef CONFIG_USER_ONLY
1720 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1721 = QLIST_HEAD_INITIALIZER(memory_client_list);
1723 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1724 ram_addr_t size,
1725 ram_addr_t phys_offset)
1727 CPUPhysMemoryClient *client;
1728 QLIST_FOREACH(client, &memory_client_list, list) {
1729 client->set_memory(client, start_addr, size, phys_offset);
1733 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1734 target_phys_addr_t end)
1736 CPUPhysMemoryClient *client;
1737 QLIST_FOREACH(client, &memory_client_list, list) {
1738 int r = client->sync_dirty_bitmap(client, start, end);
1739 if (r < 0)
1740 return r;
1742 return 0;
1745 static int cpu_notify_migration_log(int enable)
1747 CPUPhysMemoryClient *client;
1748 QLIST_FOREACH(client, &memory_client_list, list) {
1749 int r = client->migration_log(client, enable);
1750 if (r < 0)
1751 return r;
1753 return 0;
1756 static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1757 int level, void **lp)
1759 int i;
1761 if (*lp == NULL) {
1762 return;
1764 if (level == 0) {
1765 PhysPageDesc *pd = *lp;
1766 for (i = 0; i < L2_SIZE; ++i) {
1767 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1768 client->set_memory(client, pd[i].region_offset,
1769 TARGET_PAGE_SIZE, pd[i].phys_offset);
1772 } else {
1773 void **pp = *lp;
1774 for (i = 0; i < L2_SIZE; ++i) {
1775 phys_page_for_each_1(client, level - 1, pp + i);
1780 static void phys_page_for_each(CPUPhysMemoryClient *client)
1782 int i;
1783 for (i = 0; i < P_L1_SIZE; ++i) {
1784 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1785                              l1_phys_map + i);
1789 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1791 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1792 phys_page_for_each(client);
1795 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1797 QLIST_REMOVE(client, list);
1799 #endif
1801 static int cmp1(const char *s1, int n, const char *s2)
1803 if (strlen(s2) != n)
1804 return 0;
1805 return memcmp(s1, s2, n) == 0;
1808 /* takes a comma-separated list of log masks. Returns 0 on error. */
1809 int cpu_str_to_log_mask(const char *str)
1811 const CPULogItem *item;
1812 int mask;
1813 const char *p, *p1;
1815 p = str;
1816 mask = 0;
1817 for(;;) {
1818 p1 = strchr(p, ',');
1819 if (!p1)
1820 p1 = p + strlen(p);
1821 if(cmp1(p,p1-p,"all")) {
1822 for(item = cpu_log_items; item->mask != 0; item++) {
1823 mask |= item->mask;
1825 } else {
1826 for(item = cpu_log_items; item->mask != 0; item++) {
1827 if (cmp1(p, p1 - p, item->name))
1828 goto found;
1830 return 0;
1832 found:
1833 mask |= item->mask;
1834 if (*p1 != ',')
1835 break;
1836 p = p1 + 1;
1838 return mask;
1841 void cpu_abort(CPUState *env, const char *fmt, ...)
1843 va_list ap;
1844 va_list ap2;
1846 va_start(ap, fmt);
1847 va_copy(ap2, ap);
1848 fprintf(stderr, "qemu: fatal: ");
1849 vfprintf(stderr, fmt, ap);
1850 fprintf(stderr, "\n");
1851 #ifdef TARGET_I386
1852 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1853 #else
1854 cpu_dump_state(env, stderr, fprintf, 0);
1855 #endif
1856 if (qemu_log_enabled()) {
1857 qemu_log("qemu: fatal: ");
1858 qemu_log_vprintf(fmt, ap2);
1859 qemu_log("\n");
1860 #ifdef TARGET_I386
1861 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1862 #else
1863 log_cpu_state(env, 0);
1864 #endif
1865 qemu_log_flush();
1866 qemu_log_close();
1868 va_end(ap2);
1869 va_end(ap);
1870 #if defined(CONFIG_USER_ONLY)
1872 struct sigaction act;
1873 sigfillset(&act.sa_mask);
1874 act.sa_handler = SIG_DFL;
1875 sigaction(SIGABRT, &act, NULL);
1877 #endif
1878 abort();
1881 CPUState *cpu_copy(CPUState *env)
1883 CPUState *new_env = cpu_init(env->cpu_model_str);
1884 CPUState *next_cpu = new_env->next_cpu;
1885 int cpu_index = new_env->cpu_index;
1886 #if defined(TARGET_HAS_ICE)
1887 CPUBreakpoint *bp;
1888 CPUWatchpoint *wp;
1889 #endif
1891 memcpy(new_env, env, sizeof(CPUState));
1893 /* Preserve chaining and index. */
1894 new_env->next_cpu = next_cpu;
1895 new_env->cpu_index = cpu_index;
1897 /* Clone all break/watchpoints.
1898 Note: Once we support ptrace with hw-debug register access, make sure
1899 BP_CPU break/watchpoints are handled correctly on clone. */
1900 QTAILQ_INIT(&env->breakpoints);
1901 QTAILQ_INIT(&env->watchpoints);
1902 #if defined(TARGET_HAS_ICE)
1903 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1904 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1906 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1907 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1908 wp->flags, NULL);
1910 #endif
1912 return new_env;
1915 #if !defined(CONFIG_USER_ONLY)
1917 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1919 unsigned int i;
1921 /* Discard jump cache entries for any tb which might potentially
1922 overlap the flushed page. */
1923 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1924 memset (&env->tb_jmp_cache[i], 0,
1925 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1927 i = tb_jmp_cache_hash_page(addr);
1928 memset (&env->tb_jmp_cache[i], 0,
1929 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1932 static CPUTLBEntry s_cputlb_empty_entry = {
1933 .addr_read = -1,
1934 .addr_write = -1,
1935 .addr_code = -1,
1936 .addend = -1,
1939 /* NOTE: if flush_global is true, also flush global entries (not
1940 implemented yet) */
1941 void tlb_flush(CPUState *env, int flush_global)
1943 int i;
1945 #if defined(DEBUG_TLB)
1946 printf("tlb_flush:\n");
1947 #endif
1948 /* must reset current TB so that interrupts cannot modify the
1949 links while we are modifying them */
1950 env->current_tb = NULL;
1952 for(i = 0; i < CPU_TLB_SIZE; i++) {
1953 int mmu_idx;
1954 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1955 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1959 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1961 env->tlb_flush_addr = -1;
1962 env->tlb_flush_mask = 0;
1963 tlb_flush_count++;
1966 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1968 if (addr == (tlb_entry->addr_read &
1969 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1970 addr == (tlb_entry->addr_write &
1971 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1972 addr == (tlb_entry->addr_code &
1973 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1974 *tlb_entry = s_cputlb_empty_entry;
1978 void tlb_flush_page(CPUState *env, target_ulong addr)
1980 int i;
1981 int mmu_idx;
1983 #if defined(DEBUG_TLB)
1984 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1985 #endif
1986 /* Check if we need to flush due to large pages. */
1987 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1988 #if defined(DEBUG_TLB)
1989 printf("tlb_flush_page: forced full flush ("
1990 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1991 env->tlb_flush_addr, env->tlb_flush_mask);
1992 #endif
1993 tlb_flush(env, 1);
1994 return;
1996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env->current_tb = NULL;
2000 addr &= TARGET_PAGE_MASK;
2001 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2002 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2003 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2005 tlb_flush_jmp_cache(env, addr);
2008 /* update the TLBs so that writes to code in the virtual page 'addr'
2009 can be detected */
2010 static void tlb_protect_code(ram_addr_t ram_addr)
2012 cpu_physical_memory_reset_dirty(ram_addr,
2013 ram_addr + TARGET_PAGE_SIZE,
2014 CODE_DIRTY_FLAG);
2017 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2018 tested for self modifying code */
2019 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2020 target_ulong vaddr)
2022 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2025 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2026 unsigned long start, unsigned long length)
2028 unsigned long addr;
2029 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2030 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2031 if ((addr - start) < length) {
2032 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2037 /* Note: start and end must be within the same ram block. */
2038 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2039 int dirty_flags)
2041 CPUState *env;
2042 unsigned long length, start1;
2043 int i;
2045 start &= TARGET_PAGE_MASK;
2046 end = TARGET_PAGE_ALIGN(end);
2048 length = end - start;
2049 if (length == 0)
2050 return;
2051 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2053 /* we modify the TLB cache so that the dirty bit will be set again
2054 when accessing the range */
2055 start1 = (unsigned long)qemu_get_ram_ptr(start);
2056     /* Check that we don't span multiple blocks - this breaks the
2057 address comparisons below. */
2058 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2059 != (end - 1) - start) {
2060 abort();
2063 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2064 int mmu_idx;
2065 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2066 for(i = 0; i < CPU_TLB_SIZE; i++)
2067 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2068 start1, length);
2073 int cpu_physical_memory_set_dirty_tracking(int enable)
2075 int ret = 0;
2076 in_migration = enable;
2077 ret = cpu_notify_migration_log(!!enable);
2078 return ret;
2081 int cpu_physical_memory_get_dirty_tracking(void)
2083 return in_migration;
2086 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2087 target_phys_addr_t end_addr)
2089 int ret;
2091 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2092 return ret;
2095 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2097 ram_addr_t ram_addr;
2098 void *p;
2100 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2101 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2102 + tlb_entry->addend);
2103 ram_addr = qemu_ram_addr_from_host(p);
2104 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2105 tlb_entry->addr_write |= TLB_NOTDIRTY;
2110 /* update the TLB according to the current state of the dirty bits */
2111 void cpu_tlb_update_dirty(CPUState *env)
2113 int i;
2114 int mmu_idx;
2115 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2116 for(i = 0; i < CPU_TLB_SIZE; i++)
2117 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2121 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2123 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2124 tlb_entry->addr_write = vaddr;
2127 /* update the TLB corresponding to virtual page vaddr
2128 so that it is no longer dirty */
2129 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2131 int i;
2132 int mmu_idx;
2134 vaddr &= TARGET_PAGE_MASK;
2135 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2136 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2137 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2140 /* Our TLB does not support large pages, so remember the area covered by
2141 large pages and trigger a full TLB flush if these are invalidated. */
2142 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2143 target_ulong size)
2145 target_ulong mask = ~(size - 1);
2147 if (env->tlb_flush_addr == (target_ulong)-1) {
2148 env->tlb_flush_addr = vaddr & mask;
2149 env->tlb_flush_mask = mask;
2150 return;
2152 /* Extend the existing region to include the new page.
2153 This is a compromise between unnecessary flushes and the cost
2154 of maintaining a full variable size TLB. */
2155 mask &= env->tlb_flush_mask;
2156 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2157 mask <<= 1;
2159 env->tlb_flush_addr &= mask;
2160 env->tlb_flush_mask = mask;
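/* Worked example (illustrative, assuming a 32-bit target_ulong): with an
   existing 2 MB large-page region at 0x00200000 (mask 0xffe00000), adding
   another 2 MB page at 0x00600000 widens the mask one bit at a time until the
   two addresses agree, ending with mask 0xff800000 and flush base 0x00000000,
   i.e. a single 8 MB region is tracked instead of two separate pages. */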
2163 /* Add a new TLB entry. At most one entry for a given virtual address
2164 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2165 supplied size is only used by tlb_flush_page. */
2166 void tlb_set_page(CPUState *env, target_ulong vaddr,
2167 target_phys_addr_t paddr, int prot,
2168 int mmu_idx, target_ulong size)
2170 PhysPageDesc *p;
2171 unsigned long pd;
2172 unsigned int index;
2173 target_ulong address;
2174 target_ulong code_address;
2175 unsigned long addend;
2176 CPUTLBEntry *te;
2177 CPUWatchpoint *wp;
2178 target_phys_addr_t iotlb;
2180 assert(size >= TARGET_PAGE_SIZE);
2181 if (size != TARGET_PAGE_SIZE) {
2182 tlb_add_large_page(env, vaddr, size);
2184 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2185 if (!p) {
2186 pd = IO_MEM_UNASSIGNED;
2187 } else {
2188 pd = p->phys_offset;
2190 #if defined(DEBUG_TLB)
2191 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2192 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2193 #endif
2195 address = vaddr;
2196 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2197 /* IO memory case (romd handled later) */
2198 address |= TLB_MMIO;
2200 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2201 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2202 /* Normal RAM. */
2203 iotlb = pd & TARGET_PAGE_MASK;
2204 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2205 iotlb |= IO_MEM_NOTDIRTY;
2206 else
2207 iotlb |= IO_MEM_ROM;
2208 } else {
2209 /* IO handlers are currently passed a physical address.
2210 It would be nice to pass an offset from the base address
2211 of that region. This would avoid having to special case RAM,
2212 and avoid full address decoding in every device.
2213 We can't use the high bits of pd for this because
2214 IO_MEM_ROMD uses these as a ram address. */
2215 iotlb = (pd & ~TARGET_PAGE_MASK);
2216 if (p) {
2217 iotlb += p->region_offset;
2218 } else {
2219 iotlb += paddr;
2223 code_address = address;
2224 /* Make accesses to pages with watchpoints go via the
2225 watchpoint trap routines. */
2226 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2227 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2228 iotlb = io_mem_watch + paddr;
2229 /* TODO: The memory case can be optimized by not trapping
2230 reads of pages with a write breakpoint. */
2231 address |= TLB_MMIO;
2235 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2236 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2237 te = &env->tlb_table[mmu_idx][index];
2238 te->addend = addend - vaddr;
2239 if (prot & PAGE_READ) {
2240 te->addr_read = address;
2241 } else {
2242 te->addr_read = -1;
2245 if (prot & PAGE_EXEC) {
2246 te->addr_code = code_address;
2247 } else {
2248 te->addr_code = -1;
2250 if (prot & PAGE_WRITE) {
2251 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2252 (pd & IO_MEM_ROMD)) {
2253 /* Write access calls the I/O callback. */
2254 te->addr_write = address | TLB_MMIO;
2255 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2256 !cpu_physical_memory_is_dirty(pd)) {
2257 te->addr_write = address | TLB_NOTDIRTY;
2258 } else {
2259 te->addr_write = address;
2261 } else {
2262 te->addr_write = -1;
2266 #else
2268 void tlb_flush(CPUState *env, int flush_global)
2272 void tlb_flush_page(CPUState *env, target_ulong addr)
2277 * Walks guest process memory "regions" one by one
2278 * and calls callback function 'fn' for each region.
2281 struct walk_memory_regions_data
2283 walk_memory_regions_fn fn;
2284 void *priv;
2285 unsigned long start;
2286 int prot;
2289 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2290 abi_ulong end, int new_prot)
2292 if (data->start != -1ul) {
2293 int rc = data->fn(data->priv, data->start, end, data->prot);
2294 if (rc != 0) {
2295 return rc;
2299 data->start = (new_prot ? end : -1ul);
2300 data->prot = new_prot;
2302 return 0;
2305 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2306 abi_ulong base, int level, void **lp)
2308 abi_ulong pa;
2309 int i, rc;
2311 if (*lp == NULL) {
2312 return walk_memory_regions_end(data, base, 0);
2315 if (level == 0) {
2316 PageDesc *pd = *lp;
2317 for (i = 0; i < L2_SIZE; ++i) {
2318 int prot = pd[i].flags;
2320 pa = base | (i << TARGET_PAGE_BITS);
2321 if (prot != data->prot) {
2322 rc = walk_memory_regions_end(data, pa, prot);
2323 if (rc != 0) {
2324 return rc;
2328 } else {
2329 void **pp = *lp;
2330 for (i = 0; i < L2_SIZE; ++i) {
2331 pa = base | ((abi_ulong)i <<
2332 (TARGET_PAGE_BITS + L2_BITS * level));
2333 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2334 if (rc != 0) {
2335 return rc;
2340 return 0;
2343 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2345 struct walk_memory_regions_data data;
2346 unsigned long i;
2348 data.fn = fn;
2349 data.priv = priv;
2350 data.start = -1ul;
2351 data.prot = 0;
2353 for (i = 0; i < V_L1_SIZE; i++) {
2354 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2355 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2356 if (rc != 0) {
2357 return rc;
2361 return walk_memory_regions_end(&data, 0, 0);
2364 static int dump_region(void *priv, abi_ulong start,
2365 abi_ulong end, unsigned long prot)
2367 FILE *f = (FILE *)priv;
2369 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2370 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2371 start, end, end - start,
2372 ((prot & PAGE_READ) ? 'r' : '-'),
2373 ((prot & PAGE_WRITE) ? 'w' : '-'),
2374 ((prot & PAGE_EXEC) ? 'x' : '-'));
2376 return (0);
2379 /* dump memory mappings */
2380 void page_dump(FILE *f)
2382 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2383 "start", "end", "size", "prot");
2384 walk_memory_regions(f, dump_region);
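/* Illustrative sample of the resulting output (hypothetical mappings, layout
   derived from the format strings above):

       start    end      size     prot
       00010000-00012000 00002000 r-x
       00012000-00013000 00001000 rw-
*/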
2387 int page_get_flags(target_ulong address)
2389 PageDesc *p;
2391 p = page_find(address >> TARGET_PAGE_BITS);
2392 if (!p)
2393 return 0;
2394 return p->flags;
2397 /* Modify the flags of a page and invalidate the code if necessary.
2398 The flag PAGE_WRITE_ORG is positioned automatically depending
2399 on PAGE_WRITE. The mmap_lock should already be held. */
2400 void page_set_flags(target_ulong start, target_ulong end, int flags)
2402 target_ulong addr, len;
2404 /* This function should never be called with addresses outside the
2405 guest address space. If this assert fires, it probably indicates
2406 a missing call to h2g_valid. */
2407 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2408 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2409 #endif
2410 assert(start < end);
2412 start = start & TARGET_PAGE_MASK;
2413 end = TARGET_PAGE_ALIGN(end);
2415 if (flags & PAGE_WRITE) {
2416 flags |= PAGE_WRITE_ORG;
2419 for (addr = start, len = end - start;
2420 len != 0;
2421 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2422 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2424 /* If the write protection bit is set, then we invalidate
2425 the code inside. */
2426 if (!(p->flags & PAGE_WRITE) &&
2427 (flags & PAGE_WRITE) &&
2428 p->first_tb) {
2429 tb_invalidate_phys_page(addr, 0, NULL);
2431 p->flags = flags;
2435 int page_check_range(target_ulong start, target_ulong len, int flags)
2437 PageDesc *p;
2438 target_ulong end;
2439 target_ulong addr;
2441 /* This function should never be called with addresses outside the
2442 guest address space. If this assert fires, it probably indicates
2443 a missing call to h2g_valid. */
2444 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2445 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2446 #endif
2448 if (len == 0) {
2449 return 0;
2451 if (start + len - 1 < start) {
2452 /* We've wrapped around. */
2453 return -1;
2456 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2457 start = start & TARGET_PAGE_MASK;
2459 for (addr = start, len = end - start;
2460 len != 0;
2461 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2462 p = page_find(addr >> TARGET_PAGE_BITS);
2463 if (!p)
2464 return -1;
2465 if (!(p->flags & PAGE_VALID))
2466 return -1;
2468 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2469 return -1;
2470 if (flags & PAGE_WRITE) {
2471 if (!(p->flags & PAGE_WRITE_ORG))
2472 return -1;
2473 /* unprotect the page if it was put read-only because it
2474 contains translated code */
2475 if (!(p->flags & PAGE_WRITE)) {
2476 if (!page_unprotect(addr, 0, NULL))
2477 return -1;
2479 return 0;
2482 return 0;
2485 /* called from signal handler: invalidate the code and unprotect the
2486 page. Return TRUE if the fault was successfully handled. */
2487 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2489 unsigned int prot;
2490 PageDesc *p;
2491 target_ulong host_start, host_end, addr;
2493 /* Technically this isn't safe inside a signal handler. However we
2494 know this only ever happens in a synchronous SEGV handler, so in
2495 practice it seems to be ok. */
2496 mmap_lock();
2498 p = page_find(address >> TARGET_PAGE_BITS);
2499 if (!p) {
2500 mmap_unlock();
2501 return 0;
2504 /* if the page was really writable, then we change its
2505 protection back to writable */
2506 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2507 host_start = address & qemu_host_page_mask;
2508 host_end = host_start + qemu_host_page_size;
2510 prot = 0;
2511 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2512 p = page_find(addr >> TARGET_PAGE_BITS);
2513 p->flags |= PAGE_WRITE;
2514 prot |= p->flags;
2516 /* and since the content will be modified, we must invalidate
2517 the corresponding translated code. */
2518 tb_invalidate_phys_page(addr, pc, puc);
2519 #ifdef DEBUG_TB_CHECK
2520 tb_invalidate_check(addr);
2521 #endif
2523 mprotect((void *)g2h(host_start), qemu_host_page_size,
2524 prot & PAGE_BITS);
2526 mmap_unlock();
2527 return 1;
2529 mmap_unlock();
2530 return 0;
2533 static inline void tlb_set_dirty(CPUState *env,
2534 unsigned long addr, target_ulong vaddr)
2537 #endif /* defined(CONFIG_USER_ONLY) */
2539 #if !defined(CONFIG_USER_ONLY)
2541 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2542 typedef struct subpage_t {
2543 target_phys_addr_t base;
2544 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2545 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2546 } subpage_t;
2548 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2549 ram_addr_t memory, ram_addr_t region_offset);
2550 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2551 ram_addr_t orig_memory,
2552 ram_addr_t region_offset);
2553 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2554 need_subpage) \
2555 do { \
2556 if (addr > start_addr) \
2557 start_addr2 = 0; \
2558 else { \
2559 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2560 if (start_addr2 > 0) \
2561 need_subpage = 1; \
2564 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2565 end_addr2 = TARGET_PAGE_SIZE - 1; \
2566 else { \
2567 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2568 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2569 need_subpage = 1; \
2571 } while (0)
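/* Worked example (illustrative, assuming 4 KB target pages): registering a
   0x100-byte region at start_addr 0x10000f00 makes the first iteration of the
   loop in cpu_register_physical_memory_offset() below evaluate CHECK_SUBPAGE
   with addr == start_addr, giving start_addr2 = 0xf00 and end_addr2 = 0xfff,
   so need_subpage is set and only offsets 0xf00..0xfff of that page are routed
   to the new handlers. */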
2573 /* register physical memory.
2574 For RAM, 'size' must be a multiple of the target page size.
2575 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2576 io memory page. The address used when calling the IO function is
2577 the offset from the start of the region, plus region_offset. Both
2578 start_addr and region_offset are rounded down to a page boundary
2579 before calculating this offset. This should not be a problem unless
2580 the low bits of start_addr and region_offset differ. */
2581 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2582 ram_addr_t size,
2583 ram_addr_t phys_offset,
2584 ram_addr_t region_offset)
2586 target_phys_addr_t addr, end_addr;
2587 PhysPageDesc *p;
2588 CPUState *env;
2589 ram_addr_t orig_size = size;
2590 subpage_t *subpage;
2592 cpu_notify_set_memory(start_addr, size, phys_offset);
2594 if (phys_offset == IO_MEM_UNASSIGNED) {
2595 region_offset = start_addr;
2597 region_offset &= TARGET_PAGE_MASK;
2598 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2599 end_addr = start_addr + (target_phys_addr_t)size;
2600 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2601 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2602 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2603 ram_addr_t orig_memory = p->phys_offset;
2604 target_phys_addr_t start_addr2, end_addr2;
2605 int need_subpage = 0;
2607 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2608 need_subpage);
2609 if (need_subpage) {
2610 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2611 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2612 &p->phys_offset, orig_memory,
2613 p->region_offset);
2614 } else {
2615 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2616 >> IO_MEM_SHIFT];
2618 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2619 region_offset);
2620 p->region_offset = 0;
2621 } else {
2622 p->phys_offset = phys_offset;
2623 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2624 (phys_offset & IO_MEM_ROMD))
2625 phys_offset += TARGET_PAGE_SIZE;
2627 } else {
2628 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2629 p->phys_offset = phys_offset;
2630 p->region_offset = region_offset;
2631 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2632 (phys_offset & IO_MEM_ROMD)) {
2633 phys_offset += TARGET_PAGE_SIZE;
2634 } else {
2635 target_phys_addr_t start_addr2, end_addr2;
2636 int need_subpage = 0;
2638 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2639 end_addr2, need_subpage);
2641 if (need_subpage) {
2642 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2643 &p->phys_offset, IO_MEM_UNASSIGNED,
2644 addr & TARGET_PAGE_MASK);
2645 subpage_register(subpage, start_addr2, end_addr2,
2646 phys_offset, region_offset);
2647 p->region_offset = 0;
2651 region_offset += TARGET_PAGE_SIZE;
2654 /* since each CPU stores ram addresses in its TLB cache, we must
2655 reset the modified entries */
2656 /* XXX: slow ! */
2657 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2658 tlb_flush(env, 1);
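/* Illustrative sketch (not part of this file): board code typically allocates
   guest RAM and registers it, and devices register MMIO pages with the value
   returned by cpu_register_io_memory().  The addresses and sizes below are
   made up, and cpu_register_physical_memory() is assumed to be the usual
   region_offset == 0 wrapper around the function above. */
#if 0
static void map_example(ram_addr_t ram_size, int io_index)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* RAM: the low bits of phys_offset stay zero (IO_MEM_RAM) */
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
    /* MMIO: one page handled by the callbacks behind io_index */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io_index);
}
#endif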
2662 /* XXX: temporary until new memory mapping API */
2663 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2665 PhysPageDesc *p;
2667 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2668 if (!p)
2669 return IO_MEM_UNASSIGNED;
2670 return p->phys_offset;
2673 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2675 if (kvm_enabled())
2676 kvm_coalesce_mmio_region(addr, size);
2679 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2681 if (kvm_enabled())
2682 kvm_uncoalesce_mmio_region(addr, size);
2685 void qemu_flush_coalesced_mmio_buffer(void)
2687 if (kvm_enabled())
2688 kvm_flush_coalesced_mmio_buffer();
2691 #if defined(__linux__) && !defined(TARGET_S390X)
2693 #include <sys/vfs.h>
2695 #define HUGETLBFS_MAGIC 0x958458f6
2697 static long gethugepagesize(const char *path)
2699 struct statfs fs;
2700 int ret;
2702 do {
2703 ret = statfs(path, &fs);
2704 } while (ret != 0 && errno == EINTR);
2706 if (ret != 0) {
2707 perror(path);
2708 return 0;
2711 if (fs.f_type != HUGETLBFS_MAGIC)
2712 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2714 return fs.f_bsize;
2717 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2719 char *filename;
2720 void *area;
2721 int fd;
2722 #ifdef MAP_POPULATE
2723 int flags;
2724 #endif
2725 unsigned long hpagesize;
2727 hpagesize = gethugepagesize(path);
2728 if (!hpagesize) {
2729 return NULL;
2732 if (memory < hpagesize) {
2733 return NULL;
2736 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2737 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2738 return NULL;
2741 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2742 return NULL;
2745 fd = mkstemp(filename);
2746 if (fd < 0) {
2747 perror("unable to create backing store for hugepages");
2748 free(filename);
2749 return NULL;
2751 unlink(filename);
2752 free(filename);
2754 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2757 * ftruncate is not supported by hugetlbfs in older
2758 * hosts, so don't bother bailing out on errors.
2759 * If anything goes wrong with it under other filesystems,
2760 * mmap will fail.
2762 if (ftruncate(fd, memory))
2763 perror("ftruncate");
2765 #ifdef MAP_POPULATE
2766 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2767 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2768 * to sidestep this quirk.
2770 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2771 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2772 #else
2773 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2774 #endif
2775 if (area == MAP_FAILED) {
2776 perror("file_ram_alloc: can't mmap RAM pages");
2777 close(fd);
2778 return NULL;
2780 return area;
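/* Illustrative note (not from the original source): -mem-path expects a
   hugetlbfs mount point, so a typical invocation looks roughly like

       mount -t hugetlbfs hugetlbfs /hugepages
       qemu -m 1024 -mem-path /hugepages ...

   gethugepagesize() above only warns when the path is not on hugetlbfs, and
   the caller (qemu_ram_alloc) falls back to qemu_vmalloc() if file_ram_alloc()
   cannot set up the hugepage mapping. */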
2782 #endif
2784 ram_addr_t qemu_ram_map(ram_addr_t size, void *host)
2786 RAMBlock *new_block;
2788 size = TARGET_PAGE_ALIGN(size);
2789 new_block = qemu_malloc(sizeof(*new_block));
2791 new_block->host = host;
2793 new_block->offset = ram_list.last_offset;
2794 new_block->length = size;
2796 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2798 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2799 (ram_list.last_offset + size) >> TARGET_PAGE_BITS);
2800 memset(ram_list.phys_dirty + (ram_list.last_offset >> TARGET_PAGE_BITS),
2801 0xff, size >> TARGET_PAGE_BITS);
2803 ram_list.last_offset += size;
2805 if (kvm_enabled())
2806 kvm_setup_guest_memory(new_block->host, size);
2808 return new_block->offset;
2811 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2813 RAMBlock *new_block;
2815 size = TARGET_PAGE_ALIGN(size);
2816 new_block = qemu_malloc(sizeof(*new_block));
2818 if (mem_path) {
2819 #if defined (__linux__) && !defined(TARGET_S390X)
2820 new_block->host = file_ram_alloc(size, mem_path);
2821 if (!new_block->host) {
2822 new_block->host = qemu_vmalloc(size);
2823 #ifdef MADV_MERGEABLE
2824 madvise(new_block->host, size, MADV_MERGEABLE);
2825 #endif
2827 #else
2828 fprintf(stderr, "-mem-path option unsupported\n");
2829 exit(1);
2830 #endif
2831 } else {
2832 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2833 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2834 new_block->host = mmap((void*)0x1000000, size,
2835 PROT_EXEC|PROT_READ|PROT_WRITE,
2836 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2837 #else
2838 new_block->host = qemu_vmalloc(size);
2839 #endif
2840 #ifdef MADV_MERGEABLE
2841 madvise(new_block->host, size, MADV_MERGEABLE);
2842 #endif
2844 new_block->offset = ram_list.last_offset;
2845 new_block->length = size;
2847 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2849 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2850 (ram_list.last_offset + size) >> TARGET_PAGE_BITS);
2851 memset(ram_list.phys_dirty + (ram_list.last_offset >> TARGET_PAGE_BITS),
2852 0xff, size >> TARGET_PAGE_BITS);
2854 ram_list.last_offset += size;
2856 if (kvm_enabled())
2857 kvm_setup_guest_memory(new_block->host, size);
2859 return new_block->offset;
2862 void qemu_ram_free(ram_addr_t addr)
2864 /* TODO: implement this. */
2867 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2868 With the exception of the softmmu code in this file, this should
2869 only be used for local memory (e.g. video ram) that the device owns,
2870 and knows it isn't going to access beyond the end of the block.
2872 It should not be used for general purpose DMA.
2873 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2875 void *qemu_get_ram_ptr(ram_addr_t addr)
2877 RAMBlock *block;
2879 QLIST_FOREACH(block, &ram_list.blocks, next) {
2880 if (addr - block->offset < block->length) {
2881 QLIST_REMOVE(block, next);
2882 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2883 return block->host + (addr - block->offset);
2887 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2888 abort();
2890 return NULL;
2893 int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2895 RAMBlock *block;
2896 uint8_t *host = ptr;
2898 QLIST_FOREACH(block, &ram_list.blocks, next) {
2899 if (host - block->host < block->length) {
2900 *ram_addr = block->offset + (host - block->host);
2901 return 0;
2904 return -1;
2907 /* Some of the softmmu routines need to translate from a host pointer
2908 (typically a TLB entry) back to a ram offset. */
2909 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2911 ram_addr_t ram_addr;
2913 if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
2914 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2915 abort();
2917 return ram_addr;
2920 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2922 #ifdef DEBUG_UNASSIGNED
2923 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2924 #endif
2925 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2926 do_unassigned_access(addr, 0, 0, 0, 1);
2927 #endif
2928 return 0;
2931 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2933 #ifdef DEBUG_UNASSIGNED
2934 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2935 #endif
2936 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2937 do_unassigned_access(addr, 0, 0, 0, 2);
2938 #endif
2939 return 0;
2942 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2944 #ifdef DEBUG_UNASSIGNED
2945 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2946 #endif
2947 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2948 do_unassigned_access(addr, 0, 0, 0, 4);
2949 #endif
2950 return 0;
2953 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2955 #ifdef DEBUG_UNASSIGNED
2956 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2957 #endif
2958 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2959 do_unassigned_access(addr, 1, 0, 0, 1);
2960 #endif
2963 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2965 #ifdef DEBUG_UNASSIGNED
2966 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2967 #endif
2968 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2969 do_unassigned_access(addr, 1, 0, 0, 2);
2970 #endif
2973 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2975 #ifdef DEBUG_UNASSIGNED
2976 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2977 #endif
2978 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2979 do_unassigned_access(addr, 1, 0, 0, 4);
2980 #endif
2983 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2984 unassigned_mem_readb,
2985 unassigned_mem_readw,
2986 unassigned_mem_readl,
2989 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2990 unassigned_mem_writeb,
2991 unassigned_mem_writew,
2992 unassigned_mem_writel,
2995 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2996 uint32_t val)
2998 int dirty_flags;
2999 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3000 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3001 #if !defined(CONFIG_USER_ONLY)
3002 tb_invalidate_phys_page_fast(ram_addr, 1);
3003 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3004 #endif
3006 stb_p(qemu_get_ram_ptr(ram_addr), val);
3007 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3008 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3009 /* we remove the notdirty callback only if the code has been
3010 flushed */
3011 if (dirty_flags == 0xff)
3012 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3015 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3016 uint32_t val)
3018 int dirty_flags;
3019 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3020 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3021 #if !defined(CONFIG_USER_ONLY)
3022 tb_invalidate_phys_page_fast(ram_addr, 2);
3023 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3024 #endif
3026 stw_p(qemu_get_ram_ptr(ram_addr), val);
3027 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3028 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3029 /* we remove the notdirty callback only if the code has been
3030 flushed */
3031 if (dirty_flags == 0xff)
3032 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3035 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3036 uint32_t val)
3038 int dirty_flags;
3039 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3040 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3041 #if !defined(CONFIG_USER_ONLY)
3042 tb_invalidate_phys_page_fast(ram_addr, 4);
3043 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3044 #endif
3046 stl_p(qemu_get_ram_ptr(ram_addr), val);
3047 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3048 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3049 /* we remove the notdirty callback only if the code has been
3050 flushed */
3051 if (dirty_flags == 0xff)
3052 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3055 static CPUReadMemoryFunc * const error_mem_read[3] = {
3056 NULL, /* never used */
3057 NULL, /* never used */
3058 NULL, /* never used */
3061 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3062 notdirty_mem_writeb,
3063 notdirty_mem_writew,
3064 notdirty_mem_writel,
3067 /* Generate a debug exception if a watchpoint has been hit. */
3068 static void check_watchpoint(int offset, int len_mask, int flags)
3070 CPUState *env = cpu_single_env;
3071 target_ulong pc, cs_base;
3072 TranslationBlock *tb;
3073 target_ulong vaddr;
3074 CPUWatchpoint *wp;
3075 int cpu_flags;
3077 if (env->watchpoint_hit) {
3078 /* We re-entered the check after replacing the TB. Now raise
3079 * the debug interrupt so that it will trigger after the
3080 * current instruction. */
3081 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3082 return;
3084 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3085 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3086 if ((vaddr == (wp->vaddr & len_mask) ||
3087 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3088 wp->flags |= BP_WATCHPOINT_HIT;
3089 if (!env->watchpoint_hit) {
3090 env->watchpoint_hit = wp;
3091 tb = tb_find_pc(env->mem_io_pc);
3092 if (!tb) {
3093 cpu_abort(env, "check_watchpoint: could not find TB for "
3094 "pc=%p", (void *)env->mem_io_pc);
3096 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3097 tb_phys_invalidate(tb, -1);
3098 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3099 env->exception_index = EXCP_DEBUG;
3100 } else {
3101 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3102 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3104 cpu_resume_from_signal(env, NULL);
3106 } else {
3107 wp->flags &= ~BP_WATCHPOINT_HIT;
3112 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3113 so these check for a hit then pass through to the normal out-of-line
3114 phys routines. */
3115 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3117 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3118 return ldub_phys(addr);
3121 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3123 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3124 return lduw_phys(addr);
3127 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3129 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3130 return ldl_phys(addr);
3133 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3134 uint32_t val)
3136 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3137 stb_phys(addr, val);
3140 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3141 uint32_t val)
3143 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3144 stw_phys(addr, val);
3147 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3148 uint32_t val)
3150 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3151 stl_phys(addr, val);
3154 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3155 watch_mem_readb,
3156 watch_mem_readw,
3157 watch_mem_readl,
3160 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3161 watch_mem_writeb,
3162 watch_mem_writew,
3163 watch_mem_writel,
3166 static inline uint32_t subpage_readlen (subpage_t *mmio,
3167 target_phys_addr_t addr,
3168 unsigned int len)
3170 unsigned int idx = SUBPAGE_IDX(addr);
3171 #if defined(DEBUG_SUBPAGE)
3172 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3173 mmio, len, addr, idx);
3174 #endif
3176 addr += mmio->region_offset[idx];
3177 idx = mmio->sub_io_index[idx];
3178 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3181 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3182 uint32_t value, unsigned int len)
3184 unsigned int idx = SUBPAGE_IDX(addr);
3185 #if defined(DEBUG_SUBPAGE)
3186 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3187 __func__, mmio, len, addr, idx, value);
3188 #endif
3190 addr += mmio->region_offset[idx];
3191 idx = mmio->sub_io_index[idx];
3192 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3195 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3197 return subpage_readlen(opaque, addr, 0);
3200 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3201 uint32_t value)
3203 subpage_writelen(opaque, addr, value, 0);
3206 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3208 return subpage_readlen(opaque, addr, 1);
3211 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3212 uint32_t value)
3214 subpage_writelen(opaque, addr, value, 1);
3217 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3219 return subpage_readlen(opaque, addr, 2);
3222 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3223 uint32_t value)
3225 subpage_writelen(opaque, addr, value, 2);
3228 static CPUReadMemoryFunc * const subpage_read[] = {
3229 &subpage_readb,
3230 &subpage_readw,
3231 &subpage_readl,
3234 static CPUWriteMemoryFunc * const subpage_write[] = {
3235 &subpage_writeb,
3236 &subpage_writew,
3237 &subpage_writel,
3240 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3241 ram_addr_t memory, ram_addr_t region_offset)
3243 int idx, eidx;
3245 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3246 return -1;
3247 idx = SUBPAGE_IDX(start);
3248 eidx = SUBPAGE_IDX(end);
3249 #if defined(DEBUG_SUBPAGE)
3250 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3251 mmio, start, end, idx, eidx, memory);
3252 #endif
3253 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3254 for (; idx <= eidx; idx++) {
3255 mmio->sub_io_index[idx] = memory;
3256 mmio->region_offset[idx] = region_offset;
3259 return 0;
3262 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3263 ram_addr_t orig_memory,
3264 ram_addr_t region_offset)
3266 subpage_t *mmio;
3267 int subpage_memory;
3269 mmio = qemu_mallocz(sizeof(subpage_t));
3271 mmio->base = base;
3272 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3273 #if defined(DEBUG_SUBPAGE)
3274 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3275 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3276 #endif
3277 *phys = subpage_memory | IO_MEM_SUBPAGE;
3278 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3280 return mmio;
3283 static int get_free_io_mem_idx(void)
3285 int i;
3287 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3288 if (!io_mem_used[i]) {
3289 io_mem_used[i] = 1;
3290 return i;
3292 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3293 return -1;
3296 /* mem_read and mem_write are arrays of functions containing the
3297 function to access byte (index 0), word (index 1) and dword (index
3298 2). Functions can be omitted with a NULL function pointer.
3299 If io_index is non-zero, the corresponding io zone is
3300 modified. If it is zero, a new io zone is allocated. The return
3301 value can be used with cpu_register_physical_memory(). (-1) is
3302 returned on error. */
3303 static int cpu_register_io_memory_fixed(int io_index,
3304 CPUReadMemoryFunc * const *mem_read,
3305 CPUWriteMemoryFunc * const *mem_write,
3306 void *opaque)
3308 int i;
3310 if (io_index <= 0) {
3311 io_index = get_free_io_mem_idx();
3312 if (io_index == -1)
3313 return io_index;
3314 } else {
3315 io_index >>= IO_MEM_SHIFT;
3316 if (io_index >= IO_MEM_NB_ENTRIES)
3317 return -1;
3320 for (i = 0; i < 3; ++i) {
3321 io_mem_read[io_index][i]
3322 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3324 for (i = 0; i < 3; ++i) {
3325 io_mem_write[io_index][i]
3326 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3328 io_mem_opaque[io_index] = opaque;
3330 return (io_index << IO_MEM_SHIFT);
3333 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3334 CPUWriteMemoryFunc * const *mem_write,
3335 void *opaque)
3337 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
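/* Illustrative sketch (not part of this file): a device supplies byte, word
   and long callbacks and registers them; the returned value is then passed to
   cpu_register_physical_memory(), which is assumed to be the usual
   region_offset == 0 wrapper.  All of the mydev_* names and the MyDevState
   type are hypothetical. */
#if 0
static CPUReadMemoryFunc * const mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};

static CPUWriteMemoryFunc * const mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(MyDevState *s, target_phys_addr_t base)
{
    int io_index = cpu_register_io_memory(mydev_read, mydev_write, s);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
}
#endif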
3340 void cpu_unregister_io_memory(int io_table_address)
3342 int i;
3343 int io_index = io_table_address >> IO_MEM_SHIFT;
3345 for (i = 0; i < 3; i++) {
3346 io_mem_read[io_index][i] = unassigned_mem_read[i];
3347 io_mem_write[io_index][i] = unassigned_mem_write[i];
3349 io_mem_opaque[io_index] = NULL;
3350 io_mem_used[io_index] = 0;
3353 static void io_mem_init(void)
3355 int i;
3357 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3358 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3359 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3360 for (i = 0; i < 5; i++)
3361 io_mem_used[i] = 1;
3363 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3364 watch_mem_write, NULL);
3367 #endif /* !defined(CONFIG_USER_ONLY) */
3369 /* physical memory access (slow version, mainly for debug) */
3370 #if defined(CONFIG_USER_ONLY)
3371 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3372 uint8_t *buf, int len, int is_write)
3374 int l, flags;
3375 target_ulong page;
3376 void * p;
3378 while (len > 0) {
3379 page = addr & TARGET_PAGE_MASK;
3380 l = (page + TARGET_PAGE_SIZE) - addr;
3381 if (l > len)
3382 l = len;
3383 flags = page_get_flags(page);
3384 if (!(flags & PAGE_VALID))
3385 return -1;
3386 if (is_write) {
3387 if (!(flags & PAGE_WRITE))
3388 return -1;
3389 /* XXX: this code should not depend on lock_user */
3390 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3391 return -1;
3392 memcpy(p, buf, l);
3393 unlock_user(p, addr, l);
3394 } else {
3395 if (!(flags & PAGE_READ))
3396 return -1;
3397 /* XXX: this code should not depend on lock_user */
3398 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3399 return -1;
3400 memcpy(buf, p, l);
3401 unlock_user(p, addr, 0);
3403 len -= l;
3404 buf += l;
3405 addr += l;
3407 return 0;
3410 #else
3411 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3412 int len, int is_write)
3414 int l, io_index;
3415 uint8_t *ptr;
3416 uint32_t val;
3417 target_phys_addr_t page;
3418 unsigned long pd;
3419 PhysPageDesc *p;
3421 while (len > 0) {
3422 page = addr & TARGET_PAGE_MASK;
3423 l = (page + TARGET_PAGE_SIZE) - addr;
3424 if (l > len)
3425 l = len;
3426 p = phys_page_find(page >> TARGET_PAGE_BITS);
3427 if (!p) {
3428 pd = IO_MEM_UNASSIGNED;
3429 } else {
3430 pd = p->phys_offset;
3433 if (is_write) {
3434 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3435 target_phys_addr_t addr1 = addr;
3436 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3437 if (p)
3438 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3439 /* XXX: could force cpu_single_env to NULL to avoid
3440 potential bugs */
3441 if (l >= 4 && ((addr1 & 3) == 0)) {
3442 /* 32 bit write access */
3443 val = ldl_p(buf);
3444 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3445 l = 4;
3446 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3447 /* 16 bit write access */
3448 val = lduw_p(buf);
3449 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3450 l = 2;
3451 } else {
3452 /* 8 bit write access */
3453 val = ldub_p(buf);
3454 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3455 l = 1;
3457 } else {
3458 unsigned long addr1;
3459 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3460 /* RAM case */
3461 ptr = qemu_get_ram_ptr(addr1);
3462 memcpy(ptr, buf, l);
3463 if (!cpu_physical_memory_is_dirty(addr1)) {
3464 /* invalidate code */
3465 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3466 /* set dirty bit */
3467 cpu_physical_memory_set_dirty_flags(
3468 addr1, (0xff & ~CODE_DIRTY_FLAG));
3470 /* qemu doesn't execute guest code directly, but kvm does;
3471 therefore flush the instruction caches */
3472 if (kvm_enabled())
3473 flush_icache_range((unsigned long)ptr,
3474 ((unsigned long)ptr)+l);
3476 } else {
3477 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3478 !(pd & IO_MEM_ROMD)) {
3479 target_phys_addr_t addr1 = addr;
3480 /* I/O case */
3481 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3482 if (p)
3483 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3484 if (l >= 4 && ((addr1 & 3) == 0)) {
3485 /* 32 bit read access */
3486 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3487 stl_p(buf, val);
3488 l = 4;
3489 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3490 /* 16 bit read access */
3491 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3492 stw_p(buf, val);
3493 l = 2;
3494 } else {
3495 /* 8 bit read access */
3496 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3497 stb_p(buf, val);
3498 l = 1;
3500 } else {
3501 /* RAM case */
3502 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3503 (addr & ~TARGET_PAGE_MASK);
3504 memcpy(buf, ptr, l);
3507 len -= l;
3508 buf += l;
3509 addr += l;
3513 /* used for ROM loading : can write in RAM and ROM */
3514 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3515 const uint8_t *buf, int len)
3517 int l;
3518 uint8_t *ptr;
3519 target_phys_addr_t page;
3520 unsigned long pd;
3521 PhysPageDesc *p;
3523 while (len > 0) {
3524 page = addr & TARGET_PAGE_MASK;
3525 l = (page + TARGET_PAGE_SIZE) - addr;
3526 if (l > len)
3527 l = len;
3528 p = phys_page_find(page >> TARGET_PAGE_BITS);
3529 if (!p) {
3530 pd = IO_MEM_UNASSIGNED;
3531 } else {
3532 pd = p->phys_offset;
3535 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3536 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3537 !(pd & IO_MEM_ROMD)) {
3538 /* do nothing */
3539 } else {
3540 unsigned long addr1;
3541 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3542 /* ROM/RAM case */
3543 ptr = qemu_get_ram_ptr(addr1);
3544 memcpy(ptr, buf, l);
3546 len -= l;
3547 buf += l;
3548 addr += l;
3552 typedef struct {
3553 void *buffer;
3554 target_phys_addr_t addr;
3555 target_phys_addr_t len;
3556 } BounceBuffer;
3558 static BounceBuffer bounce;
3560 typedef struct MapClient {
3561 void *opaque;
3562 void (*callback)(void *opaque);
3563 QLIST_ENTRY(MapClient) link;
3564 } MapClient;
3566 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3567 = QLIST_HEAD_INITIALIZER(map_client_list);
3569 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3571 MapClient *client = qemu_malloc(sizeof(*client));
3573 client->opaque = opaque;
3574 client->callback = callback;
3575 QLIST_INSERT_HEAD(&map_client_list, client, link);
3576 return client;
3579 void cpu_unregister_map_client(void *_client)
3581 MapClient *client = (MapClient *)_client;
3583 QLIST_REMOVE(client, link);
3584 qemu_free(client);
3587 static void cpu_notify_map_clients(void)
3589 MapClient *client;
3591 while (!QLIST_EMPTY(&map_client_list)) {
3592 client = QLIST_FIRST(&map_client_list);
3593 client->callback(client->opaque);
3594 cpu_unregister_map_client(client);
3598 /* Map a physical memory region into a host virtual address.
3599 * May map a subset of the requested range, given by and returned in *plen.
3600 * May return NULL if resources needed to perform the mapping are exhausted.
3601 * Use only for reads OR writes - not for read-modify-write operations.
3602 * Use cpu_register_map_client() to know when retrying the map operation is
3603 * likely to succeed.
3605 void *cpu_physical_memory_map(target_phys_addr_t addr,
3606 target_phys_addr_t *plen,
3607 int is_write)
3609 target_phys_addr_t len = *plen;
3610 target_phys_addr_t done = 0;
3611 int l;
3612 uint8_t *ret = NULL;
3613 uint8_t *ptr;
3614 target_phys_addr_t page;
3615 unsigned long pd;
3616 PhysPageDesc *p;
3617 unsigned long addr1;
3619 while (len > 0) {
3620 page = addr & TARGET_PAGE_MASK;
3621 l = (page + TARGET_PAGE_SIZE) - addr;
3622 if (l > len)
3623 l = len;
3624 p = phys_page_find(page >> TARGET_PAGE_BITS);
3625 if (!p) {
3626 pd = IO_MEM_UNASSIGNED;
3627 } else {
3628 pd = p->phys_offset;
3631 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3632 if (done || bounce.buffer) {
3633 break;
3635 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3636 bounce.addr = addr;
3637 bounce.len = l;
3638 if (!is_write) {
3639 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3641 ptr = bounce.buffer;
3642 } else {
3643 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3644 ptr = qemu_get_ram_ptr(addr1);
3646 if (!done) {
3647 ret = ptr;
3648 } else if (ret + done != ptr) {
3649 break;
3652 len -= l;
3653 addr += l;
3654 done += l;
3656 *plen = done;
3657 return ret;
3660 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3661 * Will also mark the memory as dirty if is_write == 1. access_len gives
3662 * the amount of memory that was actually read or written by the caller.
3664 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3665 int is_write, target_phys_addr_t access_len)
3667 unsigned long flush_len = (unsigned long)access_len;
3669 if (buffer != bounce.buffer) {
3670 if (is_write) {
3671 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3672 while (access_len) {
3673 unsigned l;
3674 l = TARGET_PAGE_SIZE;
3675 if (l > access_len)
3676 l = access_len;
3677 if (!cpu_physical_memory_is_dirty(addr1)) {
3678 /* invalidate code */
3679 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3680 /* set dirty bit */
3681 cpu_physical_memory_set_dirty_flags(
3682 addr1, (0xff & ~CODE_DIRTY_FLAG));
3684 addr1 += l;
3685 access_len -= l;
3687 dma_flush_range((unsigned long)buffer,
3688 (unsigned long)buffer + flush_len);
3690 return;
3692 if (is_write) {
3693 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3695 qemu_vfree(bounce.buffer);
3696 bounce.buffer = NULL;
3697 cpu_notify_map_clients();
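/* Illustrative sketch (not part of this file): the usual DMA pattern maps a
   guest-physical range, uses the returned host pointer, and unmaps it again,
   falling back to cpu_physical_memory_rw() when the mapping comes back short
   or NULL (e.g. because the bounce buffer is busy).  The function name is
   hypothetical. */
#if 0
static void dma_read_example(target_phys_addr_t addr, uint8_t *dst, int len)
{
    target_phys_addr_t plen = len;
    void *mem = cpu_physical_memory_map(addr, &plen, 0 /* is_write */);

    if (mem && plen == (target_phys_addr_t)len) {
        memcpy(dst, mem, len);
        cpu_physical_memory_unmap(mem, plen, 0, plen);
    } else {
        if (mem) {
            cpu_physical_memory_unmap(mem, plen, 0, 0);
        }
        cpu_physical_memory_rw(addr, dst, len, 0);
    }
}
#endif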
3700 /* warning: addr must be aligned */
3701 uint32_t ldl_phys(target_phys_addr_t addr)
3703 int io_index;
3704 uint8_t *ptr;
3705 uint32_t val;
3706 unsigned long pd;
3707 PhysPageDesc *p;
3709 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3710 if (!p) {
3711 pd = IO_MEM_UNASSIGNED;
3712 } else {
3713 pd = p->phys_offset;
3716 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3717 !(pd & IO_MEM_ROMD)) {
3718 /* I/O case */
3719 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3720 if (p)
3721 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3722 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3723 } else {
3724 /* RAM case */
3725 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3726 (addr & ~TARGET_PAGE_MASK);
3727 val = ldl_p(ptr);
3729 return val;
3732 /* warning: addr must be aligned */
3733 uint64_t ldq_phys(target_phys_addr_t addr)
3735 int io_index;
3736 uint8_t *ptr;
3737 uint64_t val;
3738 unsigned long pd;
3739 PhysPageDesc *p;
3741 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3742 if (!p) {
3743 pd = IO_MEM_UNASSIGNED;
3744 } else {
3745 pd = p->phys_offset;
3748 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3749 !(pd & IO_MEM_ROMD)) {
3750 /* I/O case */
3751 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3752 if (p)
3753 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3754 #ifdef TARGET_WORDS_BIGENDIAN
3755 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3756 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3757 #else
3758 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3759 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3760 #endif
3761 } else {
3762 /* RAM case */
3763 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3764 (addr & ~TARGET_PAGE_MASK);
3765 val = ldq_p(ptr);
3767 return val;
3770 /* XXX: optimize */
3771 uint32_t ldub_phys(target_phys_addr_t addr)
3773 uint8_t val;
3774 cpu_physical_memory_read(addr, &val, 1);
3775 return val;
3778 /* warning: addr must be aligned */
3779 uint32_t lduw_phys(target_phys_addr_t addr)
3781 int io_index;
3782 uint8_t *ptr;
3783 uint64_t val;
3784 unsigned long pd;
3785 PhysPageDesc *p;
3787 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3788 if (!p) {
3789 pd = IO_MEM_UNASSIGNED;
3790 } else {
3791 pd = p->phys_offset;
3794 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3795 !(pd & IO_MEM_ROMD)) {
3796 /* I/O case */
3797 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3798 if (p)
3799 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3800 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3801 } else {
3802 /* RAM case */
3803 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3804 (addr & ~TARGET_PAGE_MASK);
3805 val = lduw_p(ptr);
3807 return val;
3810 /* warning: addr must be aligned. The ram page is not marked as dirty
3811 and the code inside is not invalidated. It is useful if the dirty
3812 bits are used to track modified PTEs */
3813 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3815 int io_index;
3816 uint8_t *ptr;
3817 unsigned long pd;
3818 PhysPageDesc *p;
3820 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3821 if (!p) {
3822 pd = IO_MEM_UNASSIGNED;
3823 } else {
3824 pd = p->phys_offset;
3827 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3828 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3829 if (p)
3830 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3831 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3832 } else {
3833 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3834 ptr = qemu_get_ram_ptr(addr1);
3835 stl_p(ptr, val);
3837 if (unlikely(in_migration)) {
3838 if (!cpu_physical_memory_is_dirty(addr1)) {
3839 /* invalidate code */
3840 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3841 /* set dirty bit */
3842 cpu_physical_memory_set_dirty_flags(
3843 addr1, (0xff & ~CODE_DIRTY_FLAG));
3849 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3851 int io_index;
3852 uint8_t *ptr;
3853 unsigned long pd;
3854 PhysPageDesc *p;
3856 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3857 if (!p) {
3858 pd = IO_MEM_UNASSIGNED;
3859 } else {
3860 pd = p->phys_offset;
3863 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3864 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3865 if (p)
3866 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3867 #ifdef TARGET_WORDS_BIGENDIAN
3868 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3869 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3870 #else
3871 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3872 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3873 #endif
3874 } else {
3875 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3876 (addr & ~TARGET_PAGE_MASK);
3877 stq_p(ptr, val);
3881 /* warning: addr must be aligned */
3882 void stl_phys(target_phys_addr_t addr, uint32_t val)
3884 int io_index;
3885 uint8_t *ptr;
3886 unsigned long pd;
3887 PhysPageDesc *p;
3889 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3890 if (!p) {
3891 pd = IO_MEM_UNASSIGNED;
3892 } else {
3893 pd = p->phys_offset;
3896 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3897 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3898 if (p)
3899 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3900 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3901 } else {
3902 unsigned long addr1;
3903 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3904 /* RAM case */
3905 ptr = qemu_get_ram_ptr(addr1);
3906 stl_p(ptr, val);
3907 if (!cpu_physical_memory_is_dirty(addr1)) {
3908 /* invalidate code */
3909 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3910 /* set dirty bit */
3911 cpu_physical_memory_set_dirty_flags(addr1,
3912 (0xff & ~CODE_DIRTY_FLAG));
3917 /* XXX: optimize */
3918 void stb_phys(target_phys_addr_t addr, uint32_t val)
3920 uint8_t v = val;
3921 cpu_physical_memory_write(addr, &v, 1);
3924 /* warning: addr must be aligned */
3925 void stw_phys(target_phys_addr_t addr, uint32_t val)
3927 int io_index;
3928 uint8_t *ptr;
3929 unsigned long pd;
3930 PhysPageDesc *p;
3932 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3933 if (!p) {
3934 pd = IO_MEM_UNASSIGNED;
3935 } else {
3936 pd = p->phys_offset;
3939 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3940 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3941 if (p)
3942 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3943 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3944 } else {
3945 unsigned long addr1;
3946 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3947 /* RAM case */
3948 ptr = qemu_get_ram_ptr(addr1);
3949 stw_p(ptr, val);
3950 if (!cpu_physical_memory_is_dirty(addr1)) {
3951 /* invalidate code */
3952 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3953 /* set dirty bit */
3954 cpu_physical_memory_set_dirty_flags(addr1,
3955 (0xff & ~CODE_DIRTY_FLAG));
3960 /* XXX: optimize */
3961 void stq_phys(target_phys_addr_t addr, uint64_t val)
3963 val = tswap64(val);
3964 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
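/* Illustrative note (not from the original source): device emulation code
   typically uses these helpers for word-sized accesses to guest-physical
   memory, e.g. reading a descriptor field and writing back a status bit
   ('desc_pa' is a hypothetical guest-physical address):

       uint32_t status = ldl_phys(desc_pa + 8);
       stl_phys(desc_pa + 8, status | 0x1);

   As the warnings above state, the word-sized variants require naturally
   aligned addresses. */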
3967 /* virtual memory access for debug (includes writing to ROM) */
3968 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3969 uint8_t *buf, int len, int is_write)
3971 int l;
3972 target_phys_addr_t phys_addr;
3973 target_ulong page;
3975 while (len > 0) {
3976 page = addr & TARGET_PAGE_MASK;
3977 phys_addr = cpu_get_phys_page_debug(env, page);
3978 /* if no physical page mapped, return an error */
3979 if (phys_addr == -1)
3980 return -1;
3981 l = (page + TARGET_PAGE_SIZE) - addr;
3982 if (l > len)
3983 l = len;
3984 phys_addr += (addr & ~TARGET_PAGE_MASK);
3985 if (is_write)
3986 cpu_physical_memory_write_rom(phys_addr, buf, l);
3987 else
3988 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3989 len -= l;
3990 buf += l;
3991 addr += l;
3993 return 0;
3995 #endif
3997 /* in deterministic execution mode, instructions doing device I/Os
3998 must be at the end of the TB */
3999 void cpu_io_recompile(CPUState *env, void *retaddr)
4001 TranslationBlock *tb;
4002 uint32_t n, cflags;
4003 target_ulong pc, cs_base;
4004 uint64_t flags;
4006 tb = tb_find_pc((unsigned long)retaddr);
4007 if (!tb) {
4008 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4009 retaddr);
4011 n = env->icount_decr.u16.low + tb->icount;
4012 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4013 /* Calculate how many instructions had been executed before the fault
4014 occurred. */
4015 n = n - env->icount_decr.u16.low;
4016 /* Generate a new TB ending on the I/O insn. */
4017 n++;
4018 /* On MIPS and SH, delay slot instructions can only be restarted if
4019 they were already the first instruction in the TB. If this is not
4020 the first instruction in a TB then re-execute the preceding
4021 branch. */
4022 #if defined(TARGET_MIPS)
4023 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4024 env->active_tc.PC -= 4;
4025 env->icount_decr.u16.low++;
4026 env->hflags &= ~MIPS_HFLAG_BMASK;
4028 #elif defined(TARGET_SH4)
4029 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4030 && n > 1) {
4031 env->pc -= 2;
4032 env->icount_decr.u16.low++;
4033 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4035 #endif
4036 /* This should never happen. */
4037 if (n > CF_COUNT_MASK)
4038 cpu_abort(env, "TB too big during recompile");
4040 cflags = n | CF_LAST_IO;
4041 pc = tb->pc;
4042 cs_base = tb->cs_base;
4043 flags = tb->flags;
4044 tb_phys_invalidate(tb, -1);
4045 /* FIXME: In theory this could raise an exception. In practice
4046 we have already translated the block once so it's probably ok. */
4047 tb_gen_code(env, pc, cs_base, flags, cflags);
4048 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4049 the first in the TB) then we end up generating a whole new TB and
4050 repeating the fault, which is horribly inefficient.
4051 Better would be to execute just this insn uncached, or generate a
4052 second new TB. */
4053 cpu_resume_from_signal(env, NULL);
4056 #if !defined(CONFIG_USER_ONLY)
4058 void dump_exec_info(FILE *f,
4059 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4061 int i, target_code_size, max_target_code_size;
4062 int direct_jmp_count, direct_jmp2_count, cross_page;
4063 TranslationBlock *tb;
4065 target_code_size = 0;
4066 max_target_code_size = 0;
4067 cross_page = 0;
4068 direct_jmp_count = 0;
4069 direct_jmp2_count = 0;
4070 for(i = 0; i < nb_tbs; i++) {
4071 tb = &tbs[i];
4072 target_code_size += tb->size;
4073 if (tb->size > max_target_code_size)
4074 max_target_code_size = tb->size;
4075 if (tb->page_addr[1] != -1)
4076 cross_page++;
4077 if (tb->tb_next_offset[0] != 0xffff) {
4078 direct_jmp_count++;
4079 if (tb->tb_next_offset[1] != 0xffff) {
4080 direct_jmp2_count++;
4084 /* XXX: avoid using doubles ? */
4085 cpu_fprintf(f, "Translation buffer state:\n");
4086 cpu_fprintf(f, "gen code size %ld/%ld\n",
4087 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4088 cpu_fprintf(f, "TB count %d/%d\n",
4089 nb_tbs, code_gen_max_blocks);
4090 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4091 nb_tbs ? target_code_size / nb_tbs : 0,
4092 max_target_code_size);
4093 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4094 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4095 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4096 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4097 cross_page,
4098 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4099 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4100 direct_jmp_count,
4101 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4102 direct_jmp2_count,
4103 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4104 cpu_fprintf(f, "\nStatistics:\n");
4105 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4106 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4107 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4108 #ifdef CONFIG_PROFILER
4109 tcg_dump_info(f, cpu_fprintf);
4110 #endif
4113 #define MMUSUFFIX _cmmu
4114 #define GETPC() NULL
4115 #define env cpu_single_env
4116 #define SOFTMMU_CODE_ACCESS
4118 #define SHIFT 0
4119 #include "softmmu_template.h"
4121 #define SHIFT 1
4122 #include "softmmu_template.h"
4124 #define SHIFT 2
4125 #include "softmmu_template.h"
4127 #define SHIFT 3
4128 #include "softmmu_template.h"
4130 #undef env
4132 #endif