Add basic audio functionality to vnc.c
[qemu/mini2440.git] / exec.c
blobb03fe0f92c151af433e760266efec12c86fb52a6
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
115 int phys_ram_fd;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
120 #endif
122 CPUState *first_cpu;
123 /* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129 int use_icount = 0;
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132 int64_t qemu_icount;
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149 ram_addr_t region_offset;
150 } PhysPageDesc;
152 #define L2_BITS 10
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #else
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161 #endif
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
166 unsigned long qemu_real_host_page_size;
167 unsigned long qemu_host_page_bits;
168 unsigned long qemu_host_page_size;
169 unsigned long qemu_host_page_mask;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc *l1_map[L1_SIZE];
173 static PhysPageDesc **l1_phys_map;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182 static int io_mem_nb;
183 static int io_mem_watch;
184 #endif
186 /* log support */
187 static const char *logfilename = "/tmp/qemu.log";
188 FILE *logfile;
189 int loglevel;
190 static int log_append = 0;
192 /* statistics */
193 static int tlb_flush_count;
194 static int tb_flush_count;
195 static int tb_phys_invalidate_count;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t {
199 target_phys_addr_t base;
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204 } subpage_t;
206 #ifdef _WIN32
207 static void map_exec(void *addr, long size)
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
214 #else
215 static void map_exec(void *addr, long size)
217 unsigned long start, end, page_size;
219 page_size = getpagesize();
220 start = (unsigned long)addr;
221 start &= ~(page_size - 1);
223 end = (unsigned long)addr + size;
224 end += page_size - 1;
225 end &= ~(page_size - 1);
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
230 #endif
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
235 TARGET_PAGE_SIZE */
236 #ifdef _WIN32
238 SYSTEM_INFO system_info;
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
243 #else
244 qemu_real_host_page_size = getpagesize();
245 #endif
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
263 mmap_lock();
264 last_brk = (unsigned long)sbrk(0);
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274 page_set_flags(startaddr & TARGET_PAGE_MASK,
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
278 } while (!feof(f));
279 fclose(f);
281 mmap_unlock();
283 #endif
286 static inline PageDesc **page_l1_map(target_ulong index)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292 return NULL;
293 #endif
294 return &l1_map[index >> L2_BITS];
297 static inline PageDesc *page_find_alloc(target_ulong index)
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 unsigned long addr;
309 size_t len = sizeof(PageDesc) * L2_SIZE;
310 /* Don't use qemu_malloc because it may recurse. */
311 p = mmap(0, len, PROT_READ | PROT_WRITE,
312 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
313 *lp = p;
314 addr = h2g(p);
315 if (addr == (target_ulong)addr) {
316 page_set_flags(addr & TARGET_PAGE_MASK,
317 TARGET_PAGE_ALIGN(addr + len),
318 PAGE_RESERVED);
320 #else
321 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
322 *lp = p;
323 #endif
325 return p + (index & (L2_SIZE - 1));
328 static inline PageDesc *page_find(target_ulong index)
330 PageDesc **lp, *p;
331 lp = page_l1_map(index);
332 if (!lp)
333 return NULL;
335 p = *lp;
336 if (!p)
337 return 0;
338 return p + (index & (L2_SIZE - 1));
341 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
343 void **lp, **p;
344 PhysPageDesc *pd;
346 p = (void **)l1_phys_map;
347 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
349 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
350 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
351 #endif
352 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
353 p = *lp;
354 if (!p) {
355 /* allocate if not found */
356 if (!alloc)
357 return NULL;
358 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
359 memset(p, 0, sizeof(void *) * L1_SIZE);
360 *lp = p;
362 #endif
363 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
364 pd = *lp;
365 if (!pd) {
366 int i;
367 /* allocate if not found */
368 if (!alloc)
369 return NULL;
370 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
371 *lp = pd;
372 for (i = 0; i < L2_SIZE; i++)
373 pd[i].phys_offset = IO_MEM_UNASSIGNED;
375 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
378 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
380 return phys_page_find_alloc(index, 0);
383 #if !defined(CONFIG_USER_ONLY)
384 static void tlb_protect_code(ram_addr_t ram_addr);
385 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
386 target_ulong vaddr);
387 #define mmap_lock() do { } while(0)
388 #define mmap_unlock() do { } while(0)
389 #endif
391 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393 #if defined(CONFIG_USER_ONLY)
394 /* Currently it is not recommanded to allocate big chunks of data in
395 user mode. It will change when a dedicated libc will be used */
396 #define USE_STATIC_CODE_GEN_BUFFER
397 #endif
399 #ifdef USE_STATIC_CODE_GEN_BUFFER
400 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
401 #endif
403 static void code_gen_alloc(unsigned long tb_size)
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
406 code_gen_buffer = static_code_gen_buffer;
407 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
408 map_exec(code_gen_buffer, code_gen_buffer_size);
409 #else
410 code_gen_buffer_size = tb_size;
411 if (code_gen_buffer_size == 0) {
412 #if defined(CONFIG_USER_ONLY)
413 /* in user mode, phys_ram_size is not meaningful */
414 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415 #else
416 /* XXX: needs ajustments */
417 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
418 #endif
420 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
421 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
422 /* The code gen buffer location may have constraints depending on
423 the host cpu and OS */
424 #if defined(__linux__)
426 int flags;
427 void *start = NULL;
429 flags = MAP_PRIVATE | MAP_ANONYMOUS;
430 #if defined(__x86_64__)
431 flags |= MAP_32BIT;
432 /* Cannot map more than that */
433 if (code_gen_buffer_size > (800 * 1024 * 1024))
434 code_gen_buffer_size = (800 * 1024 * 1024);
435 #elif defined(__sparc_v9__)
436 // Map the buffer below 2G, so we can use direct calls and branches
437 flags |= MAP_FIXED;
438 start = (void *) 0x60000000UL;
439 if (code_gen_buffer_size > (512 * 1024 * 1024))
440 code_gen_buffer_size = (512 * 1024 * 1024);
441 #elif defined(__arm__)
442 /* Map the buffer below 32M, so we can use direct calls and branches */
443 flags |= MAP_FIXED;
444 start = (void *) 0x01000000UL;
445 if (code_gen_buffer_size > 16 * 1024 * 1024)
446 code_gen_buffer_size = 16 * 1024 * 1024;
447 #endif
448 code_gen_buffer = mmap(start, code_gen_buffer_size,
449 PROT_WRITE | PROT_READ | PROT_EXEC,
450 flags, -1, 0);
451 if (code_gen_buffer == MAP_FAILED) {
452 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
453 exit(1);
456 #elif defined(__FreeBSD__)
458 int flags;
459 void *addr = NULL;
460 flags = MAP_PRIVATE | MAP_ANONYMOUS;
461 #if defined(__x86_64__)
462 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
463 * 0x40000000 is free */
464 flags |= MAP_FIXED;
465 addr = (void *)0x40000000;
466 /* Cannot map more than that */
467 if (code_gen_buffer_size > (800 * 1024 * 1024))
468 code_gen_buffer_size = (800 * 1024 * 1024);
469 #endif
470 code_gen_buffer = mmap(addr, code_gen_buffer_size,
471 PROT_WRITE | PROT_READ | PROT_EXEC,
472 flags, -1, 0);
473 if (code_gen_buffer == MAP_FAILED) {
474 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
475 exit(1);
478 #else
479 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
480 if (!code_gen_buffer) {
481 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482 exit(1);
484 map_exec(code_gen_buffer, code_gen_buffer_size);
485 #endif
486 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
487 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
488 code_gen_buffer_max_size = code_gen_buffer_size -
489 code_gen_max_block_size();
490 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
491 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
494 /* Must be called before using the QEMU cpus. 'tb_size' is the size
495 (in bytes) allocated to the translation buffer. Zero means default
496 size. */
497 void cpu_exec_init_all(unsigned long tb_size)
499 cpu_gen_init();
500 code_gen_alloc(tb_size);
501 code_gen_ptr = code_gen_buffer;
502 page_init();
503 #if !defined(CONFIG_USER_ONLY)
504 io_mem_init();
505 #endif
508 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
510 #define CPU_COMMON_SAVE_VERSION 1
512 static void cpu_common_save(QEMUFile *f, void *opaque)
514 CPUState *env = opaque;
516 qemu_put_be32s(f, &env->halted);
517 qemu_put_be32s(f, &env->interrupt_request);
520 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
522 CPUState *env = opaque;
524 if (version_id != CPU_COMMON_SAVE_VERSION)
525 return -EINVAL;
527 qemu_get_be32s(f, &env->halted);
528 qemu_get_be32s(f, &env->interrupt_request);
529 tlb_flush(env, 1);
531 return 0;
533 #endif
535 void cpu_exec_init(CPUState *env)
537 CPUState **penv;
538 int cpu_index;
540 env->next_cpu = NULL;
541 penv = &first_cpu;
542 cpu_index = 0;
543 while (*penv != NULL) {
544 penv = (CPUState **)&(*penv)->next_cpu;
545 cpu_index++;
547 env->cpu_index = cpu_index;
548 TAILQ_INIT(&env->breakpoints);
549 TAILQ_INIT(&env->watchpoints);
550 *penv = env;
551 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
552 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
553 cpu_common_save, cpu_common_load, env);
554 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
555 cpu_save, cpu_load, env);
556 #endif
559 static inline void invalidate_page_bitmap(PageDesc *p)
561 if (p->code_bitmap) {
562 qemu_free(p->code_bitmap);
563 p->code_bitmap = NULL;
565 p->code_write_count = 0;
568 /* set to NULL all the 'first_tb' fields in all PageDescs */
569 static void page_flush_tb(void)
571 int i, j;
572 PageDesc *p;
574 for(i = 0; i < L1_SIZE; i++) {
575 p = l1_map[i];
576 if (p) {
577 for(j = 0; j < L2_SIZE; j++) {
578 p->first_tb = NULL;
579 invalidate_page_bitmap(p);
580 p++;
586 /* flush all the translation blocks */
587 /* XXX: tb_flush is currently not thread safe */
588 void tb_flush(CPUState *env1)
590 CPUState *env;
591 #if defined(DEBUG_FLUSH)
592 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
593 (unsigned long)(code_gen_ptr - code_gen_buffer),
594 nb_tbs, nb_tbs > 0 ?
595 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
596 #endif
597 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
598 cpu_abort(env1, "Internal error: code buffer overflow\n");
600 nb_tbs = 0;
602 for(env = first_cpu; env != NULL; env = env->next_cpu) {
603 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
606 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
607 page_flush_tb();
609 code_gen_ptr = code_gen_buffer;
610 /* XXX: flush processor icache at this point if cache flush is
611 expensive */
612 tb_flush_count++;
615 #ifdef DEBUG_TB_CHECK
617 static void tb_invalidate_check(target_ulong address)
619 TranslationBlock *tb;
620 int i;
621 address &= TARGET_PAGE_MASK;
622 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
623 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
624 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
625 address >= tb->pc + tb->size)) {
626 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
627 address, (long)tb->pc, tb->size);
633 /* verify that all the pages have correct rights for code */
634 static void tb_page_check(void)
636 TranslationBlock *tb;
637 int i, flags1, flags2;
639 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
640 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
641 flags1 = page_get_flags(tb->pc);
642 flags2 = page_get_flags(tb->pc + tb->size - 1);
643 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
644 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
645 (long)tb->pc, tb->size, flags1, flags2);
651 static void tb_jmp_check(TranslationBlock *tb)
653 TranslationBlock *tb1;
654 unsigned int n1;
656 /* suppress any remaining jumps to this TB */
657 tb1 = tb->jmp_first;
658 for(;;) {
659 n1 = (long)tb1 & 3;
660 tb1 = (TranslationBlock *)((long)tb1 & ~3);
661 if (n1 == 2)
662 break;
663 tb1 = tb1->jmp_next[n1];
665 /* check end of list */
666 if (tb1 != tb) {
667 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
671 #endif
673 /* invalidate one TB */
674 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
675 int next_offset)
677 TranslationBlock *tb1;
678 for(;;) {
679 tb1 = *ptb;
680 if (tb1 == tb) {
681 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
682 break;
684 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
688 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
690 TranslationBlock *tb1;
691 unsigned int n1;
693 for(;;) {
694 tb1 = *ptb;
695 n1 = (long)tb1 & 3;
696 tb1 = (TranslationBlock *)((long)tb1 & ~3);
697 if (tb1 == tb) {
698 *ptb = tb1->page_next[n1];
699 break;
701 ptb = &tb1->page_next[n1];
705 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
707 TranslationBlock *tb1, **ptb;
708 unsigned int n1;
710 ptb = &tb->jmp_next[n];
711 tb1 = *ptb;
712 if (tb1) {
713 /* find tb(n) in circular list */
714 for(;;) {
715 tb1 = *ptb;
716 n1 = (long)tb1 & 3;
717 tb1 = (TranslationBlock *)((long)tb1 & ~3);
718 if (n1 == n && tb1 == tb)
719 break;
720 if (n1 == 2) {
721 ptb = &tb1->jmp_first;
722 } else {
723 ptb = &tb1->jmp_next[n1];
726 /* now we can suppress tb(n) from the list */
727 *ptb = tb->jmp_next[n];
729 tb->jmp_next[n] = NULL;
733 /* reset the jump entry 'n' of a TB so that it is not chained to
734 another TB */
735 static inline void tb_reset_jump(TranslationBlock *tb, int n)
737 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
740 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
742 CPUState *env;
743 PageDesc *p;
744 unsigned int h, n1;
745 target_phys_addr_t phys_pc;
746 TranslationBlock *tb1, *tb2;
748 /* remove the TB from the hash list */
749 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
750 h = tb_phys_hash_func(phys_pc);
751 tb_remove(&tb_phys_hash[h], tb,
752 offsetof(TranslationBlock, phys_hash_next));
754 /* remove the TB from the page list */
755 if (tb->page_addr[0] != page_addr) {
756 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
757 tb_page_remove(&p->first_tb, tb);
758 invalidate_page_bitmap(p);
760 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
761 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
762 tb_page_remove(&p->first_tb, tb);
763 invalidate_page_bitmap(p);
766 tb_invalidated_flag = 1;
768 /* remove the TB from the hash list */
769 h = tb_jmp_cache_hash_func(tb->pc);
770 for(env = first_cpu; env != NULL; env = env->next_cpu) {
771 if (env->tb_jmp_cache[h] == tb)
772 env->tb_jmp_cache[h] = NULL;
775 /* suppress this TB from the two jump lists */
776 tb_jmp_remove(tb, 0);
777 tb_jmp_remove(tb, 1);
779 /* suppress any remaining jumps to this TB */
780 tb1 = tb->jmp_first;
781 for(;;) {
782 n1 = (long)tb1 & 3;
783 if (n1 == 2)
784 break;
785 tb1 = (TranslationBlock *)((long)tb1 & ~3);
786 tb2 = tb1->jmp_next[n1];
787 tb_reset_jump(tb1, n1);
788 tb1->jmp_next[n1] = NULL;
789 tb1 = tb2;
791 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
793 tb_phys_invalidate_count++;
796 static inline void set_bits(uint8_t *tab, int start, int len)
798 int end, mask, end1;
800 end = start + len;
801 tab += start >> 3;
802 mask = 0xff << (start & 7);
803 if ((start & ~7) == (end & ~7)) {
804 if (start < end) {
805 mask &= ~(0xff << (end & 7));
806 *tab |= mask;
808 } else {
809 *tab++ |= mask;
810 start = (start + 8) & ~7;
811 end1 = end & ~7;
812 while (start < end1) {
813 *tab++ = 0xff;
814 start += 8;
816 if (start < end) {
817 mask = ~(0xff << (end & 7));
818 *tab |= mask;
823 static void build_page_bitmap(PageDesc *p)
825 int n, tb_start, tb_end;
826 TranslationBlock *tb;
828 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
829 if (!p->code_bitmap)
830 return;
832 tb = p->first_tb;
833 while (tb != NULL) {
834 n = (long)tb & 3;
835 tb = (TranslationBlock *)((long)tb & ~3);
836 /* NOTE: this is subtle as a TB may span two physical pages */
837 if (n == 0) {
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start = tb->pc & ~TARGET_PAGE_MASK;
841 tb_end = tb_start + tb->size;
842 if (tb_end > TARGET_PAGE_SIZE)
843 tb_end = TARGET_PAGE_SIZE;
844 } else {
845 tb_start = 0;
846 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
848 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
849 tb = tb->page_next[n];
853 TranslationBlock *tb_gen_code(CPUState *env,
854 target_ulong pc, target_ulong cs_base,
855 int flags, int cflags)
857 TranslationBlock *tb;
858 uint8_t *tc_ptr;
859 target_ulong phys_pc, phys_page2, virt_page2;
860 int code_gen_size;
862 phys_pc = get_phys_addr_code(env, pc);
863 tb = tb_alloc(pc);
864 if (!tb) {
865 /* flush must be done */
866 tb_flush(env);
867 /* cannot fail at this point */
868 tb = tb_alloc(pc);
869 /* Don't forget to invalidate previous TB info. */
870 tb_invalidated_flag = 1;
872 tc_ptr = code_gen_ptr;
873 tb->tc_ptr = tc_ptr;
874 tb->cs_base = cs_base;
875 tb->flags = flags;
876 tb->cflags = cflags;
877 cpu_gen_code(env, tb, &code_gen_size);
878 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
880 /* check next page if needed */
881 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
882 phys_page2 = -1;
883 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
884 phys_page2 = get_phys_addr_code(env, virt_page2);
886 tb_link_phys(tb, phys_pc, phys_page2);
887 return tb;
890 /* invalidate all TBs which intersect with the target physical page
891 starting in range [start;end[. NOTE: start and end must refer to
892 the same physical page. 'is_cpu_write_access' should be true if called
893 from a real cpu write access: the virtual CPU will exit the current
894 TB if code is modified inside this TB. */
895 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
896 int is_cpu_write_access)
898 TranslationBlock *tb, *tb_next, *saved_tb;
899 CPUState *env = cpu_single_env;
900 target_ulong tb_start, tb_end;
901 PageDesc *p;
902 int n;
903 #ifdef TARGET_HAS_PRECISE_SMC
904 int current_tb_not_found = is_cpu_write_access;
905 TranslationBlock *current_tb = NULL;
906 int current_tb_modified = 0;
907 target_ulong current_pc = 0;
908 target_ulong current_cs_base = 0;
909 int current_flags = 0;
910 #endif /* TARGET_HAS_PRECISE_SMC */
912 p = page_find(start >> TARGET_PAGE_BITS);
913 if (!p)
914 return;
915 if (!p->code_bitmap &&
916 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917 is_cpu_write_access) {
918 /* build code bitmap */
919 build_page_bitmap(p);
922 /* we remove all the TBs in the range [start, end[ */
923 /* XXX: see if in some cases it could be faster to invalidate all the code */
924 tb = p->first_tb;
925 while (tb != NULL) {
926 n = (long)tb & 3;
927 tb = (TranslationBlock *)((long)tb & ~3);
928 tb_next = tb->page_next[n];
929 /* NOTE: this is subtle as a TB may span two physical pages */
930 if (n == 0) {
931 /* NOTE: tb_end may be after the end of the page, but
932 it is not a problem */
933 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934 tb_end = tb_start + tb->size;
935 } else {
936 tb_start = tb->page_addr[1];
937 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
939 if (!(tb_end <= start || tb_start >= end)) {
940 #ifdef TARGET_HAS_PRECISE_SMC
941 if (current_tb_not_found) {
942 current_tb_not_found = 0;
943 current_tb = NULL;
944 if (env->mem_io_pc) {
945 /* now we have a real cpu fault */
946 current_tb = tb_find_pc(env->mem_io_pc);
949 if (current_tb == tb &&
950 (current_tb->cflags & CF_COUNT_MASK) != 1) {
951 /* If we are modifying the current TB, we must stop
952 its execution. We could be more precise by checking
953 that the modification is after the current PC, but it
954 would require a specialized function to partially
955 restore the CPU state */
957 current_tb_modified = 1;
958 cpu_restore_state(current_tb, env,
959 env->mem_io_pc, NULL);
960 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961 &current_flags);
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 /* we need to do that to handle the case where a signal
965 occurs while doing tb_phys_invalidate() */
966 saved_tb = NULL;
967 if (env) {
968 saved_tb = env->current_tb;
969 env->current_tb = NULL;
971 tb_phys_invalidate(tb, -1);
972 if (env) {
973 env->current_tb = saved_tb;
974 if (env->interrupt_request && env->current_tb)
975 cpu_interrupt(env, env->interrupt_request);
978 tb = tb_next;
980 #if !defined(CONFIG_USER_ONLY)
981 /* if no code remaining, no need to continue to use slow writes */
982 if (!p->first_tb) {
983 invalidate_page_bitmap(p);
984 if (is_cpu_write_access) {
985 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
988 #endif
989 #ifdef TARGET_HAS_PRECISE_SMC
990 if (current_tb_modified) {
991 /* we generate a block containing just the instruction
992 modifying the memory. It will ensure that it cannot modify
993 itself */
994 env->current_tb = NULL;
995 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
996 cpu_resume_from_signal(env, NULL);
998 #endif
1001 /* len must be <= 8 and start must be a multiple of len */
1002 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1004 PageDesc *p;
1005 int offset, b;
1006 #if 0
1007 if (1) {
1008 if (loglevel) {
1009 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1010 cpu_single_env->mem_io_vaddr, len,
1011 cpu_single_env->eip,
1012 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1015 #endif
1016 p = page_find(start >> TARGET_PAGE_BITS);
1017 if (!p)
1018 return;
1019 if (p->code_bitmap) {
1020 offset = start & ~TARGET_PAGE_MASK;
1021 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1022 if (b & ((1 << len) - 1))
1023 goto do_invalidate;
1024 } else {
1025 do_invalidate:
1026 tb_invalidate_phys_page_range(start, start + len, 1);
1030 #if !defined(CONFIG_SOFTMMU)
1031 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1032 unsigned long pc, void *puc)
1034 TranslationBlock *tb;
1035 PageDesc *p;
1036 int n;
1037 #ifdef TARGET_HAS_PRECISE_SMC
1038 TranslationBlock *current_tb = NULL;
1039 CPUState *env = cpu_single_env;
1040 int current_tb_modified = 0;
1041 target_ulong current_pc = 0;
1042 target_ulong current_cs_base = 0;
1043 int current_flags = 0;
1044 #endif
1046 addr &= TARGET_PAGE_MASK;
1047 p = page_find(addr >> TARGET_PAGE_BITS);
1048 if (!p)
1049 return;
1050 tb = p->first_tb;
1051 #ifdef TARGET_HAS_PRECISE_SMC
1052 if (tb && pc != 0) {
1053 current_tb = tb_find_pc(pc);
1055 #endif
1056 while (tb != NULL) {
1057 n = (long)tb & 3;
1058 tb = (TranslationBlock *)((long)tb & ~3);
1059 #ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb == tb &&
1061 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1062 /* If we are modifying the current TB, we must stop
1063 its execution. We could be more precise by checking
1064 that the modification is after the current PC, but it
1065 would require a specialized function to partially
1066 restore the CPU state */
1068 current_tb_modified = 1;
1069 cpu_restore_state(current_tb, env, pc, puc);
1070 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1071 &current_flags);
1073 #endif /* TARGET_HAS_PRECISE_SMC */
1074 tb_phys_invalidate(tb, addr);
1075 tb = tb->page_next[n];
1077 p->first_tb = NULL;
1078 #ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_modified) {
1080 /* we generate a block containing just the instruction
1081 modifying the memory. It will ensure that it cannot modify
1082 itself */
1083 env->current_tb = NULL;
1084 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1085 cpu_resume_from_signal(env, puc);
1087 #endif
1089 #endif
1091 /* add the tb in the target page and protect it if necessary */
1092 static inline void tb_alloc_page(TranslationBlock *tb,
1093 unsigned int n, target_ulong page_addr)
1095 PageDesc *p;
1096 TranslationBlock *last_first_tb;
1098 tb->page_addr[n] = page_addr;
1099 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1100 tb->page_next[n] = p->first_tb;
1101 last_first_tb = p->first_tb;
1102 p->first_tb = (TranslationBlock *)((long)tb | n);
1103 invalidate_page_bitmap(p);
1105 #if defined(TARGET_HAS_SMC) || 1
1107 #if defined(CONFIG_USER_ONLY)
1108 if (p->flags & PAGE_WRITE) {
1109 target_ulong addr;
1110 PageDesc *p2;
1111 int prot;
1113 /* force the host page as non writable (writes will have a
1114 page fault + mprotect overhead) */
1115 page_addr &= qemu_host_page_mask;
1116 prot = 0;
1117 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1118 addr += TARGET_PAGE_SIZE) {
1120 p2 = page_find (addr >> TARGET_PAGE_BITS);
1121 if (!p2)
1122 continue;
1123 prot |= p2->flags;
1124 p2->flags &= ~PAGE_WRITE;
1125 page_get_flags(addr);
1127 mprotect(g2h(page_addr), qemu_host_page_size,
1128 (prot & PAGE_BITS) & ~PAGE_WRITE);
1129 #ifdef DEBUG_TB_INVALIDATE
1130 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1131 page_addr);
1132 #endif
1134 #else
1135 /* if some code is already present, then the pages are already
1136 protected. So we handle the case where only the first TB is
1137 allocated in a physical page */
1138 if (!last_first_tb) {
1139 tlb_protect_code(page_addr);
1141 #endif
1143 #endif /* TARGET_HAS_SMC */
1146 /* Allocate a new translation block. Flush the translation buffer if
1147 too many translation blocks or too much generated code. */
1148 TranslationBlock *tb_alloc(target_ulong pc)
1150 TranslationBlock *tb;
1152 if (nb_tbs >= code_gen_max_blocks ||
1153 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1154 return NULL;
1155 tb = &tbs[nb_tbs++];
1156 tb->pc = pc;
1157 tb->cflags = 0;
1158 return tb;
1161 void tb_free(TranslationBlock *tb)
1163 /* In practice this is mostly used for single use temporary TB
1164 Ignore the hard cases and just back up if this TB happens to
1165 be the last one generated. */
1166 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1167 code_gen_ptr = tb->tc_ptr;
1168 nb_tbs--;
1172 /* add a new TB and link it to the physical page tables. phys_page2 is
1173 (-1) to indicate that only one page contains the TB. */
1174 void tb_link_phys(TranslationBlock *tb,
1175 target_ulong phys_pc, target_ulong phys_page2)
1177 unsigned int h;
1178 TranslationBlock **ptb;
1180 /* Grab the mmap lock to stop another thread invalidating this TB
1181 before we are done. */
1182 mmap_lock();
1183 /* add in the physical hash table */
1184 h = tb_phys_hash_func(phys_pc);
1185 ptb = &tb_phys_hash[h];
1186 tb->phys_hash_next = *ptb;
1187 *ptb = tb;
1189 /* add in the page list */
1190 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1191 if (phys_page2 != -1)
1192 tb_alloc_page(tb, 1, phys_page2);
1193 else
1194 tb->page_addr[1] = -1;
1196 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1197 tb->jmp_next[0] = NULL;
1198 tb->jmp_next[1] = NULL;
1200 /* init original jump addresses */
1201 if (tb->tb_next_offset[0] != 0xffff)
1202 tb_reset_jump(tb, 0);
1203 if (tb->tb_next_offset[1] != 0xffff)
1204 tb_reset_jump(tb, 1);
1206 #ifdef DEBUG_TB_CHECK
1207 tb_page_check();
1208 #endif
1209 mmap_unlock();
1212 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1213 tb[1].tc_ptr. Return NULL if not found */
1214 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1216 int m_min, m_max, m;
1217 unsigned long v;
1218 TranslationBlock *tb;
1220 if (nb_tbs <= 0)
1221 return NULL;
1222 if (tc_ptr < (unsigned long)code_gen_buffer ||
1223 tc_ptr >= (unsigned long)code_gen_ptr)
1224 return NULL;
1225 /* binary search (cf Knuth) */
1226 m_min = 0;
1227 m_max = nb_tbs - 1;
1228 while (m_min <= m_max) {
1229 m = (m_min + m_max) >> 1;
1230 tb = &tbs[m];
1231 v = (unsigned long)tb->tc_ptr;
1232 if (v == tc_ptr)
1233 return tb;
1234 else if (tc_ptr < v) {
1235 m_max = m - 1;
1236 } else {
1237 m_min = m + 1;
1240 return &tbs[m_max];
1243 static void tb_reset_jump_recursive(TranslationBlock *tb);
1245 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1247 TranslationBlock *tb1, *tb_next, **ptb;
1248 unsigned int n1;
1250 tb1 = tb->jmp_next[n];
1251 if (tb1 != NULL) {
1252 /* find head of list */
1253 for(;;) {
1254 n1 = (long)tb1 & 3;
1255 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1256 if (n1 == 2)
1257 break;
1258 tb1 = tb1->jmp_next[n1];
1260 /* we are now sure now that tb jumps to tb1 */
1261 tb_next = tb1;
1263 /* remove tb from the jmp_first list */
1264 ptb = &tb_next->jmp_first;
1265 for(;;) {
1266 tb1 = *ptb;
1267 n1 = (long)tb1 & 3;
1268 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1269 if (n1 == n && tb1 == tb)
1270 break;
1271 ptb = &tb1->jmp_next[n1];
1273 *ptb = tb->jmp_next[n];
1274 tb->jmp_next[n] = NULL;
1276 /* suppress the jump to next tb in generated code */
1277 tb_reset_jump(tb, n);
1279 /* suppress jumps in the tb on which we could have jumped */
1280 tb_reset_jump_recursive(tb_next);
1284 static void tb_reset_jump_recursive(TranslationBlock *tb)
1286 tb_reset_jump_recursive2(tb, 0);
1287 tb_reset_jump_recursive2(tb, 1);
1290 #if defined(TARGET_HAS_ICE)
1291 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1293 target_phys_addr_t addr;
1294 target_ulong pd;
1295 ram_addr_t ram_addr;
1296 PhysPageDesc *p;
1298 addr = cpu_get_phys_page_debug(env, pc);
1299 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1300 if (!p) {
1301 pd = IO_MEM_UNASSIGNED;
1302 } else {
1303 pd = p->phys_offset;
1305 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1306 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1308 #endif
1310 /* Add a watchpoint. */
1311 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1312 int flags, CPUWatchpoint **watchpoint)
1314 target_ulong len_mask = ~(len - 1);
1315 CPUWatchpoint *wp;
1317 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1318 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1319 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1320 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1321 return -EINVAL;
1323 wp = qemu_malloc(sizeof(*wp));
1324 if (!wp)
1325 return -ENOMEM;
1327 wp->vaddr = addr;
1328 wp->len_mask = len_mask;
1329 wp->flags = flags;
1331 /* keep all GDB-injected watchpoints in front */
1332 if (flags & BP_GDB)
1333 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1334 else
1335 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1337 tlb_flush_page(env, addr);
1339 if (watchpoint)
1340 *watchpoint = wp;
1341 return 0;
1344 /* Remove a specific watchpoint. */
1345 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1346 int flags)
1348 target_ulong len_mask = ~(len - 1);
1349 CPUWatchpoint *wp;
1351 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1352 if (addr == wp->vaddr && len_mask == wp->len_mask
1353 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1354 cpu_watchpoint_remove_by_ref(env, wp);
1355 return 0;
1358 return -ENOENT;
1361 /* Remove a specific watchpoint by reference. */
1362 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1364 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1366 tlb_flush_page(env, watchpoint->vaddr);
1368 qemu_free(watchpoint);
1371 /* Remove all matching watchpoints. */
1372 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1374 CPUWatchpoint *wp, *next;
1376 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1377 if (wp->flags & mask)
1378 cpu_watchpoint_remove_by_ref(env, wp);
1382 /* Add a breakpoint. */
1383 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1384 CPUBreakpoint **breakpoint)
1386 #if defined(TARGET_HAS_ICE)
1387 CPUBreakpoint *bp;
1389 bp = qemu_malloc(sizeof(*bp));
1390 if (!bp)
1391 return -ENOMEM;
1393 bp->pc = pc;
1394 bp->flags = flags;
1396 /* keep all GDB-injected breakpoints in front */
1397 if (flags & BP_GDB)
1398 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1399 else
1400 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1402 breakpoint_invalidate(env, pc);
1404 if (breakpoint)
1405 *breakpoint = bp;
1406 return 0;
1407 #else
1408 return -ENOSYS;
1409 #endif
1412 /* Remove a specific breakpoint. */
1413 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1415 #if defined(TARGET_HAS_ICE)
1416 CPUBreakpoint *bp;
1418 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1419 if (bp->pc == pc && bp->flags == flags) {
1420 cpu_breakpoint_remove_by_ref(env, bp);
1421 return 0;
1424 return -ENOENT;
1425 #else
1426 return -ENOSYS;
1427 #endif
1430 /* Remove a specific breakpoint by reference. */
1431 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1433 #if defined(TARGET_HAS_ICE)
1434 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1436 breakpoint_invalidate(env, breakpoint->pc);
1438 qemu_free(breakpoint);
1439 #endif
1442 /* Remove all matching breakpoints. */
1443 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1445 #if defined(TARGET_HAS_ICE)
1446 CPUBreakpoint *bp, *next;
1448 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1449 if (bp->flags & mask)
1450 cpu_breakpoint_remove_by_ref(env, bp);
1452 #endif
1455 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1456 CPU loop after each instruction */
1457 void cpu_single_step(CPUState *env, int enabled)
1459 #if defined(TARGET_HAS_ICE)
1460 if (env->singlestep_enabled != enabled) {
1461 env->singlestep_enabled = enabled;
1462 /* must flush all the translated code to avoid inconsistancies */
1463 /* XXX: only flush what is necessary */
1464 tb_flush(env);
1466 #endif
1469 /* enable or disable low levels log */
1470 void cpu_set_log(int log_flags)
1472 loglevel = log_flags;
1473 if (loglevel && !logfile) {
1474 logfile = fopen(logfilename, log_append ? "a" : "w");
1475 if (!logfile) {
1476 perror(logfilename);
1477 _exit(1);
1479 #if !defined(CONFIG_SOFTMMU)
1480 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1482 static char logfile_buf[4096];
1483 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1485 #else
1486 setvbuf(logfile, NULL, _IOLBF, 0);
1487 #endif
1488 log_append = 1;
1490 if (!loglevel && logfile) {
1491 fclose(logfile);
1492 logfile = NULL;
1496 void cpu_set_log_filename(const char *filename)
1498 logfilename = strdup(filename);
1499 if (logfile) {
1500 fclose(logfile);
1501 logfile = NULL;
1503 cpu_set_log(loglevel);
1506 /* mask must never be zero, except for A20 change call */
1507 void cpu_interrupt(CPUState *env, int mask)
1509 #if !defined(USE_NPTL)
1510 TranslationBlock *tb;
1511 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1512 #endif
1513 int old_mask;
1515 old_mask = env->interrupt_request;
1516 /* FIXME: This is probably not threadsafe. A different thread could
1517 be in the middle of a read-modify-write operation. */
1518 env->interrupt_request |= mask;
1519 #if defined(USE_NPTL)
1520 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1521 problem and hope the cpu will stop of its own accord. For userspace
1522 emulation this often isn't actually as bad as it sounds. Often
1523 signals are used primarily to interrupt blocking syscalls. */
1524 #else
1525 if (use_icount) {
1526 env->icount_decr.u16.high = 0xffff;
1527 #ifndef CONFIG_USER_ONLY
1528 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1529 an async event happened and we need to process it. */
1530 if (!can_do_io(env)
1531 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1532 cpu_abort(env, "Raised interrupt while not in I/O function");
1534 #endif
1535 } else {
1536 tb = env->current_tb;
1537 /* if the cpu is currently executing code, we must unlink it and
1538 all the potentially executing TB */
1539 if (tb && !testandset(&interrupt_lock)) {
1540 env->current_tb = NULL;
1541 tb_reset_jump_recursive(tb);
1542 resetlock(&interrupt_lock);
1545 #endif
1548 void cpu_reset_interrupt(CPUState *env, int mask)
1550 env->interrupt_request &= ~mask;
1553 const CPULogItem cpu_log_items[] = {
1554 { CPU_LOG_TB_OUT_ASM, "out_asm",
1555 "show generated host assembly code for each compiled TB" },
1556 { CPU_LOG_TB_IN_ASM, "in_asm",
1557 "show target assembly code for each compiled TB" },
1558 { CPU_LOG_TB_OP, "op",
1559 "show micro ops for each compiled TB" },
1560 { CPU_LOG_TB_OP_OPT, "op_opt",
1561 "show micro ops "
1562 #ifdef TARGET_I386
1563 "before eflags optimization and "
1564 #endif
1565 "after liveness analysis" },
1566 { CPU_LOG_INT, "int",
1567 "show interrupts/exceptions in short format" },
1568 { CPU_LOG_EXEC, "exec",
1569 "show trace before each executed TB (lots of logs)" },
1570 { CPU_LOG_TB_CPU, "cpu",
1571 "show CPU state before block translation" },
1572 #ifdef TARGET_I386
1573 { CPU_LOG_PCALL, "pcall",
1574 "show protected mode far calls/returns/exceptions" },
1575 #endif
1576 #ifdef DEBUG_IOPORT
1577 { CPU_LOG_IOPORT, "ioport",
1578 "show all i/o ports accesses" },
1579 #endif
1580 { 0, NULL, NULL },
1583 static int cmp1(const char *s1, int n, const char *s2)
1585 if (strlen(s2) != n)
1586 return 0;
1587 return memcmp(s1, s2, n) == 0;
1590 /* takes a comma separated list of log masks. Return 0 if error. */
1591 int cpu_str_to_log_mask(const char *str)
1593 const CPULogItem *item;
1594 int mask;
1595 const char *p, *p1;
1597 p = str;
1598 mask = 0;
1599 for(;;) {
1600 p1 = strchr(p, ',');
1601 if (!p1)
1602 p1 = p + strlen(p);
1603 if(cmp1(p,p1-p,"all")) {
1604 for(item = cpu_log_items; item->mask != 0; item++) {
1605 mask |= item->mask;
1607 } else {
1608 for(item = cpu_log_items; item->mask != 0; item++) {
1609 if (cmp1(p, p1 - p, item->name))
1610 goto found;
1612 return 0;
1614 found:
1615 mask |= item->mask;
1616 if (*p1 != ',')
1617 break;
1618 p = p1 + 1;
1620 return mask;
1623 void cpu_abort(CPUState *env, const char *fmt, ...)
1625 va_list ap;
1626 va_list ap2;
1628 va_start(ap, fmt);
1629 va_copy(ap2, ap);
1630 fprintf(stderr, "qemu: fatal: ");
1631 vfprintf(stderr, fmt, ap);
1632 fprintf(stderr, "\n");
1633 #ifdef TARGET_I386
1634 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1635 #else
1636 cpu_dump_state(env, stderr, fprintf, 0);
1637 #endif
1638 if (logfile) {
1639 fprintf(logfile, "qemu: fatal: ");
1640 vfprintf(logfile, fmt, ap2);
1641 fprintf(logfile, "\n");
1642 #ifdef TARGET_I386
1643 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1644 #else
1645 cpu_dump_state(env, logfile, fprintf, 0);
1646 #endif
1647 fflush(logfile);
1648 fclose(logfile);
1650 va_end(ap2);
1651 va_end(ap);
1652 abort();
1655 CPUState *cpu_copy(CPUState *env)
1657 CPUState *new_env = cpu_init(env->cpu_model_str);
1658 /* preserve chaining and index */
1659 CPUState *next_cpu = new_env->next_cpu;
1660 int cpu_index = new_env->cpu_index;
1661 memcpy(new_env, env, sizeof(CPUState));
1662 new_env->next_cpu = next_cpu;
1663 new_env->cpu_index = cpu_index;
1664 return new_env;
1667 #if !defined(CONFIG_USER_ONLY)
1669 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1671 unsigned int i;
1673 /* Discard jump cache entries for any tb which might potentially
1674 overlap the flushed page. */
1675 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1676 memset (&env->tb_jmp_cache[i], 0,
1677 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1679 i = tb_jmp_cache_hash_page(addr);
1680 memset (&env->tb_jmp_cache[i], 0,
1681 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1684 /* NOTE: if flush_global is true, also flush global entries (not
1685 implemented yet) */
1686 void tlb_flush(CPUState *env, int flush_global)
1688 int i;
1690 #if defined(DEBUG_TLB)
1691 printf("tlb_flush:\n");
1692 #endif
1693 /* must reset current TB so that interrupts cannot modify the
1694 links while we are modifying them */
1695 env->current_tb = NULL;
1697 for(i = 0; i < CPU_TLB_SIZE; i++) {
1698 env->tlb_table[0][i].addr_read = -1;
1699 env->tlb_table[0][i].addr_write = -1;
1700 env->tlb_table[0][i].addr_code = -1;
1701 env->tlb_table[1][i].addr_read = -1;
1702 env->tlb_table[1][i].addr_write = -1;
1703 env->tlb_table[1][i].addr_code = -1;
1704 #if (NB_MMU_MODES >= 3)
1705 env->tlb_table[2][i].addr_read = -1;
1706 env->tlb_table[2][i].addr_write = -1;
1707 env->tlb_table[2][i].addr_code = -1;
1708 #if (NB_MMU_MODES == 4)
1709 env->tlb_table[3][i].addr_read = -1;
1710 env->tlb_table[3][i].addr_write = -1;
1711 env->tlb_table[3][i].addr_code = -1;
1712 #endif
1713 #endif
1716 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1718 #ifdef USE_KQEMU
1719 if (env->kqemu_enabled) {
1720 kqemu_flush(env, flush_global);
1722 #endif
1723 tlb_flush_count++;
1726 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1728 if (addr == (tlb_entry->addr_read &
1729 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1730 addr == (tlb_entry->addr_write &
1731 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1732 addr == (tlb_entry->addr_code &
1733 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1734 tlb_entry->addr_read = -1;
1735 tlb_entry->addr_write = -1;
1736 tlb_entry->addr_code = -1;
1740 void tlb_flush_page(CPUState *env, target_ulong addr)
1742 int i;
1744 #if defined(DEBUG_TLB)
1745 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1746 #endif
1747 /* must reset current TB so that interrupts cannot modify the
1748 links while we are modifying them */
1749 env->current_tb = NULL;
1751 addr &= TARGET_PAGE_MASK;
1752 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1753 tlb_flush_entry(&env->tlb_table[0][i], addr);
1754 tlb_flush_entry(&env->tlb_table[1][i], addr);
1755 #if (NB_MMU_MODES >= 3)
1756 tlb_flush_entry(&env->tlb_table[2][i], addr);
1757 #if (NB_MMU_MODES == 4)
1758 tlb_flush_entry(&env->tlb_table[3][i], addr);
1759 #endif
1760 #endif
1762 tlb_flush_jmp_cache(env, addr);
1764 #ifdef USE_KQEMU
1765 if (env->kqemu_enabled) {
1766 kqemu_flush_page(env, addr);
1768 #endif
1771 /* update the TLBs so that writes to code in the virtual page 'addr'
1772 can be detected */
1773 static void tlb_protect_code(ram_addr_t ram_addr)
1775 cpu_physical_memory_reset_dirty(ram_addr,
1776 ram_addr + TARGET_PAGE_SIZE,
1777 CODE_DIRTY_FLAG);
1780 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1781 tested for self modifying code */
1782 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1783 target_ulong vaddr)
1785 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1788 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1789 unsigned long start, unsigned long length)
1791 unsigned long addr;
1792 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1793 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1794 if ((addr - start) < length) {
1795 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1800 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1801 int dirty_flags)
1803 CPUState *env;
1804 unsigned long length, start1;
1805 int i, mask, len;
1806 uint8_t *p;
1808 start &= TARGET_PAGE_MASK;
1809 end = TARGET_PAGE_ALIGN(end);
1811 length = end - start;
1812 if (length == 0)
1813 return;
1814 len = length >> TARGET_PAGE_BITS;
1815 #ifdef USE_KQEMU
1816 /* XXX: should not depend on cpu context */
1817 env = first_cpu;
1818 if (env->kqemu_enabled) {
1819 ram_addr_t addr;
1820 addr = start;
1821 for(i = 0; i < len; i++) {
1822 kqemu_set_notdirty(env, addr);
1823 addr += TARGET_PAGE_SIZE;
1826 #endif
1827 mask = ~dirty_flags;
1828 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1829 for(i = 0; i < len; i++)
1830 p[i] &= mask;
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
1834 start1 = start + (unsigned long)phys_ram_base;
1835 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1836 for(i = 0; i < CPU_TLB_SIZE; i++)
1837 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1838 for(i = 0; i < CPU_TLB_SIZE; i++)
1839 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1840 #if (NB_MMU_MODES >= 3)
1841 for(i = 0; i < CPU_TLB_SIZE; i++)
1842 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1843 #if (NB_MMU_MODES == 4)
1844 for(i = 0; i < CPU_TLB_SIZE; i++)
1845 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1846 #endif
1847 #endif
1851 int cpu_physical_memory_set_dirty_tracking(int enable)
1853 in_migration = enable;
1854 return 0;
1857 int cpu_physical_memory_get_dirty_tracking(void)
1859 return in_migration;
1862 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1864 if (kvm_enabled())
1865 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1868 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1870 ram_addr_t ram_addr;
1872 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1873 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1874 tlb_entry->addend - (unsigned long)phys_ram_base;
1875 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1876 tlb_entry->addr_write |= TLB_NOTDIRTY;
1881 /* update the TLB according to the current state of the dirty bits */
1882 void cpu_tlb_update_dirty(CPUState *env)
1884 int i;
1885 for(i = 0; i < CPU_TLB_SIZE; i++)
1886 tlb_update_dirty(&env->tlb_table[0][i]);
1887 for(i = 0; i < CPU_TLB_SIZE; i++)
1888 tlb_update_dirty(&env->tlb_table[1][i]);
1889 #if (NB_MMU_MODES >= 3)
1890 for(i = 0; i < CPU_TLB_SIZE; i++)
1891 tlb_update_dirty(&env->tlb_table[2][i]);
1892 #if (NB_MMU_MODES == 4)
1893 for(i = 0; i < CPU_TLB_SIZE; i++)
1894 tlb_update_dirty(&env->tlb_table[3][i]);
1895 #endif
1896 #endif
1899 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1901 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1902 tlb_entry->addr_write = vaddr;
1905 /* update the TLB corresponding to virtual page vaddr
1906 so that it is no longer dirty */
1907 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1909 int i;
1911 vaddr &= TARGET_PAGE_MASK;
1912 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1913 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1914 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1915 #if (NB_MMU_MODES >= 3)
1916 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1917 #if (NB_MMU_MODES == 4)
1918 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1919 #endif
1920 #endif
1923 /* add a new TLB entry. At most one entry for a given virtual address
1924 is permitted. Return 0 if OK or 2 if the page could not be mapped
1925 (can only happen in non SOFTMMU mode for I/O pages or pages
1926 conflicting with the host address space). */
1927 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1928 target_phys_addr_t paddr, int prot,
1929 int mmu_idx, int is_softmmu)
1931 PhysPageDesc *p;
1932 unsigned long pd;
1933 unsigned int index;
1934 target_ulong address;
1935 target_ulong code_address;
1936 target_phys_addr_t addend;
1937 int ret;
1938 CPUTLBEntry *te;
1939 CPUWatchpoint *wp;
1940 target_phys_addr_t iotlb;
1942 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1943 if (!p) {
1944 pd = IO_MEM_UNASSIGNED;
1945 } else {
1946 pd = p->phys_offset;
1948 #if defined(DEBUG_TLB)
1949 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1950 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1951 #endif
1953 ret = 0;
1954 address = vaddr;
1955 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1956 /* IO memory case (romd handled later) */
1957 address |= TLB_MMIO;
1959 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1960 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1961 /* Normal RAM. */
1962 iotlb = pd & TARGET_PAGE_MASK;
1963 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1964 iotlb |= IO_MEM_NOTDIRTY;
1965 else
1966 iotlb |= IO_MEM_ROM;
1967 } else {
1968 /* IO handlers are currently passed a phsical address.
1969 It would be nice to pass an offset from the base address
1970 of that region. This would avoid having to special case RAM,
1971 and avoid full address decoding in every device.
1972 We can't use the high bits of pd for this because
1973 IO_MEM_ROMD uses these as a ram address. */
1974 iotlb = (pd & ~TARGET_PAGE_MASK);
1975 if (p) {
1976 /* FIXME: What if this isn't page aligned? */
1977 iotlb += p->region_offset;
1978 } else {
1979 iotlb += paddr;
1983 code_address = address;
1984 /* Make accesses to pages with watchpoints go via the
1985 watchpoint trap routines. */
1986 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1987 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1988 iotlb = io_mem_watch + paddr;
1989 /* TODO: The memory case can be optimized by not trapping
1990 reads of pages with a write breakpoint. */
1991 address |= TLB_MMIO;
1995 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1996 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1997 te = &env->tlb_table[mmu_idx][index];
1998 te->addend = addend - vaddr;
1999 if (prot & PAGE_READ) {
2000 te->addr_read = address;
2001 } else {
2002 te->addr_read = -1;
2005 if (prot & PAGE_EXEC) {
2006 te->addr_code = code_address;
2007 } else {
2008 te->addr_code = -1;
2010 if (prot & PAGE_WRITE) {
2011 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2012 (pd & IO_MEM_ROMD)) {
2013 /* Write access calls the I/O callback. */
2014 te->addr_write = address | TLB_MMIO;
2015 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2016 !cpu_physical_memory_is_dirty(pd)) {
2017 te->addr_write = address | TLB_NOTDIRTY;
2018 } else {
2019 te->addr_write = address;
2021 } else {
2022 te->addr_write = -1;
2024 return ret;
2027 #else
2029 void tlb_flush(CPUState *env, int flush_global)
2033 void tlb_flush_page(CPUState *env, target_ulong addr)
2037 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2038 target_phys_addr_t paddr, int prot,
2039 int mmu_idx, int is_softmmu)
2041 return 0;
2044 /* dump memory mappings */
2045 void page_dump(FILE *f)
2047 unsigned long start, end;
2048 int i, j, prot, prot1;
2049 PageDesc *p;
2051 fprintf(f, "%-8s %-8s %-8s %s\n",
2052 "start", "end", "size", "prot");
2053 start = -1;
2054 end = -1;
2055 prot = 0;
2056 for(i = 0; i <= L1_SIZE; i++) {
2057 if (i < L1_SIZE)
2058 p = l1_map[i];
2059 else
2060 p = NULL;
2061 for(j = 0;j < L2_SIZE; j++) {
2062 if (!p)
2063 prot1 = 0;
2064 else
2065 prot1 = p[j].flags;
2066 if (prot1 != prot) {
2067 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2068 if (start != -1) {
2069 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2070 start, end, end - start,
2071 prot & PAGE_READ ? 'r' : '-',
2072 prot & PAGE_WRITE ? 'w' : '-',
2073 prot & PAGE_EXEC ? 'x' : '-');
2075 if (prot1 != 0)
2076 start = end;
2077 else
2078 start = -1;
2079 prot = prot1;
2081 if (!p)
2082 break;
2087 int page_get_flags(target_ulong address)
2089 PageDesc *p;
2091 p = page_find(address >> TARGET_PAGE_BITS);
2092 if (!p)
2093 return 0;
2094 return p->flags;
2097 /* modify the flags of a page and invalidate the code if
2098 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2099 depending on PAGE_WRITE */
2100 void page_set_flags(target_ulong start, target_ulong end, int flags)
2102 PageDesc *p;
2103 target_ulong addr;
2105 /* mmap_lock should already be held. */
2106 start = start & TARGET_PAGE_MASK;
2107 end = TARGET_PAGE_ALIGN(end);
2108 if (flags & PAGE_WRITE)
2109 flags |= PAGE_WRITE_ORG;
2110 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2111 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2112 /* We may be called for host regions that are outside guest
2113 address space. */
2114 if (!p)
2115 return;
2116 /* if the write protection is set, then we invalidate the code
2117 inside */
2118 if (!(p->flags & PAGE_WRITE) &&
2119 (flags & PAGE_WRITE) &&
2120 p->first_tb) {
2121 tb_invalidate_phys_page(addr, 0, NULL);
2123 p->flags = flags;
2127 int page_check_range(target_ulong start, target_ulong len, int flags)
2129 PageDesc *p;
2130 target_ulong end;
2131 target_ulong addr;
2133 if (start + len < start)
2134 /* we've wrapped around */
2135 return -1;
2137 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2138 start = start & TARGET_PAGE_MASK;
2140 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2141 p = page_find(addr >> TARGET_PAGE_BITS);
2142 if( !p )
2143 return -1;
2144 if( !(p->flags & PAGE_VALID) )
2145 return -1;
2147 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2148 return -1;
2149 if (flags & PAGE_WRITE) {
2150 if (!(p->flags & PAGE_WRITE_ORG))
2151 return -1;
2152 /* unprotect the page if it was put read-only because it
2153 contains translated code */
2154 if (!(p->flags & PAGE_WRITE)) {
2155 if (!page_unprotect(addr, 0, NULL))
2156 return -1;
2158 return 0;
2161 return 0;
2164 /* called from signal handler: invalidate the code and unprotect the
2165 page. Return TRUE if the fault was succesfully handled. */
2166 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2168 unsigned int page_index, prot, pindex;
2169 PageDesc *p, *p1;
2170 target_ulong host_start, host_end, addr;
2172 /* Technically this isn't safe inside a signal handler. However we
2173 know this only ever happens in a synchronous SEGV handler, so in
2174 practice it seems to be ok. */
2175 mmap_lock();
2177 host_start = address & qemu_host_page_mask;
2178 page_index = host_start >> TARGET_PAGE_BITS;
2179 p1 = page_find(page_index);
2180 if (!p1) {
2181 mmap_unlock();
2182 return 0;
2184 host_end = host_start + qemu_host_page_size;
2185 p = p1;
2186 prot = 0;
2187 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2188 prot |= p->flags;
2189 p++;
2191 /* if the page was really writable, then we change its
2192 protection back to writable */
2193 if (prot & PAGE_WRITE_ORG) {
2194 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2195 if (!(p1[pindex].flags & PAGE_WRITE)) {
2196 mprotect((void *)g2h(host_start), qemu_host_page_size,
2197 (prot & PAGE_BITS) | PAGE_WRITE);
2198 p1[pindex].flags |= PAGE_WRITE;
2199 /* and since the content will be modified, we must invalidate
2200 the corresponding translated code. */
2201 tb_invalidate_phys_page(address, pc, puc);
2202 #ifdef DEBUG_TB_CHECK
2203 tb_invalidate_check(address);
2204 #endif
2205 mmap_unlock();
2206 return 1;
2209 mmap_unlock();
2210 return 0;
2213 static inline void tlb_set_dirty(CPUState *env,
2214 unsigned long addr, target_ulong vaddr)
2217 #endif /* defined(CONFIG_USER_ONLY) */
2219 #if !defined(CONFIG_USER_ONLY)
2221 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2222 ram_addr_t memory, ram_addr_t region_offset);
2223 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2224 ram_addr_t orig_memory, ram_addr_t region_offset);
2225 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2226 need_subpage) \
2227 do { \
2228 if (addr > start_addr) \
2229 start_addr2 = 0; \
2230 else { \
2231 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2232 if (start_addr2 > 0) \
2233 need_subpage = 1; \
2236 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2237 end_addr2 = TARGET_PAGE_SIZE - 1; \
2238 else { \
2239 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2240 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2241 need_subpage = 1; \
2243 } while (0)
2245 /* register physical memory. 'size' must be a multiple of the target
2246 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2247 io memory page. The address used when calling the IO function is
2248 the offset from the start of the region, plus region_offset. Both
2249 start_region and regon_offset are rounded down to a page boundary
2250 before calculating this offset. This should not be a problem unless
2251 the low bits of start_addr and region_offset differ. */
2252 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2253 ram_addr_t size,
2254 ram_addr_t phys_offset,
2255 ram_addr_t region_offset)
2257 target_phys_addr_t addr, end_addr;
2258 PhysPageDesc *p;
2259 CPUState *env;
2260 ram_addr_t orig_size = size;
2261 void *subpage;
2263 #ifdef USE_KQEMU
2264 /* XXX: should not depend on cpu context */
2265 env = first_cpu;
2266 if (env->kqemu_enabled) {
2267 kqemu_set_phys_mem(start_addr, size, phys_offset);
2269 #endif
2270 if (kvm_enabled())
2271 kvm_set_phys_mem(start_addr, size, phys_offset);
2273 region_offset &= TARGET_PAGE_MASK;
2274 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2275 end_addr = start_addr + (target_phys_addr_t)size;
2276 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2277 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2278 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2279 ram_addr_t orig_memory = p->phys_offset;
2280 target_phys_addr_t start_addr2, end_addr2;
2281 int need_subpage = 0;
2283 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2284 need_subpage);
2285 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2286 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2287 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2288 &p->phys_offset, orig_memory,
2289 p->region_offset);
2290 } else {
2291 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2292 >> IO_MEM_SHIFT];
2294 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2295 region_offset);
2296 p->region_offset = 0;
2297 } else {
2298 p->phys_offset = phys_offset;
2299 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2300 (phys_offset & IO_MEM_ROMD))
2301 phys_offset += TARGET_PAGE_SIZE;
2303 } else {
2304 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2305 p->phys_offset = phys_offset;
2306 p->region_offset = region_offset;
2307 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2308 (phys_offset & IO_MEM_ROMD)) {
2309 phys_offset += TARGET_PAGE_SIZE;
2310 }else {
2311 target_phys_addr_t start_addr2, end_addr2;
2312 int need_subpage = 0;
2314 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2315 end_addr2, need_subpage);
2317 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2318 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2319 &p->phys_offset, IO_MEM_UNASSIGNED,
2321 subpage_register(subpage, start_addr2, end_addr2,
2322 phys_offset, region_offset);
2323 p->region_offset = 0;
2327 region_offset += TARGET_PAGE_SIZE;
2330 /* since each CPU stores ram addresses in its TLB cache, we must
2331 reset the modified entries */
2332 /* XXX: slow ! */
2333 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2334 tlb_flush(env, 1);
2338 /* XXX: temporary until new memory mapping API */
2339 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2341 PhysPageDesc *p;
2343 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2344 if (!p)
2345 return IO_MEM_UNASSIGNED;
2346 return p->phys_offset;
2349 /* XXX: better than nothing */
2350 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2352 ram_addr_t addr;
2353 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2354 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2355 (uint64_t)size, (uint64_t)phys_ram_size);
2356 abort();
2358 addr = phys_ram_alloc_offset;
2359 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2360 return addr;
2363 void qemu_ram_free(ram_addr_t addr)
2367 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2369 #ifdef DEBUG_UNASSIGNED
2370 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2371 #endif
2372 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2373 do_unassigned_access(addr, 0, 0, 0, 1);
2374 #endif
2375 return 0;
2378 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2380 #ifdef DEBUG_UNASSIGNED
2381 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2382 #endif
2383 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2384 do_unassigned_access(addr, 0, 0, 0, 2);
2385 #endif
2386 return 0;
2389 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2391 #ifdef DEBUG_UNASSIGNED
2392 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2393 #endif
2394 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2395 do_unassigned_access(addr, 0, 0, 0, 4);
2396 #endif
2397 return 0;
2400 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2402 #ifdef DEBUG_UNASSIGNED
2403 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2404 #endif
2405 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2406 do_unassigned_access(addr, 1, 0, 0, 1);
2407 #endif
2410 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2412 #ifdef DEBUG_UNASSIGNED
2413 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2414 #endif
2415 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2416 do_unassigned_access(addr, 1, 0, 0, 2);
2417 #endif
2420 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2422 #ifdef DEBUG_UNASSIGNED
2423 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2424 #endif
2425 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2426 do_unassigned_access(addr, 1, 0, 0, 4);
2427 #endif
2430 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2431 unassigned_mem_readb,
2432 unassigned_mem_readw,
2433 unassigned_mem_readl,
2436 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2437 unassigned_mem_writeb,
2438 unassigned_mem_writew,
2439 unassigned_mem_writel,
2442 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2443 uint32_t val)
2445 int dirty_flags;
2446 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2447 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2448 #if !defined(CONFIG_USER_ONLY)
2449 tb_invalidate_phys_page_fast(ram_addr, 1);
2450 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2451 #endif
2453 stb_p(phys_ram_base + ram_addr, val);
2454 #ifdef USE_KQEMU
2455 if (cpu_single_env->kqemu_enabled &&
2456 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2457 kqemu_modify_page(cpu_single_env, ram_addr);
2458 #endif
2459 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2460 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2461 /* we remove the notdirty callback only if the code has been
2462 flushed */
2463 if (dirty_flags == 0xff)
2464 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2467 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2468 uint32_t val)
2470 int dirty_flags;
2471 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2472 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2473 #if !defined(CONFIG_USER_ONLY)
2474 tb_invalidate_phys_page_fast(ram_addr, 2);
2475 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2476 #endif
2478 stw_p(phys_ram_base + ram_addr, val);
2479 #ifdef USE_KQEMU
2480 if (cpu_single_env->kqemu_enabled &&
2481 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2482 kqemu_modify_page(cpu_single_env, ram_addr);
2483 #endif
2484 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2485 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2486 /* we remove the notdirty callback only if the code has been
2487 flushed */
2488 if (dirty_flags == 0xff)
2489 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2492 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2493 uint32_t val)
2495 int dirty_flags;
2496 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2497 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2498 #if !defined(CONFIG_USER_ONLY)
2499 tb_invalidate_phys_page_fast(ram_addr, 4);
2500 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2501 #endif
2503 stl_p(phys_ram_base + ram_addr, val);
2504 #ifdef USE_KQEMU
2505 if (cpu_single_env->kqemu_enabled &&
2506 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2507 kqemu_modify_page(cpu_single_env, ram_addr);
2508 #endif
2509 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2510 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2511 /* we remove the notdirty callback only if the code has been
2512 flushed */
2513 if (dirty_flags == 0xff)
2514 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2517 static CPUReadMemoryFunc *error_mem_read[3] = {
2518 NULL, /* never used */
2519 NULL, /* never used */
2520 NULL, /* never used */
2523 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2524 notdirty_mem_writeb,
2525 notdirty_mem_writew,
2526 notdirty_mem_writel,
2529 /* Generate a debug exception if a watchpoint has been hit. */
2530 static void check_watchpoint(int offset, int len_mask, int flags)
2532 CPUState *env = cpu_single_env;
2533 target_ulong pc, cs_base;
2534 TranslationBlock *tb;
2535 target_ulong vaddr;
2536 CPUWatchpoint *wp;
2537 int cpu_flags;
2539 if (env->watchpoint_hit) {
2540 /* We re-entered the check after replacing the TB. Now raise
2541 * the debug interrupt so that is will trigger after the
2542 * current instruction. */
2543 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2544 return;
2546 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2547 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2548 if ((vaddr == (wp->vaddr & len_mask) ||
2549 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2550 wp->flags |= BP_WATCHPOINT_HIT;
2551 if (!env->watchpoint_hit) {
2552 env->watchpoint_hit = wp;
2553 tb = tb_find_pc(env->mem_io_pc);
2554 if (!tb) {
2555 cpu_abort(env, "check_watchpoint: could not find TB for "
2556 "pc=%p", (void *)env->mem_io_pc);
2558 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2559 tb_phys_invalidate(tb, -1);
2560 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2561 env->exception_index = EXCP_DEBUG;
2562 } else {
2563 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2564 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2566 cpu_resume_from_signal(env, NULL);
2568 } else {
2569 wp->flags &= ~BP_WATCHPOINT_HIT;
2574 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2575 so these check for a hit then pass through to the normal out-of-line
2576 phys routines. */
2577 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2579 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2580 return ldub_phys(addr);
2583 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2585 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2586 return lduw_phys(addr);
2589 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2591 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2592 return ldl_phys(addr);
2595 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2596 uint32_t val)
2598 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2599 stb_phys(addr, val);
2602 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2603 uint32_t val)
2605 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2606 stw_phys(addr, val);
2609 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2610 uint32_t val)
2612 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2613 stl_phys(addr, val);
2616 static CPUReadMemoryFunc *watch_mem_read[3] = {
2617 watch_mem_readb,
2618 watch_mem_readw,
2619 watch_mem_readl,
2622 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2623 watch_mem_writeb,
2624 watch_mem_writew,
2625 watch_mem_writel,
2628 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2629 unsigned int len)
2631 uint32_t ret;
2632 unsigned int idx;
2634 idx = SUBPAGE_IDX(addr);
2635 #if defined(DEBUG_SUBPAGE)
2636 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2637 mmio, len, addr, idx);
2638 #endif
2639 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2640 addr + mmio->region_offset[idx][0][len]);
2642 return ret;
2645 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2646 uint32_t value, unsigned int len)
2648 unsigned int idx;
2650 idx = SUBPAGE_IDX(addr);
2651 #if defined(DEBUG_SUBPAGE)
2652 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2653 mmio, len, addr, idx, value);
2654 #endif
2655 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2656 addr + mmio->region_offset[idx][1][len],
2657 value);
2660 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2662 #if defined(DEBUG_SUBPAGE)
2663 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2664 #endif
2666 return subpage_readlen(opaque, addr, 0);
2669 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2670 uint32_t value)
2672 #if defined(DEBUG_SUBPAGE)
2673 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2674 #endif
2675 subpage_writelen(opaque, addr, value, 0);
2678 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2680 #if defined(DEBUG_SUBPAGE)
2681 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2682 #endif
2684 return subpage_readlen(opaque, addr, 1);
2687 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2688 uint32_t value)
2690 #if defined(DEBUG_SUBPAGE)
2691 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2692 #endif
2693 subpage_writelen(opaque, addr, value, 1);
2696 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2698 #if defined(DEBUG_SUBPAGE)
2699 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2700 #endif
2702 return subpage_readlen(opaque, addr, 2);
2705 static void subpage_writel (void *opaque,
2706 target_phys_addr_t addr, uint32_t value)
2708 #if defined(DEBUG_SUBPAGE)
2709 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2710 #endif
2711 subpage_writelen(opaque, addr, value, 2);
2714 static CPUReadMemoryFunc *subpage_read[] = {
2715 &subpage_readb,
2716 &subpage_readw,
2717 &subpage_readl,
2720 static CPUWriteMemoryFunc *subpage_write[] = {
2721 &subpage_writeb,
2722 &subpage_writew,
2723 &subpage_writel,
2726 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2727 ram_addr_t memory, ram_addr_t region_offset)
2729 int idx, eidx;
2730 unsigned int i;
2732 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2733 return -1;
2734 idx = SUBPAGE_IDX(start);
2735 eidx = SUBPAGE_IDX(end);
2736 #if defined(DEBUG_SUBPAGE)
2737 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2738 mmio, start, end, idx, eidx, memory);
2739 #endif
2740 memory >>= IO_MEM_SHIFT;
2741 for (; idx <= eidx; idx++) {
2742 for (i = 0; i < 4; i++) {
2743 if (io_mem_read[memory][i]) {
2744 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2745 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2746 mmio->region_offset[idx][0][i] = region_offset;
2748 if (io_mem_write[memory][i]) {
2749 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2750 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2751 mmio->region_offset[idx][1][i] = region_offset;
2756 return 0;
2759 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2760 ram_addr_t orig_memory, ram_addr_t region_offset)
2762 subpage_t *mmio;
2763 int subpage_memory;
2765 mmio = qemu_mallocz(sizeof(subpage_t));
2766 if (mmio != NULL) {
2767 mmio->base = base;
2768 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2769 #if defined(DEBUG_SUBPAGE)
2770 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2771 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2772 #endif
2773 *phys = subpage_memory | IO_MEM_SUBPAGE;
2774 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2775 region_offset);
2778 return mmio;
2781 static void io_mem_init(void)
2783 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2784 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2785 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2786 io_mem_nb = 5;
2788 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2789 watch_mem_write, NULL);
2790 /* alloc dirty bits array */
2791 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2792 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2795 /* mem_read and mem_write are arrays of functions containing the
2796 function to access byte (index 0), word (index 1) and dword (index
2797 2). Functions can be omitted with a NULL function pointer. The
2798 registered functions may be modified dynamically later.
2799 If io_index is non zero, the corresponding io zone is
2800 modified. If it is zero, a new io zone is allocated. The return
2801 value can be used with cpu_register_physical_memory(). (-1) is
2802 returned if error. */
2803 int cpu_register_io_memory(int io_index,
2804 CPUReadMemoryFunc **mem_read,
2805 CPUWriteMemoryFunc **mem_write,
2806 void *opaque)
2808 int i, subwidth = 0;
2810 if (io_index <= 0) {
2811 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2812 return -1;
2813 io_index = io_mem_nb++;
2814 } else {
2815 if (io_index >= IO_MEM_NB_ENTRIES)
2816 return -1;
2819 for(i = 0;i < 3; i++) {
2820 if (!mem_read[i] || !mem_write[i])
2821 subwidth = IO_MEM_SUBWIDTH;
2822 io_mem_read[io_index][i] = mem_read[i];
2823 io_mem_write[io_index][i] = mem_write[i];
2825 io_mem_opaque[io_index] = opaque;
2826 return (io_index << IO_MEM_SHIFT) | subwidth;
2829 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2831 return io_mem_write[io_index >> IO_MEM_SHIFT];
2834 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2836 return io_mem_read[io_index >> IO_MEM_SHIFT];
2839 #endif /* !defined(CONFIG_USER_ONLY) */
2841 /* physical memory access (slow version, mainly for debug) */
2842 #if defined(CONFIG_USER_ONLY)
2843 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2844 int len, int is_write)
2846 int l, flags;
2847 target_ulong page;
2848 void * p;
2850 while (len > 0) {
2851 page = addr & TARGET_PAGE_MASK;
2852 l = (page + TARGET_PAGE_SIZE) - addr;
2853 if (l > len)
2854 l = len;
2855 flags = page_get_flags(page);
2856 if (!(flags & PAGE_VALID))
2857 return;
2858 if (is_write) {
2859 if (!(flags & PAGE_WRITE))
2860 return;
2861 /* XXX: this code should not depend on lock_user */
2862 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2863 /* FIXME - should this return an error rather than just fail? */
2864 return;
2865 memcpy(p, buf, l);
2866 unlock_user(p, addr, l);
2867 } else {
2868 if (!(flags & PAGE_READ))
2869 return;
2870 /* XXX: this code should not depend on lock_user */
2871 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2872 /* FIXME - should this return an error rather than just fail? */
2873 return;
2874 memcpy(buf, p, l);
2875 unlock_user(p, addr, 0);
2877 len -= l;
2878 buf += l;
2879 addr += l;
2883 #else
2884 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2885 int len, int is_write)
2887 int l, io_index;
2888 uint8_t *ptr;
2889 uint32_t val;
2890 target_phys_addr_t page;
2891 unsigned long pd;
2892 PhysPageDesc *p;
2894 while (len > 0) {
2895 page = addr & TARGET_PAGE_MASK;
2896 l = (page + TARGET_PAGE_SIZE) - addr;
2897 if (l > len)
2898 l = len;
2899 p = phys_page_find(page >> TARGET_PAGE_BITS);
2900 if (!p) {
2901 pd = IO_MEM_UNASSIGNED;
2902 } else {
2903 pd = p->phys_offset;
2906 if (is_write) {
2907 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2908 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2909 if (p)
2910 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2911 /* XXX: could force cpu_single_env to NULL to avoid
2912 potential bugs */
2913 if (l >= 4 && ((addr & 3) == 0)) {
2914 /* 32 bit write access */
2915 val = ldl_p(buf);
2916 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2917 l = 4;
2918 } else if (l >= 2 && ((addr & 1) == 0)) {
2919 /* 16 bit write access */
2920 val = lduw_p(buf);
2921 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2922 l = 2;
2923 } else {
2924 /* 8 bit write access */
2925 val = ldub_p(buf);
2926 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2927 l = 1;
2929 } else {
2930 unsigned long addr1;
2931 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2932 /* RAM case */
2933 ptr = phys_ram_base + addr1;
2934 memcpy(ptr, buf, l);
2935 if (!cpu_physical_memory_is_dirty(addr1)) {
2936 /* invalidate code */
2937 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2938 /* set dirty bit */
2939 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2940 (0xff & ~CODE_DIRTY_FLAG);
2943 } else {
2944 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2945 !(pd & IO_MEM_ROMD)) {
2946 /* I/O case */
2947 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2948 if (p)
2949 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2950 if (l >= 4 && ((addr & 3) == 0)) {
2951 /* 32 bit read access */
2952 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2953 stl_p(buf, val);
2954 l = 4;
2955 } else if (l >= 2 && ((addr & 1) == 0)) {
2956 /* 16 bit read access */
2957 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2958 stw_p(buf, val);
2959 l = 2;
2960 } else {
2961 /* 8 bit read access */
2962 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2963 stb_p(buf, val);
2964 l = 1;
2966 } else {
2967 /* RAM case */
2968 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2969 (addr & ~TARGET_PAGE_MASK);
2970 memcpy(buf, ptr, l);
2973 len -= l;
2974 buf += l;
2975 addr += l;
2979 /* used for ROM loading : can write in RAM and ROM */
2980 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2981 const uint8_t *buf, int len)
2983 int l;
2984 uint8_t *ptr;
2985 target_phys_addr_t page;
2986 unsigned long pd;
2987 PhysPageDesc *p;
2989 while (len > 0) {
2990 page = addr & TARGET_PAGE_MASK;
2991 l = (page + TARGET_PAGE_SIZE) - addr;
2992 if (l > len)
2993 l = len;
2994 p = phys_page_find(page >> TARGET_PAGE_BITS);
2995 if (!p) {
2996 pd = IO_MEM_UNASSIGNED;
2997 } else {
2998 pd = p->phys_offset;
3001 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3002 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3003 !(pd & IO_MEM_ROMD)) {
3004 /* do nothing */
3005 } else {
3006 unsigned long addr1;
3007 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3008 /* ROM/RAM case */
3009 ptr = phys_ram_base + addr1;
3010 memcpy(ptr, buf, l);
3012 len -= l;
3013 buf += l;
3014 addr += l;
3019 /* warning: addr must be aligned */
3020 uint32_t ldl_phys(target_phys_addr_t addr)
3022 int io_index;
3023 uint8_t *ptr;
3024 uint32_t val;
3025 unsigned long pd;
3026 PhysPageDesc *p;
3028 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3029 if (!p) {
3030 pd = IO_MEM_UNASSIGNED;
3031 } else {
3032 pd = p->phys_offset;
3035 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3036 !(pd & IO_MEM_ROMD)) {
3037 /* I/O case */
3038 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3039 if (p)
3040 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3041 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3042 } else {
3043 /* RAM case */
3044 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3045 (addr & ~TARGET_PAGE_MASK);
3046 val = ldl_p(ptr);
3048 return val;
3051 /* warning: addr must be aligned */
3052 uint64_t ldq_phys(target_phys_addr_t addr)
3054 int io_index;
3055 uint8_t *ptr;
3056 uint64_t val;
3057 unsigned long pd;
3058 PhysPageDesc *p;
3060 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3061 if (!p) {
3062 pd = IO_MEM_UNASSIGNED;
3063 } else {
3064 pd = p->phys_offset;
3067 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3068 !(pd & IO_MEM_ROMD)) {
3069 /* I/O case */
3070 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3071 if (p)
3072 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3073 #ifdef TARGET_WORDS_BIGENDIAN
3074 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3075 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3076 #else
3077 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3078 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3079 #endif
3080 } else {
3081 /* RAM case */
3082 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3083 (addr & ~TARGET_PAGE_MASK);
3084 val = ldq_p(ptr);
3086 return val;
3089 /* XXX: optimize */
3090 uint32_t ldub_phys(target_phys_addr_t addr)
3092 uint8_t val;
3093 cpu_physical_memory_read(addr, &val, 1);
3094 return val;
3097 /* XXX: optimize */
3098 uint32_t lduw_phys(target_phys_addr_t addr)
3100 uint16_t val;
3101 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3102 return tswap16(val);
3105 /* warning: addr must be aligned. The ram page is not masked as dirty
3106 and the code inside is not invalidated. It is useful if the dirty
3107 bits are used to track modified PTEs */
3108 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3110 int io_index;
3111 uint8_t *ptr;
3112 unsigned long pd;
3113 PhysPageDesc *p;
3115 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3116 if (!p) {
3117 pd = IO_MEM_UNASSIGNED;
3118 } else {
3119 pd = p->phys_offset;
3122 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3123 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3124 if (p)
3125 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3126 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3127 } else {
3128 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3129 ptr = phys_ram_base + addr1;
3130 stl_p(ptr, val);
3132 if (unlikely(in_migration)) {
3133 if (!cpu_physical_memory_is_dirty(addr1)) {
3134 /* invalidate code */
3135 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3136 /* set dirty bit */
3137 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3138 (0xff & ~CODE_DIRTY_FLAG);
3144 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3146 int io_index;
3147 uint8_t *ptr;
3148 unsigned long pd;
3149 PhysPageDesc *p;
3151 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3152 if (!p) {
3153 pd = IO_MEM_UNASSIGNED;
3154 } else {
3155 pd = p->phys_offset;
3158 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3159 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3160 if (p)
3161 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3162 #ifdef TARGET_WORDS_BIGENDIAN
3163 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3164 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3165 #else
3166 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3167 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3168 #endif
3169 } else {
3170 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3171 (addr & ~TARGET_PAGE_MASK);
3172 stq_p(ptr, val);
3176 /* warning: addr must be aligned */
3177 void stl_phys(target_phys_addr_t addr, uint32_t val)
3179 int io_index;
3180 uint8_t *ptr;
3181 unsigned long pd;
3182 PhysPageDesc *p;
3184 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3185 if (!p) {
3186 pd = IO_MEM_UNASSIGNED;
3187 } else {
3188 pd = p->phys_offset;
3191 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3192 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3193 if (p)
3194 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3195 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3196 } else {
3197 unsigned long addr1;
3198 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3199 /* RAM case */
3200 ptr = phys_ram_base + addr1;
3201 stl_p(ptr, val);
3202 if (!cpu_physical_memory_is_dirty(addr1)) {
3203 /* invalidate code */
3204 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3205 /* set dirty bit */
3206 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3207 (0xff & ~CODE_DIRTY_FLAG);
3212 /* XXX: optimize */
3213 void stb_phys(target_phys_addr_t addr, uint32_t val)
3215 uint8_t v = val;
3216 cpu_physical_memory_write(addr, &v, 1);
3219 /* XXX: optimize */
3220 void stw_phys(target_phys_addr_t addr, uint32_t val)
3222 uint16_t v = tswap16(val);
3223 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3226 /* XXX: optimize */
3227 void stq_phys(target_phys_addr_t addr, uint64_t val)
3229 val = tswap64(val);
3230 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3233 #endif
3235 /* virtual memory access for debug */
3236 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3237 uint8_t *buf, int len, int is_write)
3239 int l;
3240 target_phys_addr_t phys_addr;
3241 target_ulong page;
3243 while (len > 0) {
3244 page = addr & TARGET_PAGE_MASK;
3245 phys_addr = cpu_get_phys_page_debug(env, page);
3246 /* if no physical page mapped, return an error */
3247 if (phys_addr == -1)
3248 return -1;
3249 l = (page + TARGET_PAGE_SIZE) - addr;
3250 if (l > len)
3251 l = len;
3252 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3253 buf, l, is_write);
3254 len -= l;
3255 buf += l;
3256 addr += l;
3258 return 0;
3261 /* in deterministic execution mode, instructions doing device I/Os
3262 must be at the end of the TB */
3263 void cpu_io_recompile(CPUState *env, void *retaddr)
3265 TranslationBlock *tb;
3266 uint32_t n, cflags;
3267 target_ulong pc, cs_base;
3268 uint64_t flags;
3270 tb = tb_find_pc((unsigned long)retaddr);
3271 if (!tb) {
3272 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3273 retaddr);
3275 n = env->icount_decr.u16.low + tb->icount;
3276 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3277 /* Calculate how many instructions had been executed before the fault
3278 occurred. */
3279 n = n - env->icount_decr.u16.low;
3280 /* Generate a new TB ending on the I/O insn. */
3281 n++;
3282 /* On MIPS and SH, delay slot instructions can only be restarted if
3283 they were already the first instruction in the TB. If this is not
3284 the first instruction in a TB then re-execute the preceding
3285 branch. */
3286 #if defined(TARGET_MIPS)
3287 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3288 env->active_tc.PC -= 4;
3289 env->icount_decr.u16.low++;
3290 env->hflags &= ~MIPS_HFLAG_BMASK;
3292 #elif defined(TARGET_SH4)
3293 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3294 && n > 1) {
3295 env->pc -= 2;
3296 env->icount_decr.u16.low++;
3297 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3299 #endif
3300 /* This should never happen. */
3301 if (n > CF_COUNT_MASK)
3302 cpu_abort(env, "TB too big during recompile");
3304 cflags = n | CF_LAST_IO;
3305 pc = tb->pc;
3306 cs_base = tb->cs_base;
3307 flags = tb->flags;
3308 tb_phys_invalidate(tb, -1);
3309 /* FIXME: In theory this could raise an exception. In practice
3310 we have already translated the block once so it's probably ok. */
3311 tb_gen_code(env, pc, cs_base, flags, cflags);
3312 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3313 the first in the TB) then we end up generating a whole new TB and
3314 repeating the fault, which is horribly inefficient.
3315 Better would be to execute just this insn uncached, or generate a
3316 second new TB. */
3317 cpu_resume_from_signal(env, NULL);
3320 void dump_exec_info(FILE *f,
3321 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3323 int i, target_code_size, max_target_code_size;
3324 int direct_jmp_count, direct_jmp2_count, cross_page;
3325 TranslationBlock *tb;
3327 target_code_size = 0;
3328 max_target_code_size = 0;
3329 cross_page = 0;
3330 direct_jmp_count = 0;
3331 direct_jmp2_count = 0;
3332 for(i = 0; i < nb_tbs; i++) {
3333 tb = &tbs[i];
3334 target_code_size += tb->size;
3335 if (tb->size > max_target_code_size)
3336 max_target_code_size = tb->size;
3337 if (tb->page_addr[1] != -1)
3338 cross_page++;
3339 if (tb->tb_next_offset[0] != 0xffff) {
3340 direct_jmp_count++;
3341 if (tb->tb_next_offset[1] != 0xffff) {
3342 direct_jmp2_count++;
3346 /* XXX: avoid using doubles ? */
3347 cpu_fprintf(f, "Translation buffer state:\n");
3348 cpu_fprintf(f, "gen code size %ld/%ld\n",
3349 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3350 cpu_fprintf(f, "TB count %d/%d\n",
3351 nb_tbs, code_gen_max_blocks);
3352 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3353 nb_tbs ? target_code_size / nb_tbs : 0,
3354 max_target_code_size);
3355 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3356 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3357 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3358 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3359 cross_page,
3360 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3361 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3362 direct_jmp_count,
3363 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3364 direct_jmp2_count,
3365 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3366 cpu_fprintf(f, "\nStatistics:\n");
3367 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3368 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3369 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3370 tcg_dump_info(f, cpu_fprintf);
3373 #if !defined(CONFIG_USER_ONLY)
3375 #define MMUSUFFIX _cmmu
3376 #define GETPC() NULL
3377 #define env cpu_single_env
3378 #define SOFTMMU_CODE_ACCESS
3380 #define SHIFT 0
3381 #include "softmmu_template.h"
3383 #define SHIFT 1
3384 #include "softmmu_template.h"
3386 #define SHIFT 2
3387 #include "softmmu_template.h"
3389 #define SHIFT 3
3390 #include "softmmu_template.h"
3392 #undef env
3394 #endif