IBM PowerPC 4xx 32-bit PCI controller emulation
[qemu/qemu-JZ.git] / exec.c
blob58a0cffacb4a37b6a626634e6ea7c08c5eb5c11f
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
115 int phys_ram_fd;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
120 #endif
122 CPUState *first_cpu;
123 /* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129 int use_icount = 0;
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132 int64_t qemu_icount;
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149 ram_addr_t region_offset;
150 } PhysPageDesc;
152 #define L2_BITS 10
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #else
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161 #endif
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
166 unsigned long qemu_real_host_page_size;
167 unsigned long qemu_host_page_bits;
168 unsigned long qemu_host_page_size;
169 unsigned long qemu_host_page_mask;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc *l1_map[L1_SIZE];
173 static PhysPageDesc **l1_phys_map;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182 static int io_mem_nb;
183 static int io_mem_watch;
184 #endif
186 /* log support */
187 static const char *logfilename = "/tmp/qemu.log";
188 FILE *logfile;
189 int loglevel;
190 static int log_append = 0;
192 /* statistics */
193 static int tlb_flush_count;
194 static int tb_flush_count;
195 static int tb_phys_invalidate_count;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t {
199 target_phys_addr_t base;
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204 } subpage_t;
#ifdef _WIN32
/* Make [addr, addr + size) executable on Windows.  VirtualProtect
   operates on whole pages, so no manual alignment is needed. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make the host pages covering [addr, addr + size) readable, writable
   and executable so generated code placed there can run.  The range is
   expanded to host page boundaries because mprotect() requires
   page-aligned arguments. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    /* FIX: the mprotect() return value used to be ignored; a failure
       here leaves the code buffer non-executable and causes a crash
       far from the root cause, so at least diagnose it. */
    if (mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        perror("map_exec: mprotect");
    }
}
#endif
/* One-time host page geometry setup: determine the real host page size,
   derive qemu_host_page_{size,bits,mask}, allocate the physical page
   level-1 map, and (user-mode, non-Windows only) mark every region
   already mapped by the host process as PAGE_RESERVED so the guest
   cannot map over it. */
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
235 TARGET_PAGE_SIZE */
236 #ifdef _WIN32
238 SYSTEM_INFO system_info;
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
243 #else
244 qemu_real_host_page_size = getpagesize();
245 #endif
/* qemu_host_page_size may have been preset elsewhere; only default it
   when still zero, and never let it fall below the target page size. */
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
263 mmap_lock();
264 last_brk = (unsigned long)sbrk(0);
/* Parse /proc/self/maps to learn which host ranges are already in use.
   NOTE(review): silently does nothing if the file cannot be opened —
   presumably acceptable on non-Linux hosts; confirm. */
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
/* Clamp to the guest physical address space before reserving. */
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274 page_set_flags(startaddr & TARGET_PAGE_MASK,
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
278 } while (!feof(f));
279 fclose(f);
281 mmap_unlock();
283 #endif
/* Return the level-1 map slot covering the given target page index,
   or NULL when the index is beyond the table (only reachable on
   targets with more than 32 virtual address bits). */
286 static inline PageDesc **page_l1_map(target_ulong index)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292 return NULL;
293 #endif
294 return &l1_map[index >> L2_BITS];
297 static inline PageDesc *page_find_alloc(target_ulong index)
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 unsigned long addr;
309 size_t len = sizeof(PageDesc) * L2_SIZE;
310 /* Don't use qemu_malloc because it may recurse. */
311 p = mmap(0, len, PROT_READ | PROT_WRITE,
312 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
313 *lp = p;
314 addr = h2g(p);
315 if (addr == (target_ulong)addr) {
316 page_set_flags(addr & TARGET_PAGE_MASK,
317 TARGET_PAGE_ALIGN(addr + len),
318 PAGE_RESERVED);
320 #else
321 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
322 *lp = p;
323 #endif
325 return p + (index & (L2_SIZE - 1));
328 static inline PageDesc *page_find(target_ulong index)
330 PageDesc **lp, *p;
331 lp = page_l1_map(index);
332 if (!lp)
333 return NULL;
335 p = *lp;
336 if (!p)
337 return 0;
338 return p + (index & (L2_SIZE - 1));
/* Look up the PhysPageDesc for a guest physical page index, creating
   intermediate/leaf tables on demand when 'alloc' is non-zero.
   Returns NULL when the entry does not exist and alloc is 0.
   New leaf entries are initialised to IO_MEM_UNASSIGNED. */
341 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
343 void **lp, **p;
344 PhysPageDesc *pd;
346 p = (void **)l1_phys_map;
347 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
349 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
350 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
351 #endif
/* Extra indirection level for >32-bit physical address spaces. */
352 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
353 p = *lp;
354 if (!p) {
355 /* allocate if not found */
356 if (!alloc)
357 return NULL;
358 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
359 memset(p, 0, sizeof(void *) * L1_SIZE);
360 *lp = p;
362 #endif
363 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
364 pd = *lp;
365 if (!pd) {
366 int i;
367 /* allocate if not found */
368 if (!alloc)
369 return NULL;
370 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
371 *lp = pd;
/* Every fresh entry starts as unassigned I/O memory. */
372 for (i = 0; i < L2_SIZE; i++)
373 pd[i].phys_offset = IO_MEM_UNASSIGNED;
375 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
/* Non-allocating wrapper: returns NULL if no descriptor exists yet. */
378 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
380 return phys_page_find_alloc(index, 0);
383 #if !defined(CONFIG_USER_ONLY)
384 static void tlb_protect_code(ram_addr_t ram_addr);
385 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
386 target_ulong vaddr);
387 #define mmap_lock() do { } while(0)
388 #define mmap_unlock() do { } while(0)
389 #endif
391 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393 #if defined(CONFIG_USER_ONLY)
394 /* Currently it is not recommanded to allocate big chunks of data in
395 user mode. It will change when a dedicated libc will be used */
396 #define USE_STATIC_CODE_GEN_BUFFER
397 #endif
399 #ifdef USE_STATIC_CODE_GEN_BUFFER
400 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
401 #endif
/* Allocate the translated-code buffer of 'tb_size' bytes (0 selects a
   default), make it executable, and size the TB array accordingly.
   Placement is host-specific: some hosts need the buffer within direct
   branch range of the prologue. */
403 static void code_gen_alloc(unsigned long tb_size)
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
/* User-mode builds use a fixed static buffer (see comment above). */
406 code_gen_buffer = static_code_gen_buffer;
407 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
408 map_exec(code_gen_buffer, code_gen_buffer_size);
409 #else
410 code_gen_buffer_size = tb_size;
411 if (code_gen_buffer_size == 0) {
412 #if defined(CONFIG_USER_ONLY)
413 /* in user mode, phys_ram_size is not meaningful */
414 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415 #else
416 /* XXX: needs ajustments */
417 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
418 #endif
420 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
421 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
422 /* The code gen buffer location may have constraints depending on
423 the host cpu and OS */
424 #if defined(__linux__)
426 int flags;
427 void *start = NULL;
429 flags = MAP_PRIVATE | MAP_ANONYMOUS;
430 #if defined(__x86_64__)
/* MAP_32BIT keeps the buffer reachable by 32-bit displacements. */
431 flags |= MAP_32BIT;
432 /* Cannot map more than that */
433 if (code_gen_buffer_size > (800 * 1024 * 1024))
434 code_gen_buffer_size = (800 * 1024 * 1024);
435 #elif defined(__sparc_v9__)
436 // Map the buffer below 2G, so we can use direct calls and branches
437 flags |= MAP_FIXED;
438 start = (void *) 0x60000000UL;
439 if (code_gen_buffer_size > (512 * 1024 * 1024))
440 code_gen_buffer_size = (512 * 1024 * 1024);
441 #elif defined(__arm__)
442 /* Map the buffer below 32M, so we can use direct calls and branches */
443 flags |= MAP_FIXED;
444 start = (void *) 0x01000000UL;
445 if (code_gen_buffer_size > 16 * 1024 * 1024)
446 code_gen_buffer_size = 16 * 1024 * 1024;
447 #endif
448 code_gen_buffer = mmap(start, code_gen_buffer_size,
449 PROT_WRITE | PROT_READ | PROT_EXEC,
450 flags, -1, 0);
451 if (code_gen_buffer == MAP_FAILED) {
452 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
453 exit(1);
456 #elif defined(__FreeBSD__)
458 int flags;
459 void *addr = NULL;
460 flags = MAP_PRIVATE | MAP_ANONYMOUS;
461 #if defined(__x86_64__)
462 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
463 * 0x40000000 is free */
464 flags |= MAP_FIXED;
465 addr = (void *)0x40000000;
466 /* Cannot map more than that */
467 if (code_gen_buffer_size > (800 * 1024 * 1024))
468 code_gen_buffer_size = (800 * 1024 * 1024);
469 #endif
470 code_gen_buffer = mmap(addr, code_gen_buffer_size,
471 PROT_WRITE | PROT_READ | PROT_EXEC,
472 flags, -1, 0);
473 if (code_gen_buffer == MAP_FAILED) {
474 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
475 exit(1);
478 #else
/* Generic fallback: plain malloc plus an mprotect to RWX. */
479 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
480 if (!code_gen_buffer) {
481 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482 exit(1);
484 map_exec(code_gen_buffer, code_gen_buffer_size);
485 #endif
486 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
487 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
/* Keep room at the end for the largest single block so translation of
   one TB can never overrun the buffer. */
488 code_gen_buffer_max_size = code_gen_buffer_size -
489 code_gen_max_block_size();
490 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
491 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
494 /* Must be called before using the QEMU cpus. 'tb_size' is the size
495 (in bytes) allocated to the translation buffer. Zero means default
496 size. */
497 void cpu_exec_init_all(unsigned long tb_size)
/* Order matters: the code generator must exist before the buffer is
   carved up, and page tables before any I/O memory registration. */
499 cpu_gen_init();
500 code_gen_alloc(tb_size);
501 code_gen_ptr = code_gen_buffer;
502 page_init();
503 #if !defined(CONFIG_USER_ONLY)
504 io_mem_init();
505 #endif
/* Savevm/loadvm handlers for the architecture-independent part of the
   CPU state (halted flag and pending interrupt mask). */
508 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
510 #define CPU_COMMON_SAVE_VERSION 1
512 static void cpu_common_save(QEMUFile *f, void *opaque)
514 CPUState *env = opaque;
516 qemu_put_be32s(f, &env->halted);
517 qemu_put_be32s(f, &env->interrupt_request);
520 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
522 CPUState *env = opaque;
524 if (version_id != CPU_COMMON_SAVE_VERSION)
525 return -EINVAL;
527 qemu_get_be32s(f, &env->halted);
528 qemu_get_be32s(f, &env->interrupt_request);
/* The TLB caches host pointers that are stale after migration;
   flush everything. */
529 tlb_flush(env, 1);
531 return 0;
533 #endif
/* Register a new CPU: append it to the global first_cpu list, assign
   the next sequential cpu_index, initialise its breakpoint/watchpoint
   lists, and (system mode) hook up its savevm handlers. */
535 void cpu_exec_init(CPUState *env)
537 CPUState **penv;
538 int cpu_index;
540 env->next_cpu = NULL;
541 penv = &first_cpu;
542 cpu_index = 0;
/* Walk to the end of the singly linked CPU list, counting entries. */
543 while (*penv != NULL) {
544 penv = (CPUState **)&(*penv)->next_cpu;
545 cpu_index++;
547 env->cpu_index = cpu_index;
548 TAILQ_INIT(&env->breakpoints);
549 TAILQ_INIT(&env->watchpoints);
550 *penv = env;
551 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
552 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
553 cpu_common_save, cpu_common_load, env);
554 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
555 cpu_save, cpu_load, env);
556 #endif
/* Drop the page's SMC code bitmap (if any) and reset the write counter
   so the bitmap heuristic starts over for this page. */
559 static inline void invalidate_page_bitmap(PageDesc *p)
561 if (p->code_bitmap) {
562 qemu_free(p->code_bitmap);
563 p->code_bitmap = NULL;
565 p->code_write_count = 0;
568 /* set to NULL all the 'first_tb' fields in all PageDescs */
569 static void page_flush_tb(void)
571 int i, j;
572 PageDesc *p;
/* Iterate every allocated level-2 table; 'p' walks its L2_SIZE
   entries in place. */
574 for(i = 0; i < L1_SIZE; i++) {
575 p = l1_map[i];
576 if (p) {
577 for(j = 0; j < L2_SIZE; j++) {
578 p->first_tb = NULL;
579 invalidate_page_bitmap(p);
580 p++;
586 /* flush all the translation blocks */
587 /* XXX: tb_flush is currently not thread safe */
588 void tb_flush(CPUState *env1)
590 CPUState *env;
591 #if defined(DEBUG_FLUSH)
592 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
593 (unsigned long)(code_gen_ptr - code_gen_buffer),
594 nb_tbs, nb_tbs > 0 ?
595 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
596 #endif
/* Sanity check: the write pointer must never have run past the buffer. */
597 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
598 cpu_abort(env1, "Internal error: code buffer overflow\n");
600 nb_tbs = 0;
/* Clear every CPU's TB jump cache — all cached pointers are now stale. */
602 for(env = first_cpu; env != NULL; env = env->next_cpu) {
603 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
606 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
607 page_flush_tb();
609 code_gen_ptr = code_gen_buffer;
610 /* XXX: flush processor icache at this point if cache flush is
611 expensive */
612 tb_flush_count++;
/* Debug-only consistency checks, compiled only with DEBUG_TB_CHECK
   (user-mode emulation only — see the #undef near the top of file). */
615 #ifdef DEBUG_TB_CHECK
/* Verify that no cached TB still intersects the invalidated page. */
617 static void tb_invalidate_check(target_ulong address)
619 TranslationBlock *tb;
620 int i;
621 address &= TARGET_PAGE_MASK;
622 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
623 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
624 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
625 address >= tb->pc + tb->size)) {
626 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
627 address, (long)tb->pc, tb->size);
633 /* verify that all the pages have correct rights for code */
634 static void tb_page_check(void)
636 TranslationBlock *tb;
637 int i, flags1, flags2;
639 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
640 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
/* Both the first and last page of the TB must be write-protected. */
641 flags1 = page_get_flags(tb->pc);
642 flags2 = page_get_flags(tb->pc + tb->size - 1);
643 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
644 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
645 (long)tb->pc, tb->size, flags1, flags2);
/* Check that the circular jump list of 'tb' terminates back at 'tb'.
   The low 2 bits of each pointer encode the jump slot; value 2 marks
   the list head. */
651 static void tb_jmp_check(TranslationBlock *tb)
653 TranslationBlock *tb1;
654 unsigned int n1;
656 /* suppress any remaining jumps to this TB */
657 tb1 = tb->jmp_first;
658 for(;;) {
659 n1 = (long)tb1 & 3;
660 tb1 = (TranslationBlock *)((long)tb1 & ~3);
661 if (n1 == 2)
662 break;
663 tb1 = tb1->jmp_next[n1];
665 /* check end of list */
666 if (tb1 != tb) {
667 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
671 #endif
673 /* invalidate one TB */
/* Unlink 'tb' from a singly linked TB list whose 'next' pointer lives
   at byte offset 'next_offset' inside TranslationBlock. */
674 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
675 int next_offset)
677 TranslationBlock *tb1;
678 for(;;) {
679 tb1 = *ptb;
680 if (tb1 == tb) {
/* Splice 'tb' out by copying its next pointer into the slot. */
681 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
682 break;
684 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
/* Unlink 'tb' from a per-page TB list.  List pointers are tagged: the
   low 2 bits of each entry select which page_next[] slot continues the
   chain (a TB can be on up to two page lists). */
688 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
690 TranslationBlock *tb1;
691 unsigned int n1;
693 for(;;) {
694 tb1 = *ptb;
695 n1 = (long)tb1 & 3;
696 tb1 = (TranslationBlock *)((long)tb1 & ~3);
697 if (tb1 == tb) {
698 *ptb = tb1->page_next[n1];
699 break;
701 ptb = &tb1->page_next[n1];
/* Remove jump slot 'n' of 'tb' from the circular list of incoming
   jumps it participates in.  Tag value 2 in the low bits marks the
   head of the circular list (see tb_link_phys). */
705 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
707 TranslationBlock *tb1, **ptb;
708 unsigned int n1;
710 ptb = &tb->jmp_next[n];
711 tb1 = *ptb;
712 if (tb1) {
713 /* find tb(n) in circular list */
714 for(;;) {
715 tb1 = *ptb;
716 n1 = (long)tb1 & 3;
717 tb1 = (TranslationBlock *)((long)tb1 & ~3);
718 if (n1 == n && tb1 == tb)
719 break;
720 if (n1 == 2) {
721 ptb = &tb1->jmp_first;
722 } else {
723 ptb = &tb1->jmp_next[n1];
726 /* now we can suppress tb(n) from the list */
727 *ptb = tb->jmp_next[n];
729 tb->jmp_next[n] = NULL;
733 /* reset the jump entry 'n' of a TB so that it is not chained to
734 another TB */
/* Points the patched branch back at the TB's own epilogue stub. */
735 static inline void tb_reset_jump(TranslationBlock *tb, int n)
737 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
/* Fully invalidate one TB: remove it from the physical hash table, the
   per-page lists, every CPU's jump cache, and unchain all jumps to and
   from it.  'page_addr' names the page that triggered the invalidation
   (its list entry is handled by the caller); pass -1 to remove from
   all pages. */
740 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
742 CPUState *env;
743 PageDesc *p;
744 unsigned int h, n1;
745 target_phys_addr_t phys_pc;
746 TranslationBlock *tb1, *tb2;
748 /* remove the TB from the hash list */
749 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
750 h = tb_phys_hash_func(phys_pc);
751 tb_remove(&tb_phys_hash[h], tb,
752 offsetof(TranslationBlock, phys_hash_next));
754 /* remove the TB from the page list */
755 if (tb->page_addr[0] != page_addr) {
756 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
757 tb_page_remove(&p->first_tb, tb);
758 invalidate_page_bitmap(p);
/* A TB may span two pages; -1 means there is no second page. */
760 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
761 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
762 tb_page_remove(&p->first_tb, tb);
763 invalidate_page_bitmap(p);
766 tb_invalidated_flag = 1;
768 /* remove the TB from the hash list */
769 h = tb_jmp_cache_hash_func(tb->pc);
770 for(env = first_cpu; env != NULL; env = env->next_cpu) {
771 if (env->tb_jmp_cache[h] == tb)
772 env->tb_jmp_cache[h] = NULL;
775 /* suppress this TB from the two jump lists */
776 tb_jmp_remove(tb, 0);
777 tb_jmp_remove(tb, 1);
779 /* suppress any remaining jumps to this TB */
780 tb1 = tb->jmp_first;
781 for(;;) {
782 n1 = (long)tb1 & 3;
783 if (n1 == 2)
784 break;
785 tb1 = (TranslationBlock *)((long)tb1 & ~3);
786 tb2 = tb1->jmp_next[n1];
/* Re-point the jumping TB at its own epilogue before unlinking. */
787 tb_reset_jump(tb1, n1);
788 tb1->jmp_next[n1] = NULL;
789 tb1 = tb2;
791 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
793 tb_phys_invalidate_count++;
/* Set bits [start, start + len) in the byte-array bitmap 'tab'.
   Bit i lives in byte i >> 3 at LSB-first position i & 7. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    const int end = start + len;

    for (bit = start; bit < end; bit++)
        tab[bit >> 3] |= (uint8_t)(1u << (bit & 7));
}
/* Build the page's SMC code bitmap: one bit per byte of the page,
   set where translated code exists.  Used to make self-modifying-code
   checks cheap once code_write_count passes the threshold. */
823 static void build_page_bitmap(PageDesc *p)
825 int n, tb_start, tb_end;
826 TranslationBlock *tb;
828 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
829 if (!p->code_bitmap)
830 return;
832 tb = p->first_tb;
833 while (tb != NULL) {
/* Low 2 bits of the list pointer select which page slot this is. */
834 n = (long)tb & 3;
835 tb = (TranslationBlock *)((long)tb & ~3);
836 /* NOTE: this is subtle as a TB may span two physical pages */
837 if (n == 0) {
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start = tb->pc & ~TARGET_PAGE_MASK;
841 tb_end = tb_start + tb->size;
842 if (tb_end > TARGET_PAGE_SIZE)
843 tb_end = TARGET_PAGE_SIZE;
844 } else {
/* Second page of a spanning TB: it covers the start of the page. */
845 tb_start = 0;
846 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
848 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
849 tb = tb->page_next[n];
/* Translate the guest code at (pc, cs_base, flags) into a new TB and
   link it into the physical page tables.  Flushes the whole code
   buffer first when TB or buffer space is exhausted. */
853 TranslationBlock *tb_gen_code(CPUState *env,
854 target_ulong pc, target_ulong cs_base,
855 int flags, int cflags)
857 TranslationBlock *tb;
858 uint8_t *tc_ptr;
859 target_ulong phys_pc, phys_page2, virt_page2;
860 int code_gen_size;
862 phys_pc = get_phys_addr_code(env, pc);
863 tb = tb_alloc(pc);
864 if (!tb) {
865 /* flush must be done */
866 tb_flush(env);
867 /* cannot fail at this point */
868 tb = tb_alloc(pc);
869 /* Don't forget to invalidate previous TB info. */
870 tb_invalidated_flag = 1;
872 tc_ptr = code_gen_ptr;
873 tb->tc_ptr = tc_ptr;
874 tb->cs_base = cs_base;
875 tb->flags = flags;
876 tb->cflags = cflags;
877 cpu_gen_code(env, tb, &code_gen_size);
/* Advance the write pointer, rounded up to CODE_GEN_ALIGN. */
878 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
880 /* check next page if needed */
881 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
882 phys_page2 = -1;
883 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
884 phys_page2 = get_phys_addr_code(env, virt_page2);
886 tb_link_phys(tb, phys_pc, phys_page2);
887 return tb;
890 /* invalidate all TBs which intersect with the target physical page
891 starting in range [start;end[. NOTE: start and end must refer to
892 the same physical page. 'is_cpu_write_access' should be true if called
893 from a real cpu write access: the virtual CPU will exit the current
894 TB if code is modified inside this TB. */
895 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
896 int is_cpu_write_access)
898 TranslationBlock *tb, *tb_next, *saved_tb;
899 CPUState *env = cpu_single_env;
900 target_ulong tb_start, tb_end;
901 PageDesc *p;
902 int n;
903 #ifdef TARGET_HAS_PRECISE_SMC
904 int current_tb_not_found = is_cpu_write_access;
905 TranslationBlock *current_tb = NULL;
906 int current_tb_modified = 0;
907 target_ulong current_pc = 0;
908 target_ulong current_cs_base = 0;
909 int current_flags = 0;
910 #endif /* TARGET_HAS_PRECISE_SMC */
912 p = page_find(start >> TARGET_PAGE_BITS);
913 if (!p)
914 return;
/* After enough write faults on this page, switch to the bitmap-based
   fast path (see tb_invalidate_phys_page_fast). */
915 if (!p->code_bitmap &&
916 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917 is_cpu_write_access) {
918 /* build code bitmap */
919 build_page_bitmap(p);
922 /* we remove all the TBs in the range [start, end[ */
923 /* XXX: see if in some cases it could be faster to invalidate all the code */
924 tb = p->first_tb;
925 while (tb != NULL) {
926 n = (long)tb & 3;
927 tb = (TranslationBlock *)((long)tb & ~3);
928 tb_next = tb->page_next[n];
929 /* NOTE: this is subtle as a TB may span two physical pages */
930 if (n == 0) {
931 /* NOTE: tb_end may be after the end of the page, but
932 it is not a problem */
933 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934 tb_end = tb_start + tb->size;
935 } else {
936 tb_start = tb->page_addr[1];
937 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
/* Only invalidate TBs that actually overlap [start, end[. */
939 if (!(tb_end <= start || tb_start >= end)) {
940 #ifdef TARGET_HAS_PRECISE_SMC
941 if (current_tb_not_found) {
942 current_tb_not_found = 0;
943 current_tb = NULL;
944 if (env->mem_io_pc) {
945 /* now we have a real cpu fault */
946 current_tb = tb_find_pc(env->mem_io_pc);
949 if (current_tb == tb &&
950 (current_tb->cflags & CF_COUNT_MASK) != 1) {
951 /* If we are modifying the current TB, we must stop
952 its execution. We could be more precise by checking
953 that the modification is after the current PC, but it
954 would require a specialized function to partially
955 restore the CPU state */
957 current_tb_modified = 1;
958 cpu_restore_state(current_tb, env,
959 env->mem_io_pc, NULL);
960 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961 &current_flags);
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 /* we need to do that to handle the case where a signal
965 occurs while doing tb_phys_invalidate() */
966 saved_tb = NULL;
967 if (env) {
968 saved_tb = env->current_tb;
969 env->current_tb = NULL;
971 tb_phys_invalidate(tb, -1);
972 if (env) {
973 env->current_tb = saved_tb;
974 if (env->interrupt_request && env->current_tb)
975 cpu_interrupt(env, env->interrupt_request);
978 tb = tb_next;
980 #if !defined(CONFIG_USER_ONLY)
981 /* if no code remaining, no need to continue to use slow writes */
982 if (!p->first_tb) {
983 invalidate_page_bitmap(p);
984 if (is_cpu_write_access) {
985 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
988 #endif
989 #ifdef TARGET_HAS_PRECISE_SMC
990 if (current_tb_modified) {
991 /* we generate a block containing just the instruction
992 modifying the memory. It will ensure that it cannot modify
993 itself */
994 env->current_tb = NULL;
995 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
996 cpu_resume_from_signal(env, NULL);
998 #endif
1001 /* len must be <= 8 and start must be a multiple of len */
/* Fast SMC path: consult the page's code bitmap and fall back to the
   full range invalidation only when the written bytes overlap code. */
1002 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1004 PageDesc *p;
1005 int offset, b;
1006 #if 0
1007 if (1) {
1008 if (loglevel) {
1009 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1010 cpu_single_env->mem_io_vaddr, len,
1011 cpu_single_env->eip,
1012 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1015 #endif
1016 p = page_find(start >> TARGET_PAGE_BITS);
1017 if (!p)
1018 return;
1019 if (p->code_bitmap) {
1020 offset = start & ~TARGET_PAGE_MASK;
/* Extract the 'len' bits covering the written bytes. */
1021 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1022 if (b & ((1 << len) - 1))
1023 goto do_invalidate;
1024 } else {
1025 do_invalidate:
1026 tb_invalidate_phys_page_range(start, start + len, 1);
1030 #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (non-softmmu,
   i.e. user-mode write fault path).  'pc'/'puc' describe the faulting
   host context for precise SMC recovery. */
1031 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1032 unsigned long pc, void *puc)
1034 TranslationBlock *tb;
1035 PageDesc *p;
1036 int n;
1037 #ifdef TARGET_HAS_PRECISE_SMC
1038 TranslationBlock *current_tb = NULL;
1039 CPUState *env = cpu_single_env;
1040 int current_tb_modified = 0;
1041 target_ulong current_pc = 0;
1042 target_ulong current_cs_base = 0;
1043 int current_flags = 0;
1044 #endif
1046 addr &= TARGET_PAGE_MASK;
1047 p = page_find(addr >> TARGET_PAGE_BITS);
1048 if (!p)
1049 return;
1050 tb = p->first_tb;
1051 #ifdef TARGET_HAS_PRECISE_SMC
1052 if (tb && pc != 0) {
1053 current_tb = tb_find_pc(pc);
1055 #endif
1056 while (tb != NULL) {
1057 n = (long)tb & 3;
1058 tb = (TranslationBlock *)((long)tb & ~3);
1059 #ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb == tb &&
1061 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1062 /* If we are modifying the current TB, we must stop
1063 its execution. We could be more precise by checking
1064 that the modification is after the current PC, but it
1065 would require a specialized function to partially
1066 restore the CPU state */
1068 current_tb_modified = 1;
1069 cpu_restore_state(current_tb, env, pc, puc);
1070 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1071 &current_flags);
1073 #endif /* TARGET_HAS_PRECISE_SMC */
1074 tb_phys_invalidate(tb, addr);
1075 tb = tb->page_next[n];
1077 p->first_tb = NULL;
1078 #ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_modified) {
1080 /* we generate a block containing just the instruction
1081 modifying the memory. It will ensure that it cannot modify
1082 itself */
1083 env->current_tb = NULL;
1084 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1085 cpu_resume_from_signal(env, puc);
1087 #endif
1089 #endif
1091 /* add the tb in the target page and protect it if necessary */
/* 'n' is the page slot (0 or 1) of a possibly page-spanning TB; the
   slot index is encoded into the low bits of the first_tb pointer. */
1092 static inline void tb_alloc_page(TranslationBlock *tb,
1093 unsigned int n, target_ulong page_addr)
1095 PageDesc *p;
1096 TranslationBlock *last_first_tb;
1098 tb->page_addr[n] = page_addr;
1099 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1100 tb->page_next[n] = p->first_tb;
1101 last_first_tb = p->first_tb;
1102 p->first_tb = (TranslationBlock *)((long)tb | n);
1103 invalidate_page_bitmap(p);
1105 #if defined(TARGET_HAS_SMC) || 1
1107 #if defined(CONFIG_USER_ONLY)
1108 if (p->flags & PAGE_WRITE) {
1109 target_ulong addr;
1110 PageDesc *p2;
1111 int prot;
1113 /* force the host page as non writable (writes will have a
1114 page fault + mprotect overhead) */
1115 page_addr &= qemu_host_page_mask;
1116 prot = 0;
/* A host page may cover several target pages; collect the combined
   protection and clear PAGE_WRITE on each target page. */
1117 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1118 addr += TARGET_PAGE_SIZE) {
1120 p2 = page_find (addr >> TARGET_PAGE_BITS);
1121 if (!p2)
1122 continue;
1123 prot |= p2->flags;
1124 p2->flags &= ~PAGE_WRITE;
1125 page_get_flags(addr);
1127 mprotect(g2h(page_addr), qemu_host_page_size,
1128 (prot & PAGE_BITS) & ~PAGE_WRITE);
1129 #ifdef DEBUG_TB_INVALIDATE
1130 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1131 page_addr);
1132 #endif
1134 #else
1135 /* if some code is already present, then the pages are already
1136 protected. So we handle the case where only the first TB is
1137 allocated in a physical page */
1138 if (!last_first_tb) {
1139 tlb_protect_code(page_addr);
1141 #endif
1143 #endif /* TARGET_HAS_SMC */
1146 /* Allocate a new translation block. Flush the translation buffer if
1147 too many translation blocks or too much generated code. */
1148 TranslationBlock *tb_alloc(target_ulong pc)
1150 TranslationBlock *tb;
1152 if (nb_tbs >= code_gen_max_blocks ||
1153 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1154 return NULL;
1155 tb = &tbs[nb_tbs++];
1156 tb->pc = pc;
1157 tb->cflags = 0;
1158 return tb;
1161 void tb_free(TranslationBlock *tb)
1163 /* In practice this is mostly used for single use temporary TB
1164 Ignore the hard cases and just back up if this TB happens to
1165 be the last one generated. */
1166 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1167 code_gen_ptr = tb->tc_ptr;
1168 nb_tbs--;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list (second page only when the TB spans two) */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* jmp_first is a self-pointer tagged with 2, marking the head of
       the circular list of TBs that jump into this one */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff marks an unused jump slot) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1212 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1213 tb[1].tc_ptr. Return NULL if not found */
1214 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1216 int m_min, m_max, m;
1217 unsigned long v;
1218 TranslationBlock *tb;
1220 if (nb_tbs <= 0)
1221 return NULL;
1222 if (tc_ptr < (unsigned long)code_gen_buffer ||
1223 tc_ptr >= (unsigned long)code_gen_ptr)
1224 return NULL;
1225 /* binary search (cf Knuth) */
1226 m_min = 0;
1227 m_max = nb_tbs - 1;
1228 while (m_min <= m_max) {
1229 m = (m_min + m_max) >> 1;
1230 tb = &tbs[m];
1231 v = (unsigned long)tb->tc_ptr;
1232 if (v == tc_ptr)
1233 return tb;
1234 else if (tc_ptr < v) {
1235 m_max = m - 1;
1236 } else {
1237 m_min = m + 1;
1240 return &tbs[m_max];
1243 static void tb_reset_jump_recursive(TranslationBlock *tb);
/* Unlink the n-th outgoing jump of 'tb' from the jump list of its
   target, patch the jump back to its "fall out to epilogue" form, and
   recursively do the same for the target TB.  List pointers carry the
   slot number in their low 2 bits; the value 2 tags the list head. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1284 static void tb_reset_jump_recursive(TranslationBlock *tb)
1286 tb_reset_jump_recursive2(tb, 0);
1287 tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
/* Invalidate any translated code covering the byte at guest PC 'pc',
   so the next execution retranslates it and hits the breakpoint. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    /* translate the virtual PC to a physical address, then to a ram
       offset; unmapped pages fall back to IO_MEM_UNASSIGNED */
    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    /* one-byte range is enough: any TB containing this byte is dropped */
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.
   'len' must be a power of two (1/2/4/8) and 'addr' aligned to it;
   otherwise -EINVAL.  On success the new watchpoint is optionally
   returned through *watchpoint and the TLB entry for the page is
   dropped so future accesses go through the slow path.  Returns 0,
   -EINVAL or -ENOMEM. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));
    if (!wp)
        return -ENOMEM;

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* force the page back through tlb_set_page_exec() so the
       watchpoint trap routine gets installed for it */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1344 /* Remove a specific watchpoint. */
1345 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1346 int flags)
1348 target_ulong len_mask = ~(len - 1);
1349 CPUWatchpoint *wp;
1351 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1352 if (addr == wp->vaddr && len_mask == wp->len_mask
1353 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1354 cpu_watchpoint_remove_by_ref(env, wp);
1355 return 0;
1358 return -ENOENT;
1361 /* Remove a specific watchpoint by reference. */
1362 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1364 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1366 tlb_flush_page(env, watchpoint->vaddr);
1368 qemu_free(watchpoint);
1371 /* Remove all matching watchpoints. */
1372 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1374 CPUWatchpoint *wp, *next;
1376 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1377 if (wp->flags & mask)
1378 cpu_watchpoint_remove_by_ref(env, wp);
/* Add a breakpoint at guest PC 'pc'.  On success the new breakpoint
   is optionally returned through *breakpoint.  Returns 0, -ENOMEM, or
   -ENOSYS on targets without in-circuit-emulation support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));
    if (!bp)
        return -ENOMEM;

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* drop any translated code for this PC so the breakpoint takes
       effect on the next execution */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1412 /* Remove a specific breakpoint. */
1413 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1415 #if defined(TARGET_HAS_ICE)
1416 CPUBreakpoint *bp;
1418 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1419 if (bp->pc == pc && bp->flags == flags) {
1420 cpu_breakpoint_remove_by_ref(env, bp);
1421 return 0;
1424 return -ENOENT;
1425 #else
1426 return -ENOSYS;
1427 #endif
1430 /* Remove a specific breakpoint by reference. */
1431 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1433 #if defined(TARGET_HAS_ICE)
1434 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1436 breakpoint_invalidate(env, breakpoint->pc);
1438 qemu_free(breakpoint);
1439 #endif
1442 /* Remove all matching breakpoints. */
1443 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1445 #if defined(TARGET_HAS_ICE)
1446 CPUBreakpoint *bp, *next;
1448 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1449 if (bp->flags & mask)
1450 cpu_breakpoint_remove_by_ref(env, bp);
1452 #endif
1455 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1456 CPU loop after each instruction */
1457 void cpu_single_step(CPUState *env, int enabled)
1459 #if defined(TARGET_HAS_ICE)
1460 if (env->singlestep_enabled != enabled) {
1461 env->singlestep_enabled = enabled;
1462 /* must flush all the translated code to avoid inconsistancies */
1463 /* XXX: only flush what is necessary */
1464 tb_flush(env);
1466 #endif
/* enable or disable low levels log.
   Opens the log file lazily on first use (append mode once a log has
   already been written in this run) and closes it when the log level
   drops to zero.  Exits the process if the log file cannot be opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* subsequent opens in this run append rather than truncate */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1496 void cpu_set_log_filename(const char *filename)
1498 logfilename = strdup(filename);
1499 if (logfile) {
1500 fclose(logfile);
1501 logfile = NULL;
1503 cpu_set_log(loglevel);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        /* force the instruction-count decrementer to expire so the
           execution loop notices the pending interrupt */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
/* Table mapping log-mask bits to their textual names and help text;
   terminated by a zero-mask entry.  Consumed by cpu_str_to_log_mask()
   below. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1583 static int cmp1(const char *s1, int n, const char *s2)
1585 if (strlen(s2) != n)
1586 return 0;
1587 return memcmp(s1, s2, n) == 0;
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* [p, p1) is the current comma-separated token */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            /* "all" turns on every known mask bit */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            /* unknown token: report error to the caller */
            return 0;
        }
    found:
        /* after the "all" branch, item points at the zero terminator,
           so this OR is a harmless no-op in that case */
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
/* Print a fatal error message (printf-style) plus a CPU state dump to
   stderr and, if open, to the log file, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    /* the va_list is consumed twice (stderr and logfile), hence the copy */
    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        /* make sure the dump reaches the disk before we die */
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
1655 CPUState *cpu_copy(CPUState *env)
1657 CPUState *new_env = cpu_init(env->cpu_model_str);
1658 /* preserve chaining and index */
1659 CPUState *next_cpu = new_env->next_cpu;
1660 int cpu_index = new_env->cpu_index;
1661 memcpy(new_env, env, sizeof(CPUState));
1662 new_env->next_cpu = next_cpu;
1663 new_env->cpu_index = cpu_index;
1664 return new_env;
1667 #if !defined(CONFIG_USER_ONLY)
/* Clear the per-CPU TB jump cache entries for the flushed page. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  A TB can start on the previous page
       and spill into this one, hence the extra pass at addr - page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* invalidate every entry of every MMU mode by setting all three
       compare addresses to -1 (never matches a page-aligned vaddr) */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    /* the TB jump cache keys on virtual addresses, so it must go too */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
/* Invalidate a single TLB entry if any of its three compare addresses
   (read/write/code) matches the given page-aligned vaddr. */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
/* Flush the TLB entries for a single guest virtual page in every MMU
   mode, plus the TB jump cache entries that could reference it. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* the TLB is direct-mapped on the page index */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG forces writes through the notdirty
       slow path, which is where self-modifying code is caught */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
/* If this TLB entry maps RAM inside [start, start+length) (host
   addresses), mark its write path TLB_NOTDIRTY so the next write goes
   through the slow path and sets the dirty bits again. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* unsigned compare doubles as a range check (addr >= start) */
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Clear the given dirty flag bits for the ram range [start, end) and
   update every CPU's TLB so the flags get set again on the next write. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in the per-page dirty byte array */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
1851 int cpu_physical_memory_set_dirty_tracking(int enable)
1853 in_migration = enable;
1854 return 0;
1857 int cpu_physical_memory_get_dirty_tracking(void)
1859 return in_migration;
1862 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1864 if (kvm_enabled())
1865 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
/* If this TLB entry maps RAM whose page is no longer dirty, reinstate
   TLB_NOTDIRTY on its write path so the next write is trapped. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* recover the ram offset from the host address in the entry */
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
1899 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1901 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1902 tlb_entry->addr_write = vaddr;
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    /* direct-mapped lookup on the page index, one entry per MMU mode */
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* fill in the direct-mapped TLB slot for this page */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* clean RAM page: trap the first write to set dirty bits */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
2026 #else
/* User-mode emulation has no softmmu TLB: these are no-op stubs that
   keep the common-code call sites linking. */
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    /* always succeeds: guest pages are mapped directly by the host */
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* walk the two-level page table, coalescing runs of pages with
       identical protection into single printed ranges; start == -1
       means "not currently inside a mapped run" */
    start = -1;
    end = -1;
    prot = 0;
    /* the extra iteration (i == L1_SIZE) flushes the final run */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
2086 int page_get_flags(target_ulong address)
2088 PageDesc *p;
2090 p = page_find(address >> TARGET_PAGE_BITS);
2091 if (!p)
2092 return 0;
2093 return p->flags;
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    /* remember the original writability so write protection applied
       for translated code can later be undone by page_unprotect() */
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
/* Check that every page of [start, start+len) is mapped with at least
   the access rights in 'flags'.  Returns 0 on success, -1 on any
   missing page/right.  Write checks also unprotect pages that were
   made read-only to guard translated code. */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            /* NOTE(review): this returns after checking only the FIRST
               page of a write request spanning several pages — looks
               suspicious, confirm against later upstream versions */
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* union of the flags of all target pages in the host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
/* No-op in user-mode emulation: there is no softmmu TLB to update. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2216 #endif /* defined(CONFIG_USER_ONLY) */
2218 #if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
/* Compute the intersection of [start_addr, start_addr+orig_size) with
   the target page at 'addr' as in-page offsets [start_addr2,
   end_addr2], and set need_subpage when the mapping does not cover
   the whole page (so sub-page I/O dispatch is required). */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_region and regon_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* page already mapped: either overwrite it or, for a
               partial-page mapping, split it into a subpage */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* already a subpage: reuse its dispatch structure */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM(D) pages: phys_offset carries the ram offset,
                   which advances page by page */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* previously unmapped page */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           0);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2337 /* XXX: temporary until new memory mapping API */
2338 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2340 PhysPageDesc *p;
2342 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2343 if (!p)
2344 return IO_MEM_UNASSIGNED;
2345 return p->phys_offset;
2348 /* XXX: better than nothing */
2349 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2351 ram_addr_t addr;
2352 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2353 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2354 (uint64_t)size, (uint64_t)phys_ram_size);
2355 abort();
2357 addr = phys_ram_alloc_offset;
2358 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2359 return addr;
2362 void qemu_ram_free(ram_addr_t addr)
2366 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2368 #ifdef DEBUG_UNASSIGNED
2369 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2370 #endif
2371 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2372 do_unassigned_access(addr, 0, 0, 0, 1);
2373 #endif
2374 return 0;
2377 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2379 #ifdef DEBUG_UNASSIGNED
2380 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2381 #endif
2382 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2383 do_unassigned_access(addr, 0, 0, 0, 2);
2384 #endif
2385 return 0;
2388 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2390 #ifdef DEBUG_UNASSIGNED
2391 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2392 #endif
2393 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2394 do_unassigned_access(addr, 0, 0, 0, 4);
2395 #endif
2396 return 0;
2399 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2401 #ifdef DEBUG_UNASSIGNED
2402 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2403 #endif
2404 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2405 do_unassigned_access(addr, 1, 0, 0, 1);
2406 #endif
2409 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2411 #ifdef DEBUG_UNASSIGNED
2412 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2413 #endif
2414 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2415 do_unassigned_access(addr, 1, 0, 0, 2);
2416 #endif
2419 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2421 #ifdef DEBUG_UNASSIGNED
2422 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2423 #endif
2424 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2425 do_unassigned_access(addr, 1, 0, 0, 4);
2426 #endif
2429 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2430 unassigned_mem_readb,
2431 unassigned_mem_readw,
2432 unassigned_mem_readl,
2435 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2436 unassigned_mem_writeb,
2437 unassigned_mem_writew,
2438 unassigned_mem_writel,
2441 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2442 uint32_t val)
2444 int dirty_flags;
2445 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2446 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2447 #if !defined(CONFIG_USER_ONLY)
2448 tb_invalidate_phys_page_fast(ram_addr, 1);
2449 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2450 #endif
2452 stb_p(phys_ram_base + ram_addr, val);
2453 #ifdef USE_KQEMU
2454 if (cpu_single_env->kqemu_enabled &&
2455 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2456 kqemu_modify_page(cpu_single_env, ram_addr);
2457 #endif
2458 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2459 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2460 /* we remove the notdirty callback only if the code has been
2461 flushed */
2462 if (dirty_flags == 0xff)
2463 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2466 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2467 uint32_t val)
2469 int dirty_flags;
2470 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2471 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2472 #if !defined(CONFIG_USER_ONLY)
2473 tb_invalidate_phys_page_fast(ram_addr, 2);
2474 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2475 #endif
2477 stw_p(phys_ram_base + ram_addr, val);
2478 #ifdef USE_KQEMU
2479 if (cpu_single_env->kqemu_enabled &&
2480 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2481 kqemu_modify_page(cpu_single_env, ram_addr);
2482 #endif
2483 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2484 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2485 /* we remove the notdirty callback only if the code has been
2486 flushed */
2487 if (dirty_flags == 0xff)
2488 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2491 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2492 uint32_t val)
2494 int dirty_flags;
2495 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2496 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2497 #if !defined(CONFIG_USER_ONLY)
2498 tb_invalidate_phys_page_fast(ram_addr, 4);
2499 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2500 #endif
2502 stl_p(phys_ram_base + ram_addr, val);
2503 #ifdef USE_KQEMU
2504 if (cpu_single_env->kqemu_enabled &&
2505 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2506 kqemu_modify_page(cpu_single_env, ram_addr);
2507 #endif
2508 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2509 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2510 /* we remove the notdirty callback only if the code has been
2511 flushed */
2512 if (dirty_flags == 0xff)
2513 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2516 static CPUReadMemoryFunc *error_mem_read[3] = {
2517 NULL, /* never used */
2518 NULL, /* never used */
2519 NULL, /* never used */
2522 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2523 notdirty_mem_writeb,
2524 notdirty_mem_writew,
2525 notdirty_mem_writel,
2528 /* Generate a debug exception if a watchpoint has been hit. */
2529 static void check_watchpoint(int offset, int len_mask, int flags)
2531 CPUState *env = cpu_single_env;
2532 target_ulong pc, cs_base;
2533 TranslationBlock *tb;
2534 target_ulong vaddr;
2535 CPUWatchpoint *wp;
2536 int cpu_flags;
2538 if (env->watchpoint_hit) {
2539 /* We re-entered the check after replacing the TB. Now raise
2540 * the debug interrupt so that is will trigger after the
2541 * current instruction. */
2542 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2543 return;
2545 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2546 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2547 if ((vaddr == (wp->vaddr & len_mask) ||
2548 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2549 wp->flags |= BP_WATCHPOINT_HIT;
2550 if (!env->watchpoint_hit) {
2551 env->watchpoint_hit = wp;
2552 tb = tb_find_pc(env->mem_io_pc);
2553 if (!tb) {
2554 cpu_abort(env, "check_watchpoint: could not find TB for "
2555 "pc=%p", (void *)env->mem_io_pc);
2557 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2558 tb_phys_invalidate(tb, -1);
2559 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2560 env->exception_index = EXCP_DEBUG;
2561 } else {
2562 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2563 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2565 cpu_resume_from_signal(env, NULL);
2567 } else {
2568 wp->flags &= ~BP_WATCHPOINT_HIT;
2573 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2574 so these check for a hit then pass through to the normal out-of-line
2575 phys routines. */
2576 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2578 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2579 return ldub_phys(addr);
2582 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2584 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2585 return lduw_phys(addr);
2588 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2590 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2591 return ldl_phys(addr);
2594 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2595 uint32_t val)
2597 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2598 stb_phys(addr, val);
2601 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2602 uint32_t val)
2604 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2605 stw_phys(addr, val);
2608 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2609 uint32_t val)
2611 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2612 stl_phys(addr, val);
2615 static CPUReadMemoryFunc *watch_mem_read[3] = {
2616 watch_mem_readb,
2617 watch_mem_readw,
2618 watch_mem_readl,
2621 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2622 watch_mem_writeb,
2623 watch_mem_writew,
2624 watch_mem_writel,
2627 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2628 unsigned int len)
2630 uint32_t ret;
2631 unsigned int idx;
2633 idx = SUBPAGE_IDX(addr);
2634 #if defined(DEBUG_SUBPAGE)
2635 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2636 mmio, len, addr, idx);
2637 #endif
2638 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2639 addr + mmio->region_offset[idx][0][len]);
2641 return ret;
2644 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2645 uint32_t value, unsigned int len)
2647 unsigned int idx;
2649 idx = SUBPAGE_IDX(addr);
2650 #if defined(DEBUG_SUBPAGE)
2651 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2652 mmio, len, addr, idx, value);
2653 #endif
2654 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2655 addr + mmio->region_offset[idx][1][len],
2656 value);
2659 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2661 #if defined(DEBUG_SUBPAGE)
2662 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2663 #endif
2665 return subpage_readlen(opaque, addr, 0);
2668 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2669 uint32_t value)
2671 #if defined(DEBUG_SUBPAGE)
2672 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2673 #endif
2674 subpage_writelen(opaque, addr, value, 0);
2677 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2679 #if defined(DEBUG_SUBPAGE)
2680 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2681 #endif
2683 return subpage_readlen(opaque, addr, 1);
2686 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2687 uint32_t value)
2689 #if defined(DEBUG_SUBPAGE)
2690 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2691 #endif
2692 subpage_writelen(opaque, addr, value, 1);
2695 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2697 #if defined(DEBUG_SUBPAGE)
2698 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2699 #endif
2701 return subpage_readlen(opaque, addr, 2);
2704 static void subpage_writel (void *opaque,
2705 target_phys_addr_t addr, uint32_t value)
2707 #if defined(DEBUG_SUBPAGE)
2708 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2709 #endif
2710 subpage_writelen(opaque, addr, value, 2);
2713 static CPUReadMemoryFunc *subpage_read[] = {
2714 &subpage_readb,
2715 &subpage_readw,
2716 &subpage_readl,
2719 static CPUWriteMemoryFunc *subpage_write[] = {
2720 &subpage_writeb,
2721 &subpage_writew,
2722 &subpage_writel,
2725 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2726 ram_addr_t memory, ram_addr_t region_offset)
2728 int idx, eidx;
2729 unsigned int i;
2731 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2732 return -1;
2733 idx = SUBPAGE_IDX(start);
2734 eidx = SUBPAGE_IDX(end);
2735 #if defined(DEBUG_SUBPAGE)
2736 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2737 mmio, start, end, idx, eidx, memory);
2738 #endif
2739 memory >>= IO_MEM_SHIFT;
2740 for (; idx <= eidx; idx++) {
2741 for (i = 0; i < 4; i++) {
2742 if (io_mem_read[memory][i]) {
2743 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2744 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2745 mmio->region_offset[idx][0][i] = region_offset;
2747 if (io_mem_write[memory][i]) {
2748 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2749 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2750 mmio->region_offset[idx][1][i] = region_offset;
2755 return 0;
2758 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2759 ram_addr_t orig_memory, ram_addr_t region_offset)
2761 subpage_t *mmio;
2762 int subpage_memory;
2764 mmio = qemu_mallocz(sizeof(subpage_t));
2765 if (mmio != NULL) {
2766 mmio->base = base;
2767 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2768 #if defined(DEBUG_SUBPAGE)
2769 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2770 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2771 #endif
2772 *phys = subpage_memory | IO_MEM_SUBPAGE;
2773 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2774 region_offset);
2777 return mmio;
2780 static void io_mem_init(void)
2782 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2783 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2784 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2785 io_mem_nb = 5;
2787 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2788 watch_mem_write, NULL);
2789 /* alloc dirty bits array */
2790 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2791 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2794 /* mem_read and mem_write are arrays of functions containing the
2795 function to access byte (index 0), word (index 1) and dword (index
2796 2). Functions can be omitted with a NULL function pointer. The
2797 registered functions may be modified dynamically later.
2798 If io_index is non zero, the corresponding io zone is
2799 modified. If it is zero, a new io zone is allocated. The return
2800 value can be used with cpu_register_physical_memory(). (-1) is
2801 returned if error. */
2802 int cpu_register_io_memory(int io_index,
2803 CPUReadMemoryFunc **mem_read,
2804 CPUWriteMemoryFunc **mem_write,
2805 void *opaque)
2807 int i, subwidth = 0;
2809 if (io_index <= 0) {
2810 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2811 return -1;
2812 io_index = io_mem_nb++;
2813 } else {
2814 if (io_index >= IO_MEM_NB_ENTRIES)
2815 return -1;
2818 for(i = 0;i < 3; i++) {
2819 if (!mem_read[i] || !mem_write[i])
2820 subwidth = IO_MEM_SUBWIDTH;
2821 io_mem_read[io_index][i] = mem_read[i];
2822 io_mem_write[io_index][i] = mem_write[i];
2824 io_mem_opaque[io_index] = opaque;
2825 return (io_index << IO_MEM_SHIFT) | subwidth;
2828 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2830 return io_mem_write[io_index >> IO_MEM_SHIFT];
2833 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2835 return io_mem_read[io_index >> IO_MEM_SHIFT];
2838 #endif /* !defined(CONFIG_USER_ONLY) */
2840 /* physical memory access (slow version, mainly for debug) */
2841 #if defined(CONFIG_USER_ONLY)
2842 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2843 int len, int is_write)
2845 int l, flags;
2846 target_ulong page;
2847 void * p;
2849 while (len > 0) {
2850 page = addr & TARGET_PAGE_MASK;
2851 l = (page + TARGET_PAGE_SIZE) - addr;
2852 if (l > len)
2853 l = len;
2854 flags = page_get_flags(page);
2855 if (!(flags & PAGE_VALID))
2856 return;
2857 if (is_write) {
2858 if (!(flags & PAGE_WRITE))
2859 return;
2860 /* XXX: this code should not depend on lock_user */
2861 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2862 /* FIXME - should this return an error rather than just fail? */
2863 return;
2864 memcpy(p, buf, l);
2865 unlock_user(p, addr, l);
2866 } else {
2867 if (!(flags & PAGE_READ))
2868 return;
2869 /* XXX: this code should not depend on lock_user */
2870 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2871 /* FIXME - should this return an error rather than just fail? */
2872 return;
2873 memcpy(buf, p, l);
2874 unlock_user(p, addr, 0);
2876 len -= l;
2877 buf += l;
2878 addr += l;
2882 #else
2883 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2884 int len, int is_write)
2886 int l, io_index;
2887 uint8_t *ptr;
2888 uint32_t val;
2889 target_phys_addr_t page;
2890 unsigned long pd;
2891 PhysPageDesc *p;
2893 while (len > 0) {
2894 page = addr & TARGET_PAGE_MASK;
2895 l = (page + TARGET_PAGE_SIZE) - addr;
2896 if (l > len)
2897 l = len;
2898 p = phys_page_find(page >> TARGET_PAGE_BITS);
2899 if (!p) {
2900 pd = IO_MEM_UNASSIGNED;
2901 } else {
2902 pd = p->phys_offset;
2905 if (is_write) {
2906 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2907 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2908 if (p)
2909 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2910 /* XXX: could force cpu_single_env to NULL to avoid
2911 potential bugs */
2912 if (l >= 4 && ((addr & 3) == 0)) {
2913 /* 32 bit write access */
2914 val = ldl_p(buf);
2915 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2916 l = 4;
2917 } else if (l >= 2 && ((addr & 1) == 0)) {
2918 /* 16 bit write access */
2919 val = lduw_p(buf);
2920 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2921 l = 2;
2922 } else {
2923 /* 8 bit write access */
2924 val = ldub_p(buf);
2925 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2926 l = 1;
2928 } else {
2929 unsigned long addr1;
2930 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2931 /* RAM case */
2932 ptr = phys_ram_base + addr1;
2933 memcpy(ptr, buf, l);
2934 if (!cpu_physical_memory_is_dirty(addr1)) {
2935 /* invalidate code */
2936 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2937 /* set dirty bit */
2938 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2939 (0xff & ~CODE_DIRTY_FLAG);
2942 } else {
2943 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2944 !(pd & IO_MEM_ROMD)) {
2945 /* I/O case */
2946 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2947 if (p)
2948 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2949 if (l >= 4 && ((addr & 3) == 0)) {
2950 /* 32 bit read access */
2951 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2952 stl_p(buf, val);
2953 l = 4;
2954 } else if (l >= 2 && ((addr & 1) == 0)) {
2955 /* 16 bit read access */
2956 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2957 stw_p(buf, val);
2958 l = 2;
2959 } else {
2960 /* 8 bit read access */
2961 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2962 stb_p(buf, val);
2963 l = 1;
2965 } else {
2966 /* RAM case */
2967 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2968 (addr & ~TARGET_PAGE_MASK);
2969 memcpy(buf, ptr, l);
2972 len -= l;
2973 buf += l;
2974 addr += l;
2978 /* used for ROM loading : can write in RAM and ROM */
2979 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2980 const uint8_t *buf, int len)
2982 int l;
2983 uint8_t *ptr;
2984 target_phys_addr_t page;
2985 unsigned long pd;
2986 PhysPageDesc *p;
2988 while (len > 0) {
2989 page = addr & TARGET_PAGE_MASK;
2990 l = (page + TARGET_PAGE_SIZE) - addr;
2991 if (l > len)
2992 l = len;
2993 p = phys_page_find(page >> TARGET_PAGE_BITS);
2994 if (!p) {
2995 pd = IO_MEM_UNASSIGNED;
2996 } else {
2997 pd = p->phys_offset;
3000 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3001 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3002 !(pd & IO_MEM_ROMD)) {
3003 /* do nothing */
3004 } else {
3005 unsigned long addr1;
3006 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3007 /* ROM/RAM case */
3008 ptr = phys_ram_base + addr1;
3009 memcpy(ptr, buf, l);
3011 len -= l;
3012 buf += l;
3013 addr += l;
3018 /* warning: addr must be aligned */
3019 uint32_t ldl_phys(target_phys_addr_t addr)
3021 int io_index;
3022 uint8_t *ptr;
3023 uint32_t val;
3024 unsigned long pd;
3025 PhysPageDesc *p;
3027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3028 if (!p) {
3029 pd = IO_MEM_UNASSIGNED;
3030 } else {
3031 pd = p->phys_offset;
3034 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3035 !(pd & IO_MEM_ROMD)) {
3036 /* I/O case */
3037 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3038 if (p)
3039 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3040 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3041 } else {
3042 /* RAM case */
3043 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3044 (addr & ~TARGET_PAGE_MASK);
3045 val = ldl_p(ptr);
3047 return val;
3050 /* warning: addr must be aligned */
3051 uint64_t ldq_phys(target_phys_addr_t addr)
3053 int io_index;
3054 uint8_t *ptr;
3055 uint64_t val;
3056 unsigned long pd;
3057 PhysPageDesc *p;
3059 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3060 if (!p) {
3061 pd = IO_MEM_UNASSIGNED;
3062 } else {
3063 pd = p->phys_offset;
3066 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3067 !(pd & IO_MEM_ROMD)) {
3068 /* I/O case */
3069 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3070 if (p)
3071 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3072 #ifdef TARGET_WORDS_BIGENDIAN
3073 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3074 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3075 #else
3076 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3077 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3078 #endif
3079 } else {
3080 /* RAM case */
3081 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3082 (addr & ~TARGET_PAGE_MASK);
3083 val = ldq_p(ptr);
3085 return val;
3088 /* XXX: optimize */
3089 uint32_t ldub_phys(target_phys_addr_t addr)
3091 uint8_t val;
3092 cpu_physical_memory_read(addr, &val, 1);
3093 return val;
3096 /* XXX: optimize */
3097 uint32_t lduw_phys(target_phys_addr_t addr)
3099 uint16_t val;
3100 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3101 return tswap16(val);
3104 /* warning: addr must be aligned. The ram page is not masked as dirty
3105 and the code inside is not invalidated. It is useful if the dirty
3106 bits are used to track modified PTEs */
3107 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3109 int io_index;
3110 uint8_t *ptr;
3111 unsigned long pd;
3112 PhysPageDesc *p;
3114 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3115 if (!p) {
3116 pd = IO_MEM_UNASSIGNED;
3117 } else {
3118 pd = p->phys_offset;
3121 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3122 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3123 if (p)
3124 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3125 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3126 } else {
3127 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3128 ptr = phys_ram_base + addr1;
3129 stl_p(ptr, val);
3131 if (unlikely(in_migration)) {
3132 if (!cpu_physical_memory_is_dirty(addr1)) {
3133 /* invalidate code */
3134 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3135 /* set dirty bit */
3136 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3137 (0xff & ~CODE_DIRTY_FLAG);
3143 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3145 int io_index;
3146 uint8_t *ptr;
3147 unsigned long pd;
3148 PhysPageDesc *p;
3150 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3151 if (!p) {
3152 pd = IO_MEM_UNASSIGNED;
3153 } else {
3154 pd = p->phys_offset;
3157 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3158 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3159 if (p)
3160 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3161 #ifdef TARGET_WORDS_BIGENDIAN
3162 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3163 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3164 #else
3165 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3166 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3167 #endif
3168 } else {
3169 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3170 (addr & ~TARGET_PAGE_MASK);
3171 stq_p(ptr, val);
3175 /* warning: addr must be aligned */
3176 void stl_phys(target_phys_addr_t addr, uint32_t val)
3178 int io_index;
3179 uint8_t *ptr;
3180 unsigned long pd;
3181 PhysPageDesc *p;
3183 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3184 if (!p) {
3185 pd = IO_MEM_UNASSIGNED;
3186 } else {
3187 pd = p->phys_offset;
3190 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3191 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3192 if (p)
3193 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3194 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3195 } else {
3196 unsigned long addr1;
3197 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3198 /* RAM case */
3199 ptr = phys_ram_base + addr1;
3200 stl_p(ptr, val);
3201 if (!cpu_physical_memory_is_dirty(addr1)) {
3202 /* invalidate code */
3203 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3204 /* set dirty bit */
3205 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3206 (0xff & ~CODE_DIRTY_FLAG);
3211 /* XXX: optimize */
3212 void stb_phys(target_phys_addr_t addr, uint32_t val)
3214 uint8_t v = val;
3215 cpu_physical_memory_write(addr, &v, 1);
3218 /* XXX: optimize */
3219 void stw_phys(target_phys_addr_t addr, uint32_t val)
3221 uint16_t v = tswap16(val);
3222 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3225 /* XXX: optimize */
3226 void stq_phys(target_phys_addr_t addr, uint64_t val)
3228 val = tswap64(val);
3229 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3232 #endif
3234 /* virtual memory access for debug */
3235 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3236 uint8_t *buf, int len, int is_write)
3238 int l;
3239 target_phys_addr_t phys_addr;
3240 target_ulong page;
3242 while (len > 0) {
3243 page = addr & TARGET_PAGE_MASK;
3244 phys_addr = cpu_get_phys_page_debug(env, page);
3245 /* if no physical page mapped, return an error */
3246 if (phys_addr == -1)
3247 return -1;
3248 l = (page + TARGET_PAGE_SIZE) - addr;
3249 if (l > len)
3250 l = len;
3251 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3252 buf, l, is_write);
3253 len -= l;
3254 buf += l;
3255 addr += l;
3257 return 0;
3260 /* in deterministic execution mode, instructions doing device I/Os
3261 must be at the end of the TB */
3262 void cpu_io_recompile(CPUState *env, void *retaddr)
3264 TranslationBlock *tb;
3265 uint32_t n, cflags;
3266 target_ulong pc, cs_base;
3267 uint64_t flags;
3269 tb = tb_find_pc((unsigned long)retaddr);
3270 if (!tb) {
3271 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3272 retaddr);
3274 n = env->icount_decr.u16.low + tb->icount;
3275 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3276 /* Calculate how many instructions had been executed before the fault
3277 occurred. */
3278 n = n - env->icount_decr.u16.low;
3279 /* Generate a new TB ending on the I/O insn. */
3280 n++;
3281 /* On MIPS and SH, delay slot instructions can only be restarted if
3282 they were already the first instruction in the TB. If this is not
3283 the first instruction in a TB then re-execute the preceding
3284 branch. */
3285 #if defined(TARGET_MIPS)
3286 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3287 env->active_tc.PC -= 4;
3288 env->icount_decr.u16.low++;
3289 env->hflags &= ~MIPS_HFLAG_BMASK;
3291 #elif defined(TARGET_SH4)
3292 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3293 && n > 1) {
3294 env->pc -= 2;
3295 env->icount_decr.u16.low++;
3296 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3298 #endif
3299 /* This should never happen. */
3300 if (n > CF_COUNT_MASK)
3301 cpu_abort(env, "TB too big during recompile");
3303 cflags = n | CF_LAST_IO;
3304 pc = tb->pc;
3305 cs_base = tb->cs_base;
3306 flags = tb->flags;
3307 tb_phys_invalidate(tb, -1);
3308 /* FIXME: In theory this could raise an exception. In practice
3309 we have already translated the block once so it's probably ok. */
3310 tb_gen_code(env, pc, cs_base, flags, cflags);
3311 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3312 the first in the TB) then we end up generating a whole new TB and
3313 repeating the fault, which is horribly inefficient.
3314 Better would be to execute just this insn uncached, or generate a
3315 second new TB. */
3316 cpu_resume_from_signal(env, NULL);
3319 void dump_exec_info(FILE *f,
3320 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3322 int i, target_code_size, max_target_code_size;
3323 int direct_jmp_count, direct_jmp2_count, cross_page;
3324 TranslationBlock *tb;
3326 target_code_size = 0;
3327 max_target_code_size = 0;
3328 cross_page = 0;
3329 direct_jmp_count = 0;
3330 direct_jmp2_count = 0;
3331 for(i = 0; i < nb_tbs; i++) {
3332 tb = &tbs[i];
3333 target_code_size += tb->size;
3334 if (tb->size > max_target_code_size)
3335 max_target_code_size = tb->size;
3336 if (tb->page_addr[1] != -1)
3337 cross_page++;
3338 if (tb->tb_next_offset[0] != 0xffff) {
3339 direct_jmp_count++;
3340 if (tb->tb_next_offset[1] != 0xffff) {
3341 direct_jmp2_count++;
3345 /* XXX: avoid using doubles ? */
3346 cpu_fprintf(f, "Translation buffer state:\n");
3347 cpu_fprintf(f, "gen code size %ld/%ld\n",
3348 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3349 cpu_fprintf(f, "TB count %d/%d\n",
3350 nb_tbs, code_gen_max_blocks);
3351 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3352 nb_tbs ? target_code_size / nb_tbs : 0,
3353 max_target_code_size);
3354 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3355 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3356 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3357 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3358 cross_page,
3359 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3360 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3361 direct_jmp_count,
3362 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3363 direct_jmp2_count,
3364 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3365 cpu_fprintf(f, "\nStatistics:\n");
3366 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3367 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3368 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3369 tcg_dump_info(f, cpu_fprintf);
3372 #if !defined(CONFIG_USER_ONLY)
3374 #define MMUSUFFIX _cmmu
3375 #define GETPC() NULL
3376 #define env cpu_single_env
3377 #define SOFTMMU_CODE_ACCESS
3379 #define SHIFT 0
3380 #include "softmmu_template.h"
3382 #define SHIFT 1
3383 #include "softmmu_template.h"
3385 #define SHIFT 2
3386 #include "softmmu_template.h"
3388 #define SHIFT 3
3389 #include "softmmu_template.h"
3391 #undef env
3393 #endif