1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #include "osdep.h"
47 #include "kvm.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #endif
52 //#define DEBUG_TB_INVALIDATE
53 //#define DEBUG_FLUSH
54 //#define DEBUG_TLB
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
66 #undef DEBUG_TB_CHECK
67 #endif
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #define MMAP_AREA_START 0x00000000
72 #define MMAP_AREA_END 0xa8000000
74 #if defined(TARGET_SPARC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 41
76 #elif defined(TARGET_SPARC)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #elif defined(TARGET_ALPHA)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #define TARGET_VIRT_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_PPC64)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
89 #else
90 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
91 #define TARGET_PHYS_ADDR_SPACE_BITS 32
92 #endif
94 static TranslationBlock *tbs;
95 int code_gen_max_blocks;
96 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
97 static int nb_tbs;
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
101 #if defined(__arm__) || defined(__sparc_v9__)
102 /* The prologue must be reachable with a direct jump. ARM and Sparc64
103 have limited branch ranges (possibly also PPC) so place it in a
104    section close to the code segment. */
105 #define code_gen_section \
106 __attribute__((__section__(".gen_code"))) \
107 __attribute__((aligned (32)))
108 #else
109 #define code_gen_section \
110 __attribute__((aligned (32)))
111 #endif
113 uint8_t code_gen_prologue[1024] code_gen_section;
114 static uint8_t *code_gen_buffer;
115 static unsigned long code_gen_buffer_size;
116 /* threshold to flush the translated code buffer */
117 static unsigned long code_gen_buffer_max_size;
118 uint8_t *code_gen_ptr;
120 #if !defined(CONFIG_USER_ONLY)
121 ram_addr_t phys_ram_size;
122 int phys_ram_fd;
123 uint8_t *phys_ram_base;
124 uint8_t *phys_ram_dirty;
125 uint8_t *bios_mem;
126 static int in_migration;
127 static ram_addr_t phys_ram_alloc_offset = 0;
128 #endif
130 CPUState *first_cpu;
131 /* current CPU in the current thread. It is only valid inside
132 cpu_exec() */
133 CPUState *cpu_single_env;
134 /* 0 = Do not count executed instructions.
135 1 = Precise instruction counting.
136 2 = Adaptive rate instruction counting. */
137 int use_icount = 0;
138 /* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
140 int64_t qemu_icount;
142 typedef struct PageDesc {
143 /* list of TBs intersecting this ram page */
144 TranslationBlock *first_tb;
146     /* to optimize self-modifying code, we count the number of write
147        accesses to a given page so we know when to switch to a bitmap */
147 unsigned int code_write_count;
148 uint8_t *code_bitmap;
149 #if defined(CONFIG_USER_ONLY)
150 unsigned long flags;
151 #endif
152 } PageDesc;
154 typedef struct PhysPageDesc {
155 /* offset in host memory of the page + io_index in the low bits */
156 ram_addr_t phys_offset;
157 } PhysPageDesc;
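/* Note on the encoding used below: phys_offset is either a page-aligned
   offset into phys_ram_base (for RAM and ROM pages) or an I/O handler
   index such as IO_MEM_UNASSIGNED kept in the bits below
   TARGET_PAGE_MASK; code such as tlb_set_page_exec() tests
   (pd & ~TARGET_PAGE_MASK) to tell the two cases apart. */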
159 #define L2_BITS 10
160 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
161 /* XXX: this is a temporary hack for alpha target.
162 * In the future, this is to be replaced by a multi-level table
163  *       to actually be able to handle the complete 64-bit address space.
165 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
166 #else
167 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
168 #endif
170 #define L1_SIZE (1 << L1_BITS)
171 #define L2_SIZE (1 << L2_BITS)
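/* Illustrative example of the two-level split, assuming the common value
   TARGET_PAGE_BITS == 12 (it is target dependent): for a page index
   "index = addr >> TARGET_PAGE_BITS",
       l1 slot = index >> L2_BITS            (address bits 22 and up)
       l2 slot = index & (L2_SIZE - 1)       (address bits 12..21)
   so page_find() costs two array lookups per page. */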
173 unsigned long qemu_real_host_page_size;
174 unsigned long qemu_host_page_bits;
175 unsigned long qemu_host_page_size;
176 unsigned long qemu_host_page_mask;
178 /* XXX: for system emulation, it could just be an array */
179 static PageDesc *l1_map[L1_SIZE];
180 static PhysPageDesc **l1_phys_map;
182 #if !defined(CONFIG_USER_ONLY)
183 static void io_mem_init(void);
185 /* io memory support */
186 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
187 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
188 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
189 char io_mem_used[IO_MEM_NB_ENTRIES];
190 static int io_mem_watch;
191 #endif
193 /* log support */
194 static const char *logfilename = "/tmp/qemu.log";
195 FILE *logfile;
196 int loglevel;
197 static int log_append = 0;
199 /* statistics */
200 static int tlb_flush_count;
201 static int tb_flush_count;
202 static int tb_phys_invalidate_count;
204 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
205 typedef struct subpage_t {
206 target_phys_addr_t base;
207 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
208 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
209 void *opaque[TARGET_PAGE_SIZE][2][4];
210 } subpage_t;
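/* A subpage is installed when I/O regions smaller than TARGET_PAGE_SIZE
   share one physical page: mem_read/mem_write keep one handler per byte
   offset inside the page, indexed by access size just like the global
   io_mem_read/io_mem_write tables, and SUBPAGE_IDX(addr) selects the
   slot for a given offset. */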
212 #ifdef _WIN32
213 static void map_exec(void *addr, long size)
215 DWORD old_protect;
216 VirtualProtect(addr, size,
217 PAGE_EXECUTE_READWRITE, &old_protect);
220 #else
221 static void map_exec(void *addr, long size)
223 unsigned long start, end, page_size;
225 page_size = getpagesize();
226 start = (unsigned long)addr;
227 start &= ~(page_size - 1);
229 end = (unsigned long)addr + size;
230 end += page_size - 1;
231 end &= ~(page_size - 1);
233 mprotect((void *)start, end - start,
234 PROT_READ | PROT_WRITE | PROT_EXEC);
236 #endif
238 static void page_init(void)
240 /* NOTE: we can always suppose that qemu_host_page_size >=
241 TARGET_PAGE_SIZE */
242 #ifdef _WIN32
244 SYSTEM_INFO system_info;
246 GetSystemInfo(&system_info);
247 qemu_real_host_page_size = system_info.dwPageSize;
249 #else
250 qemu_real_host_page_size = getpagesize();
251 #endif
252 if (qemu_host_page_size == 0)
253 qemu_host_page_size = qemu_real_host_page_size;
254 if (qemu_host_page_size < TARGET_PAGE_SIZE)
255 qemu_host_page_size = TARGET_PAGE_SIZE;
256 qemu_host_page_bits = 0;
257 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258 qemu_host_page_bits++;
259 qemu_host_page_mask = ~(qemu_host_page_size - 1);
260 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
263 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
265 long long startaddr, endaddr;
266 FILE *f;
267 int n;
269 mmap_lock();
270 last_brk = (unsigned long)sbrk(0);
271 f = fopen("/proc/self/maps", "r");
272 if (f) {
273 do {
274 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275 if (n == 2) {
276 startaddr = MIN(startaddr,
277 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278 endaddr = MIN(endaddr,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280 page_set_flags(startaddr & TARGET_PAGE_MASK,
281 TARGET_PAGE_ALIGN(endaddr),
282 PAGE_RESERVED);
284 } while (!feof(f));
285 fclose(f);
287 mmap_unlock();
289 #endif
292 static inline PageDesc **page_l1_map(target_ulong index)
294 #if TARGET_LONG_BITS > 32
295 /* Host memory outside guest VM. For 32-bit targets we have already
296 excluded high addresses. */
297 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298 return NULL;
299 #endif
300 return &l1_map[index >> L2_BITS];
303 static inline PageDesc *page_find_alloc(target_ulong index)
305 PageDesc **lp, *p;
306 lp = page_l1_map(index);
307 if (!lp)
308 return NULL;
310 p = *lp;
311 if (!p) {
312 /* allocate if not found */
313 #if defined(CONFIG_USER_ONLY)
314 unsigned long addr;
315 size_t len = sizeof(PageDesc) * L2_SIZE;
316 /* Don't use qemu_malloc because it may recurse. */
317 p = mmap(0, len, PROT_READ | PROT_WRITE,
318 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
319 *lp = p;
320 addr = h2g(p);
321 if (addr == (target_ulong)addr) {
322 page_set_flags(addr & TARGET_PAGE_MASK,
323 TARGET_PAGE_ALIGN(addr + len),
324 PAGE_RESERVED);
326 #else
327 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
328 *lp = p;
329 #endif
331 return p + (index & (L2_SIZE - 1));
334 static inline PageDesc *page_find(target_ulong index)
336 PageDesc **lp, *p;
337 lp = page_l1_map(index);
338 if (!lp)
339 return NULL;
341 p = *lp;
342 if (!p)
343 return 0;
344 return p + (index & (L2_SIZE - 1));
347 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
349 void **lp, **p;
350 PhysPageDesc *pd;
352 p = (void **)l1_phys_map;
353 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
355 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
356 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
357 #endif
358 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
359 p = *lp;
360 if (!p) {
361 /* allocate if not found */
362 if (!alloc)
363 return NULL;
364 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
365 memset(p, 0, sizeof(void *) * L1_SIZE);
366 *lp = p;
368 #endif
369 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
370 pd = *lp;
371 if (!pd) {
372 int i;
373 /* allocate if not found */
374 if (!alloc)
375 return NULL;
376 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
377 *lp = pd;
378 for (i = 0; i < L2_SIZE; i++)
379 pd[i].phys_offset = IO_MEM_UNASSIGNED;
381 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
384 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386 return phys_page_find_alloc(index, 0);
389 #if !defined(CONFIG_USER_ONLY)
390 static void tlb_protect_code(ram_addr_t ram_addr);
391 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
392 target_ulong vaddr);
393 #define mmap_lock() do { } while(0)
394 #define mmap_unlock() do { } while(0)
395 #endif
397 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399 #if defined(CONFIG_USER_ONLY)
400 /* Currently it is not recommended to allocate big chunks of data in
401    user mode. This will change when a dedicated libc is used. */
402 #define USE_STATIC_CODE_GEN_BUFFER
403 #endif
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
406 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
407 #endif
409 static void code_gen_alloc(unsigned long tb_size)
411 if (kvm_enabled())
412 return;
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 code_gen_buffer = static_code_gen_buffer;
416 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
417 map_exec(code_gen_buffer, code_gen_buffer_size);
418 #else
419 code_gen_buffer_size = tb_size;
420 if (code_gen_buffer_size == 0) {
421 #if defined(CONFIG_USER_ONLY)
422 /* in user mode, phys_ram_size is not meaningful */
423 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
424 #else
425         /* XXX: needs adjustments */
426 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
427 #endif
429 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
430 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
431 /* The code gen buffer location may have constraints depending on
432 the host cpu and OS */
433 #if defined(__linux__)
435 int flags;
436 void *start = NULL;
438 flags = MAP_PRIVATE | MAP_ANONYMOUS;
439 #if defined(__x86_64__)
440 flags |= MAP_32BIT;
441 /* Cannot map more than that */
442 if (code_gen_buffer_size > (800 * 1024 * 1024))
443 code_gen_buffer_size = (800 * 1024 * 1024);
444 #elif defined(__sparc_v9__)
445 // Map the buffer below 2G, so we can use direct calls and branches
446 flags |= MAP_FIXED;
447 start = (void *) 0x60000000UL;
448 if (code_gen_buffer_size > (512 * 1024 * 1024))
449 code_gen_buffer_size = (512 * 1024 * 1024);
450 #endif
451 code_gen_buffer = mmap(start, code_gen_buffer_size,
452 PROT_WRITE | PROT_READ | PROT_EXEC,
453 flags, -1, 0);
454 if (code_gen_buffer == MAP_FAILED) {
455 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
456 exit(1);
459 #elif defined(__FreeBSD__)
461 int flags;
462 void *addr = NULL;
463 flags = MAP_PRIVATE | MAP_ANONYMOUS;
464 #if defined(__x86_64__)
465 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
466 * 0x40000000 is free */
467 flags |= MAP_FIXED;
468 addr = (void *)0x40000000;
469 /* Cannot map more than that */
470 if (code_gen_buffer_size > (800 * 1024 * 1024))
471 code_gen_buffer_size = (800 * 1024 * 1024);
472 #endif
473 code_gen_buffer = mmap(addr, code_gen_buffer_size,
474 PROT_WRITE | PROT_READ | PROT_EXEC,
475 flags, -1, 0);
476 if (code_gen_buffer == MAP_FAILED) {
477 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
478 exit(1);
481 #else
482 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
483 if (!code_gen_buffer) {
484 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485 exit(1);
487 map_exec(code_gen_buffer, code_gen_buffer_size);
488 #endif
489 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
490 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491 code_gen_buffer_max_size = code_gen_buffer_size -
492 code_gen_max_block_size();
493 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
497 /* Must be called before using the QEMU cpus. 'tb_size' is the size
498 (in bytes) allocated to the translation buffer. Zero means default
499 size. */
500 void cpu_exec_init_all(unsigned long tb_size)
502 cpu_gen_init();
503 code_gen_alloc(tb_size);
504 code_gen_ptr = code_gen_buffer;
505 page_init();
506 #if !defined(CONFIG_USER_ONLY)
507 io_mem_init();
508 #endif
511 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
513 #define CPU_COMMON_SAVE_VERSION 1
515 static void cpu_common_save(QEMUFile *f, void *opaque)
517 CPUState *env = opaque;
519 qemu_put_be32s(f, &env->halted);
520 qemu_put_be32s(f, &env->interrupt_request);
523 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
525 CPUState *env = opaque;
527 if (version_id != CPU_COMMON_SAVE_VERSION)
528 return -EINVAL;
530 qemu_get_be32s(f, &env->halted);
531 qemu_get_be32s(f, &env->interrupt_request);
532 tlb_flush(env, 1);
534 return 0;
536 #endif
538 void cpu_exec_init(CPUState *env)
540 CPUState **penv;
541 int cpu_index;
543 env->next_cpu = NULL;
544 penv = &first_cpu;
545 cpu_index = 0;
546 while (*penv != NULL) {
547 penv = (CPUState **)&(*penv)->next_cpu;
548 cpu_index++;
550 env->cpu_index = cpu_index;
551 TAILQ_INIT(&env->breakpoints);
552 TAILQ_INIT(&env->watchpoints);
553 #ifdef _WIN32
554 env->thread_id = GetCurrentProcessId();
555 #else
556 env->thread_id = getpid();
557 #endif
558 *penv = env;
559 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
560 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
561 cpu_common_save, cpu_common_load, env);
562 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
563 cpu_save, cpu_load, env);
564 #endif
567 static inline void invalidate_page_bitmap(PageDesc *p)
569 if (p->code_bitmap) {
570 qemu_free(p->code_bitmap);
571 p->code_bitmap = NULL;
573 p->code_write_count = 0;
576 /* set to NULL all the 'first_tb' fields in all PageDescs */
577 static void page_flush_tb(void)
579 int i, j;
580 PageDesc *p;
582 for(i = 0; i < L1_SIZE; i++) {
583 p = l1_map[i];
584 if (p) {
585 for(j = 0; j < L2_SIZE; j++) {
586 p->first_tb = NULL;
587 invalidate_page_bitmap(p);
588 p++;
594 /* flush all the translation blocks */
595 /* XXX: tb_flush is currently not thread safe */
596 void tb_flush(CPUState *env1)
598 CPUState *env;
599 #if defined(DEBUG_FLUSH)
600 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
601 (unsigned long)(code_gen_ptr - code_gen_buffer),
602 nb_tbs, nb_tbs > 0 ?
603 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
604 #endif
605 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
606 cpu_abort(env1, "Internal error: code buffer overflow\n");
608 nb_tbs = 0;
610 for(env = first_cpu; env != NULL; env = env->next_cpu) {
611 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
614 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
615 page_flush_tb();
617 code_gen_ptr = code_gen_buffer;
618 /* XXX: flush processor icache at this point if cache flush is
619 expensive */
620 tb_flush_count++;
623 #ifdef DEBUG_TB_CHECK
625 static void tb_invalidate_check(target_ulong address)
627 TranslationBlock *tb;
628 int i;
629 address &= TARGET_PAGE_MASK;
630 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
631 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
632 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
633 address >= tb->pc + tb->size)) {
634 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
635 address, (long)tb->pc, tb->size);
641 /* verify that all the pages have correct rights for code */
642 static void tb_page_check(void)
644 TranslationBlock *tb;
645 int i, flags1, flags2;
647 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
648 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
649 flags1 = page_get_flags(tb->pc);
650 flags2 = page_get_flags(tb->pc + tb->size - 1);
651 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
652 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
653 (long)tb->pc, tb->size, flags1, flags2);
659 static void tb_jmp_check(TranslationBlock *tb)
661 TranslationBlock *tb1;
662 unsigned int n1;
664 /* suppress any remaining jumps to this TB */
665 tb1 = tb->jmp_first;
666 for(;;) {
667 n1 = (long)tb1 & 3;
668 tb1 = (TranslationBlock *)((long)tb1 & ~3);
669 if (n1 == 2)
670 break;
671 tb1 = tb1->jmp_next[n1];
673 /* check end of list */
674 if (tb1 != tb) {
675 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
679 #endif
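/* The TB lists manipulated below use tagged pointers: TranslationBlock
   structures are at least 4 byte aligned, so the low two bits of a
   page_next/jmp_next link carry extra state.  For page links the tag
   selects which of the TB's two pages the link belongs to (0 or 1); in
   the circular jump list the tag value 2 marks the list head (a TB's
   own jmp_first), which is why the code keeps masking with
   "(long)tb & 3" and "(long)tb & ~3". */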
681 /* invalidate one TB */
682 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
683 int next_offset)
685 TranslationBlock *tb1;
686 for(;;) {
687 tb1 = *ptb;
688 if (tb1 == tb) {
689 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
690 break;
692 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
696 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
698 TranslationBlock *tb1;
699 unsigned int n1;
701 for(;;) {
702 tb1 = *ptb;
703 n1 = (long)tb1 & 3;
704 tb1 = (TranslationBlock *)((long)tb1 & ~3);
705 if (tb1 == tb) {
706 *ptb = tb1->page_next[n1];
707 break;
709 ptb = &tb1->page_next[n1];
713 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
715 TranslationBlock *tb1, **ptb;
716 unsigned int n1;
718 ptb = &tb->jmp_next[n];
719 tb1 = *ptb;
720 if (tb1) {
721 /* find tb(n) in circular list */
722 for(;;) {
723 tb1 = *ptb;
724 n1 = (long)tb1 & 3;
725 tb1 = (TranslationBlock *)((long)tb1 & ~3);
726 if (n1 == n && tb1 == tb)
727 break;
728 if (n1 == 2) {
729 ptb = &tb1->jmp_first;
730 } else {
731 ptb = &tb1->jmp_next[n1];
734 /* now we can suppress tb(n) from the list */
735 *ptb = tb->jmp_next[n];
737 tb->jmp_next[n] = NULL;
741 /* reset the jump entry 'n' of a TB so that it is not chained to
742 another TB */
743 static inline void tb_reset_jump(TranslationBlock *tb, int n)
745 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
748 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
750 CPUState *env;
751 PageDesc *p;
752 unsigned int h, n1;
753 target_phys_addr_t phys_pc;
754 TranslationBlock *tb1, *tb2;
756 /* remove the TB from the hash list */
757 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
758 h = tb_phys_hash_func(phys_pc);
759 tb_remove(&tb_phys_hash[h], tb,
760 offsetof(TranslationBlock, phys_hash_next));
762 /* remove the TB from the page list */
763 if (tb->page_addr[0] != page_addr) {
764 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
765 tb_page_remove(&p->first_tb, tb);
766 invalidate_page_bitmap(p);
768 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
769 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
770 tb_page_remove(&p->first_tb, tb);
771 invalidate_page_bitmap(p);
774 tb_invalidated_flag = 1;
776 /* remove the TB from the hash list */
777 h = tb_jmp_cache_hash_func(tb->pc);
778 for(env = first_cpu; env != NULL; env = env->next_cpu) {
779 if (env->tb_jmp_cache[h] == tb)
780 env->tb_jmp_cache[h] = NULL;
783 /* suppress this TB from the two jump lists */
784 tb_jmp_remove(tb, 0);
785 tb_jmp_remove(tb, 1);
787 /* suppress any remaining jumps to this TB */
788 tb1 = tb->jmp_first;
789 for(;;) {
790 n1 = (long)tb1 & 3;
791 if (n1 == 2)
792 break;
793 tb1 = (TranslationBlock *)((long)tb1 & ~3);
794 tb2 = tb1->jmp_next[n1];
795 tb_reset_jump(tb1, n1);
796 tb1->jmp_next[n1] = NULL;
797 tb1 = tb2;
799 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
801 tb_phys_invalidate_count++;
804 static inline void set_bits(uint8_t *tab, int start, int len)
806 int end, mask, end1;
808 end = start + len;
809 tab += start >> 3;
810 mask = 0xff << (start & 7);
811 if ((start & ~7) == (end & ~7)) {
812 if (start < end) {
813 mask &= ~(0xff << (end & 7));
814 *tab |= mask;
816 } else {
817 *tab++ |= mask;
818 start = (start + 8) & ~7;
819 end1 = end & ~7;
820 while (start < end1) {
821 *tab++ = 0xff;
822 start += 8;
824 if (start < end) {
825 mask = ~(0xff << (end & 7));
826 *tab |= mask;
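/* Worked example: set_bits(tab, 5, 10) covers bit positions 5..14, so it
   ORs 0xe0 into tab[0] (bits 5-7) and 0x7f into tab[1] (bits 8-14),
   leaving every other bit untouched. */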
831 static void build_page_bitmap(PageDesc *p)
833 int n, tb_start, tb_end;
834 TranslationBlock *tb;
836 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
837 if (!p->code_bitmap)
838 return;
840 tb = p->first_tb;
841 while (tb != NULL) {
842 n = (long)tb & 3;
843 tb = (TranslationBlock *)((long)tb & ~3);
844 /* NOTE: this is subtle as a TB may span two physical pages */
845 if (n == 0) {
846 /* NOTE: tb_end may be after the end of the page, but
847 it is not a problem */
848 tb_start = tb->pc & ~TARGET_PAGE_MASK;
849 tb_end = tb_start + tb->size;
850 if (tb_end > TARGET_PAGE_SIZE)
851 tb_end = TARGET_PAGE_SIZE;
852 } else {
853 tb_start = 0;
854 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
856 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
857 tb = tb->page_next[n];
861 TranslationBlock *tb_gen_code(CPUState *env,
862 target_ulong pc, target_ulong cs_base,
863 int flags, int cflags)
865 TranslationBlock *tb;
866 uint8_t *tc_ptr;
867 target_ulong phys_pc, phys_page2, virt_page2;
868 int code_gen_size;
870 phys_pc = get_phys_addr_code(env, pc);
871 tb = tb_alloc(pc);
872 if (!tb) {
873 /* flush must be done */
874 tb_flush(env);
875 /* cannot fail at this point */
876 tb = tb_alloc(pc);
877 /* Don't forget to invalidate previous TB info. */
878 tb_invalidated_flag = 1;
880 tc_ptr = code_gen_ptr;
881 tb->tc_ptr = tc_ptr;
882 tb->cs_base = cs_base;
883 tb->flags = flags;
884 tb->cflags = cflags;
885 cpu_gen_code(env, tb, &code_gen_size);
886 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
888 /* check next page if needed */
889 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
890 phys_page2 = -1;
891 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
892 phys_page2 = get_phys_addr_code(env, virt_page2);
894 tb_link_phys(tb, phys_pc, phys_page2);
895 return tb;
898 /* invalidate all TBs which intersect with the target physical page
899    starting in range [start, end[. NOTE: start and end must refer to
900 the same physical page. 'is_cpu_write_access' should be true if called
901 from a real cpu write access: the virtual CPU will exit the current
902 TB if code is modified inside this TB. */
903 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
904 int is_cpu_write_access)
906 TranslationBlock *tb, *tb_next, *saved_tb;
907 CPUState *env = cpu_single_env;
908 target_ulong tb_start, tb_end;
909 PageDesc *p;
910 int n;
911 #ifdef TARGET_HAS_PRECISE_SMC
912 int current_tb_not_found = is_cpu_write_access;
913 TranslationBlock *current_tb = NULL;
914 int current_tb_modified = 0;
915 target_ulong current_pc = 0;
916 target_ulong current_cs_base = 0;
917 int current_flags = 0;
918 #endif /* TARGET_HAS_PRECISE_SMC */
920 p = page_find(start >> TARGET_PAGE_BITS);
921 if (!p)
922 return;
923 if (!p->code_bitmap &&
924 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
925 is_cpu_write_access) {
926 /* build code bitmap */
927 build_page_bitmap(p);
930 /* we remove all the TBs in the range [start, end[ */
931 /* XXX: see if in some cases it could be faster to invalidate all the code */
932 tb = p->first_tb;
933 while (tb != NULL) {
934 n = (long)tb & 3;
935 tb = (TranslationBlock *)((long)tb & ~3);
936 tb_next = tb->page_next[n];
937 /* NOTE: this is subtle as a TB may span two physical pages */
938 if (n == 0) {
939 /* NOTE: tb_end may be after the end of the page, but
940 it is not a problem */
941 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
942 tb_end = tb_start + tb->size;
943 } else {
944 tb_start = tb->page_addr[1];
945 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
947 if (!(tb_end <= start || tb_start >= end)) {
948 #ifdef TARGET_HAS_PRECISE_SMC
949 if (current_tb_not_found) {
950 current_tb_not_found = 0;
951 current_tb = NULL;
952 if (env->mem_io_pc) {
953 /* now we have a real cpu fault */
954 current_tb = tb_find_pc(env->mem_io_pc);
957 if (current_tb == tb &&
958 (current_tb->cflags & CF_COUNT_MASK) != 1) {
959 /* If we are modifying the current TB, we must stop
960 its execution. We could be more precise by checking
961 that the modification is after the current PC, but it
962 would require a specialized function to partially
963 restore the CPU state */
965 current_tb_modified = 1;
966 cpu_restore_state(current_tb, env,
967 env->mem_io_pc, NULL);
968 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
969 &current_flags);
971 #endif /* TARGET_HAS_PRECISE_SMC */
972 /* we need to do that to handle the case where a signal
973 occurs while doing tb_phys_invalidate() */
974 saved_tb = NULL;
975 if (env) {
976 saved_tb = env->current_tb;
977 env->current_tb = NULL;
979 tb_phys_invalidate(tb, -1);
980 if (env) {
981 env->current_tb = saved_tb;
982 if (env->interrupt_request && env->current_tb)
983 cpu_interrupt(env, env->interrupt_request);
986 tb = tb_next;
988 #if !defined(CONFIG_USER_ONLY)
989 /* if no code remaining, no need to continue to use slow writes */
990 if (!p->first_tb) {
991 invalidate_page_bitmap(p);
992 if (is_cpu_write_access) {
993 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
996 #endif
997 #ifdef TARGET_HAS_PRECISE_SMC
998 if (current_tb_modified) {
999 /* we generate a block containing just the instruction
1000 modifying the memory. It will ensure that it cannot modify
1001 itself */
1002 env->current_tb = NULL;
1003 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1004 cpu_resume_from_signal(env, NULL);
1006 #endif
1009 /* len must be <= 8 and start must be a multiple of len */
1010 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1012 PageDesc *p;
1013 int offset, b;
1014 #if 0
1015 if (1) {
1016 if (loglevel) {
1017 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1018 cpu_single_env->mem_io_vaddr, len,
1019 cpu_single_env->eip,
1020 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1023 #endif
1024 p = page_find(start >> TARGET_PAGE_BITS);
1025 if (!p)
1026 return;
1027 if (p->code_bitmap) {
1028 offset = start & ~TARGET_PAGE_MASK;
1029 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1030 if (b & ((1 << len) - 1))
1031 goto do_invalidate;
1032 } else {
1033 do_invalidate:
1034 tb_invalidate_phys_page_range(start, start + len, 1);
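/* Illustrative example: for a 4 byte write at page offset 0x124 the code
   above computes b = code_bitmap[0x24] >> 4 and only falls through to
   tb_invalidate_phys_page_range() if one of the 4 corresponding bits is
   set, i.e. if translated code really overlaps the written bytes. */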
1038 #if !defined(CONFIG_SOFTMMU)
1039 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1040 unsigned long pc, void *puc)
1042 TranslationBlock *tb;
1043 PageDesc *p;
1044 int n;
1045 #ifdef TARGET_HAS_PRECISE_SMC
1046 TranslationBlock *current_tb = NULL;
1047 CPUState *env = cpu_single_env;
1048 int current_tb_modified = 0;
1049 target_ulong current_pc = 0;
1050 target_ulong current_cs_base = 0;
1051 int current_flags = 0;
1052 #endif
1054 addr &= TARGET_PAGE_MASK;
1055 p = page_find(addr >> TARGET_PAGE_BITS);
1056 if (!p)
1057 return;
1058 tb = p->first_tb;
1059 #ifdef TARGET_HAS_PRECISE_SMC
1060 if (tb && pc != 0) {
1061 current_tb = tb_find_pc(pc);
1063 #endif
1064 while (tb != NULL) {
1065 n = (long)tb & 3;
1066 tb = (TranslationBlock *)((long)tb & ~3);
1067 #ifdef TARGET_HAS_PRECISE_SMC
1068 if (current_tb == tb &&
1069 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1070 /* If we are modifying the current TB, we must stop
1071 its execution. We could be more precise by checking
1072 that the modification is after the current PC, but it
1073 would require a specialized function to partially
1074 restore the CPU state */
1076 current_tb_modified = 1;
1077 cpu_restore_state(current_tb, env, pc, puc);
1078 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1079 &current_flags);
1081 #endif /* TARGET_HAS_PRECISE_SMC */
1082 tb_phys_invalidate(tb, addr);
1083 tb = tb->page_next[n];
1085 p->first_tb = NULL;
1086 #ifdef TARGET_HAS_PRECISE_SMC
1087 if (current_tb_modified) {
1088 /* we generate a block containing just the instruction
1089 modifying the memory. It will ensure that it cannot modify
1090 itself */
1091 env->current_tb = NULL;
1092 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1093 cpu_resume_from_signal(env, puc);
1095 #endif
1097 #endif
1099 /* add the tb in the target page and protect it if necessary */
1100 static inline void tb_alloc_page(TranslationBlock *tb,
1101 unsigned int n, target_ulong page_addr)
1103 PageDesc *p;
1104 TranslationBlock *last_first_tb;
1106 tb->page_addr[n] = page_addr;
1107 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1108 tb->page_next[n] = p->first_tb;
1109 last_first_tb = p->first_tb;
1110 p->first_tb = (TranslationBlock *)((long)tb | n);
1111 invalidate_page_bitmap(p);
1113 #if defined(TARGET_HAS_SMC) || 1
1115 #if defined(CONFIG_USER_ONLY)
1116 if (p->flags & PAGE_WRITE) {
1117 target_ulong addr;
1118 PageDesc *p2;
1119 int prot;
1121 /* force the host page as non writable (writes will have a
1122 page fault + mprotect overhead) */
1123 page_addr &= qemu_host_page_mask;
1124 prot = 0;
1125 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1126 addr += TARGET_PAGE_SIZE) {
1128 p2 = page_find (addr >> TARGET_PAGE_BITS);
1129 if (!p2)
1130 continue;
1131 prot |= p2->flags;
1132 p2->flags &= ~PAGE_WRITE;
1133 page_get_flags(addr);
1135 mprotect(g2h(page_addr), qemu_host_page_size,
1136 (prot & PAGE_BITS) & ~PAGE_WRITE);
1137 #ifdef DEBUG_TB_INVALIDATE
1138 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1139 page_addr);
1140 #endif
1142 #else
1143 /* if some code is already present, then the pages are already
1144 protected. So we handle the case where only the first TB is
1145 allocated in a physical page */
1146 if (!last_first_tb) {
1147 tlb_protect_code(page_addr);
1149 #endif
1151 #endif /* TARGET_HAS_SMC */
1154 /* Allocate a new translation block. Flush the translation buffer if
1155 too many translation blocks or too much generated code. */
1156 TranslationBlock *tb_alloc(target_ulong pc)
1158 TranslationBlock *tb;
1160 if (nb_tbs >= code_gen_max_blocks ||
1161 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1162 return NULL;
1163 tb = &tbs[nb_tbs++];
1164 tb->pc = pc;
1165 tb->cflags = 0;
1166 return tb;
1169 void tb_free(TranslationBlock *tb)
1171     /* In practice this is mostly used for single-use temporary TBs.
1172        Ignore the hard cases and just back up if this TB happens to
1173 be the last one generated. */
1174 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1175 code_gen_ptr = tb->tc_ptr;
1176 nb_tbs--;
1180 /* add a new TB and link it to the physical page tables. phys_page2 is
1181 (-1) to indicate that only one page contains the TB. */
1182 void tb_link_phys(TranslationBlock *tb,
1183 target_ulong phys_pc, target_ulong phys_page2)
1185 unsigned int h;
1186 TranslationBlock **ptb;
1188 /* Grab the mmap lock to stop another thread invalidating this TB
1189 before we are done. */
1190 mmap_lock();
1191 /* add in the physical hash table */
1192 h = tb_phys_hash_func(phys_pc);
1193 ptb = &tb_phys_hash[h];
1194 tb->phys_hash_next = *ptb;
1195 *ptb = tb;
1197 /* add in the page list */
1198 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1199 if (phys_page2 != -1)
1200 tb_alloc_page(tb, 1, phys_page2);
1201 else
1202 tb->page_addr[1] = -1;
1204 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1205 tb->jmp_next[0] = NULL;
1206 tb->jmp_next[1] = NULL;
1208 /* init original jump addresses */
1209 if (tb->tb_next_offset[0] != 0xffff)
1210 tb_reset_jump(tb, 0);
1211 if (tb->tb_next_offset[1] != 0xffff)
1212 tb_reset_jump(tb, 1);
1214 #ifdef DEBUG_TB_CHECK
1215 tb_page_check();
1216 #endif
1217 mmap_unlock();
1220 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1221 tb[1].tc_ptr. Return NULL if not found */
1222 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1224 int m_min, m_max, m;
1225 unsigned long v;
1226 TranslationBlock *tb;
1228 if (nb_tbs <= 0)
1229 return NULL;
1230 if (tc_ptr < (unsigned long)code_gen_buffer ||
1231 tc_ptr >= (unsigned long)code_gen_ptr)
1232 return NULL;
1233 /* binary search (cf Knuth) */
1234 m_min = 0;
1235 m_max = nb_tbs - 1;
1236 while (m_min <= m_max) {
1237 m = (m_min + m_max) >> 1;
1238 tb = &tbs[m];
1239 v = (unsigned long)tb->tc_ptr;
1240 if (v == tc_ptr)
1241 return tb;
1242 else if (tc_ptr < v) {
1243 m_max = m - 1;
1244 } else {
1245 m_min = m + 1;
1248 return &tbs[m_max];
1251 static void tb_reset_jump_recursive(TranslationBlock *tb);
1253 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1255 TranslationBlock *tb1, *tb_next, **ptb;
1256 unsigned int n1;
1258 tb1 = tb->jmp_next[n];
1259 if (tb1 != NULL) {
1260 /* find head of list */
1261 for(;;) {
1262 n1 = (long)tb1 & 3;
1263 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1264 if (n1 == 2)
1265 break;
1266 tb1 = tb1->jmp_next[n1];
1268         /* we are now sure that tb jumps to tb1 */
1269 tb_next = tb1;
1271 /* remove tb from the jmp_first list */
1272 ptb = &tb_next->jmp_first;
1273 for(;;) {
1274 tb1 = *ptb;
1275 n1 = (long)tb1 & 3;
1276 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1277 if (n1 == n && tb1 == tb)
1278 break;
1279 ptb = &tb1->jmp_next[n1];
1281 *ptb = tb->jmp_next[n];
1282 tb->jmp_next[n] = NULL;
1284 /* suppress the jump to next tb in generated code */
1285 tb_reset_jump(tb, n);
1287 /* suppress jumps in the tb on which we could have jumped */
1288 tb_reset_jump_recursive(tb_next);
1292 static void tb_reset_jump_recursive(TranslationBlock *tb)
1294 tb_reset_jump_recursive2(tb, 0);
1295 tb_reset_jump_recursive2(tb, 1);
1298 #if defined(TARGET_HAS_ICE)
1299 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1301 target_phys_addr_t addr;
1302 target_ulong pd;
1303 ram_addr_t ram_addr;
1304 PhysPageDesc *p;
1306 addr = cpu_get_phys_page_debug(env, pc);
1307 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1308 if (!p) {
1309 pd = IO_MEM_UNASSIGNED;
1310 } else {
1311 pd = p->phys_offset;
1313 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1314 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1316 #endif
1318 /* Add a watchpoint. */
1319 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1320 int flags, CPUWatchpoint **watchpoint)
1322 target_ulong len_mask = ~(len - 1);
1323 CPUWatchpoint *wp;
1325 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1326 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1327 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1328 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1329 return -EINVAL;
1331 wp = qemu_malloc(sizeof(*wp));
1332 if (!wp)
1333 return -ENOMEM;
1335 wp->vaddr = addr;
1336 wp->len_mask = len_mask;
1337 wp->flags = flags;
1339 /* keep all GDB-injected watchpoints in front */
1340 if (flags & BP_GDB)
1341 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1342 else
1343 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1345 tlb_flush_page(env, addr);
1347 if (watchpoint)
1348 *watchpoint = wp;
1349 return 0;
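/* Illustrative example: a 4 byte watchpoint gives len_mask = ~3, so the
   check above rejects an unaligned address such as 0x1002 and accepts
   0x1004; only power-of-2 lengths up to 8 are allowed. */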
1352 /* Remove a specific watchpoint. */
1353 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1354 int flags)
1356 target_ulong len_mask = ~(len - 1);
1357 CPUWatchpoint *wp;
1359 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1360 if (addr == wp->vaddr && len_mask == wp->len_mask
1361 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1362 cpu_watchpoint_remove_by_ref(env, wp);
1363 return 0;
1366 return -ENOENT;
1369 /* Remove a specific watchpoint by reference. */
1370 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1372 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1374 tlb_flush_page(env, watchpoint->vaddr);
1376 qemu_free(watchpoint);
1379 /* Remove all matching watchpoints. */
1380 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1382 CPUWatchpoint *wp, *next;
1384 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1385 if (wp->flags & mask)
1386 cpu_watchpoint_remove_by_ref(env, wp);
1390 /* Add a breakpoint. */
1391 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1392 CPUBreakpoint **breakpoint)
1394 #if defined(TARGET_HAS_ICE)
1395 CPUBreakpoint *bp;
1397 bp = qemu_malloc(sizeof(*bp));
1398 if (!bp)
1399 return -ENOMEM;
1401 bp->pc = pc;
1402 bp->flags = flags;
1404 /* keep all GDB-injected breakpoints in front */
1405 if (flags & BP_GDB)
1406 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1407 else
1408 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1410 if (kvm_enabled())
1411 kvm_update_debugger(env);
1413 breakpoint_invalidate(env, pc);
1415 if (breakpoint)
1416 *breakpoint = bp;
1417 return 0;
1418 #else
1419 return -ENOSYS;
1420 #endif
1423 /* Remove a specific breakpoint. */
1424 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1426 #if defined(TARGET_HAS_ICE)
1427 CPUBreakpoint *bp;
1429 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1430 if (bp->pc == pc && bp->flags == flags) {
1431 cpu_breakpoint_remove_by_ref(env, bp);
1432 return 0;
1435 return -ENOENT;
1436 #else
1437 return -ENOSYS;
1438 #endif
1441 /* Remove a specific breakpoint by reference. */
1442 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1444 #if defined(TARGET_HAS_ICE)
1445 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1447 if (kvm_enabled())
1448 kvm_update_debugger(env);
1450 breakpoint_invalidate(env, breakpoint->pc);
1452 qemu_free(breakpoint);
1453 #endif
1456 /* Remove all matching breakpoints. */
1457 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1459 #if defined(TARGET_HAS_ICE)
1460 CPUBreakpoint *bp, *next;
1462 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1463 if (bp->flags & mask)
1464 cpu_breakpoint_remove_by_ref(env, bp);
1466 #endif
1469 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1470 CPU loop after each instruction */
1471 void cpu_single_step(CPUState *env, int enabled)
1473 #if defined(TARGET_HAS_ICE)
1474 if (env->singlestep_enabled != enabled) {
1475 env->singlestep_enabled = enabled;
1476         /* must flush all the translated code to avoid inconsistencies */
1477 /* XXX: only flush what is necessary */
1478 tb_flush(env);
1480 if (kvm_enabled())
1481 kvm_update_debugger(env);
1482 #endif
1485 /* enable or disable low levels log */
1486 void cpu_set_log(int log_flags)
1488 loglevel = log_flags;
1489 if (loglevel && !logfile) {
1490 logfile = fopen(logfilename, log_append ? "a" : "w");
1491 if (!logfile) {
1492 perror(logfilename);
1493 _exit(1);
1495 #if !defined(CONFIG_SOFTMMU)
1496 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1498 static char logfile_buf[4096];
1499 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1501 #else
1502 setvbuf(logfile, NULL, _IOLBF, 0);
1503 #endif
1504 log_append = 1;
1506 if (!loglevel && logfile) {
1507 fclose(logfile);
1508 logfile = NULL;
1512 void cpu_set_log_filename(const char *filename)
1514 logfilename = strdup(filename);
1515 if (logfile) {
1516 fclose(logfile);
1517 logfile = NULL;
1519 cpu_set_log(loglevel);
1522 /* mask must never be zero, except for A20 change call */
1523 void cpu_interrupt(CPUState *env, int mask)
1525 #if !defined(USE_NPTL)
1526 TranslationBlock *tb;
1527 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1528 #endif
1529 int old_mask;
1531 old_mask = env->interrupt_request;
1532 /* FIXME: This is probably not threadsafe. A different thread could
1533 be in the middle of a read-modify-write operation. */
1534 env->interrupt_request |= mask;
1535 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1536 kvm_update_interrupt_request(env);
1537 #if defined(USE_NPTL)
1538 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1539 problem and hope the cpu will stop of its own accord. For userspace
1540 emulation this often isn't actually as bad as it sounds. Often
1541 signals are used primarily to interrupt blocking syscalls. */
1542 #else
1543 if (use_icount) {
1544 env->icount_decr.u16.high = 0xffff;
1545 #ifndef CONFIG_USER_ONLY
1546 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1547 an async event happened and we need to process it. */
1548 if (!can_do_io(env)
1549 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1550 cpu_abort(env, "Raised interrupt while not in I/O function");
1552 #endif
1553 } else {
1554 tb = env->current_tb;
1555 /* if the cpu is currently executing code, we must unlink it and
1556            all the potentially executing TBs */
1557 if (tb && !testandset(&interrupt_lock)) {
1558 env->current_tb = NULL;
1559 tb_reset_jump_recursive(tb);
1560 resetlock(&interrupt_lock);
1563 #endif
1566 void cpu_reset_interrupt(CPUState *env, int mask)
1568 env->interrupt_request &= ~mask;
1571 const CPULogItem cpu_log_items[] = {
1572 { CPU_LOG_TB_OUT_ASM, "out_asm",
1573 "show generated host assembly code for each compiled TB" },
1574 { CPU_LOG_TB_IN_ASM, "in_asm",
1575 "show target assembly code for each compiled TB" },
1576 { CPU_LOG_TB_OP, "op",
1577 "show micro ops for each compiled TB" },
1578 { CPU_LOG_TB_OP_OPT, "op_opt",
1579 "show micro ops "
1580 #ifdef TARGET_I386
1581 "before eflags optimization and "
1582 #endif
1583 "after liveness analysis" },
1584 { CPU_LOG_INT, "int",
1585 "show interrupts/exceptions in short format" },
1586 { CPU_LOG_EXEC, "exec",
1587 "show trace before each executed TB (lots of logs)" },
1588 { CPU_LOG_TB_CPU, "cpu",
1589 "show CPU state before block translation" },
1590 #ifdef TARGET_I386
1591 { CPU_LOG_PCALL, "pcall",
1592 "show protected mode far calls/returns/exceptions" },
1593 #endif
1594 #ifdef DEBUG_IOPORT
1595 { CPU_LOG_IOPORT, "ioport",
1596 "show all i/o ports accesses" },
1597 #endif
1598 { 0, NULL, NULL },
1601 static int cmp1(const char *s1, int n, const char *s2)
1603 if (strlen(s2) != n)
1604 return 0;
1605 return memcmp(s1, s2, n) == 0;
1608 /* takes a comma separated list of log masks. Return 0 if error. */
1609 int cpu_str_to_log_mask(const char *str)
1611 const CPULogItem *item;
1612 int mask;
1613 const char *p, *p1;
1615 p = str;
1616 mask = 0;
1617 for(;;) {
1618 p1 = strchr(p, ',');
1619 if (!p1)
1620 p1 = p + strlen(p);
1621 if(cmp1(p,p1-p,"all")) {
1622 for(item = cpu_log_items; item->mask != 0; item++) {
1623 mask |= item->mask;
1625 } else {
1626 for(item = cpu_log_items; item->mask != 0; item++) {
1627 if (cmp1(p, p1 - p, item->name))
1628 goto found;
1630 return 0;
1632 found:
1633 mask |= item->mask;
1634 if (*p1 != ',')
1635 break;
1636 p = p1 + 1;
1638 return mask;
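/* Usage example: cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, "all" ORs in every entry of
   cpu_log_items, and any unknown name makes the whole call return 0. */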
1641 void cpu_abort(CPUState *env, const char *fmt, ...)
1643 va_list ap;
1644 va_list ap2;
1646 va_start(ap, fmt);
1647 va_copy(ap2, ap);
1648 fprintf(stderr, "qemu: fatal: ");
1649 vfprintf(stderr, fmt, ap);
1650 fprintf(stderr, "\n");
1651 #ifdef TARGET_I386
1652 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1653 #else
1654 cpu_dump_state(env, stderr, fprintf, 0);
1655 #endif
1656 if (logfile) {
1657 fprintf(logfile, "qemu: fatal: ");
1658 vfprintf(logfile, fmt, ap2);
1659 fprintf(logfile, "\n");
1660 #ifdef TARGET_I386
1661 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1662 #else
1663 cpu_dump_state(env, logfile, fprintf, 0);
1664 #endif
1665 fflush(logfile);
1666 fclose(logfile);
1668 va_end(ap2);
1669 va_end(ap);
1670 abort();
1673 CPUState *cpu_copy(CPUState *env)
1675 CPUState *new_env = cpu_init(env->cpu_model_str);
1676 /* preserve chaining and index */
1677 CPUState *next_cpu = new_env->next_cpu;
1678 int cpu_index = new_env->cpu_index;
1679 memcpy(new_env, env, sizeof(CPUState));
1680 new_env->next_cpu = next_cpu;
1681 new_env->cpu_index = cpu_index;
1682 return new_env;
1685 #if !defined(CONFIG_USER_ONLY)
1687 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1689 unsigned int i;
1691 /* Discard jump cache entries for any tb which might potentially
1692 overlap the flushed page. */
1693 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1694 memset (&env->tb_jmp_cache[i], 0,
1695 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1697 i = tb_jmp_cache_hash_page(addr);
1698 memset (&env->tb_jmp_cache[i], 0,
1699 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1702 /* NOTE: if flush_global is true, also flush global entries (not
1703 implemented yet) */
1704 void tlb_flush(CPUState *env, int flush_global)
1706 int i;
1708 #if defined(DEBUG_TLB)
1709 printf("tlb_flush:\n");
1710 #endif
1711 /* must reset current TB so that interrupts cannot modify the
1712 links while we are modifying them */
1713 env->current_tb = NULL;
1715 for(i = 0; i < CPU_TLB_SIZE; i++) {
1716 env->tlb_table[0][i].addr_read = -1;
1717 env->tlb_table[0][i].addr_write = -1;
1718 env->tlb_table[0][i].addr_code = -1;
1719 env->tlb_table[1][i].addr_read = -1;
1720 env->tlb_table[1][i].addr_write = -1;
1721 env->tlb_table[1][i].addr_code = -1;
1722 #if (NB_MMU_MODES >= 3)
1723 env->tlb_table[2][i].addr_read = -1;
1724 env->tlb_table[2][i].addr_write = -1;
1725 env->tlb_table[2][i].addr_code = -1;
1726 #if (NB_MMU_MODES == 4)
1727 env->tlb_table[3][i].addr_read = -1;
1728 env->tlb_table[3][i].addr_write = -1;
1729 env->tlb_table[3][i].addr_code = -1;
1730 #endif
1731 #endif
1734 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1736 #ifdef USE_KQEMU
1737 if (env->kqemu_enabled) {
1738 kqemu_flush(env, flush_global);
1740 #endif
1741 tlb_flush_count++;
1744 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1746 if (addr == (tlb_entry->addr_read &
1747 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1748 addr == (tlb_entry->addr_write &
1749 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1750 addr == (tlb_entry->addr_code &
1751 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1752 tlb_entry->addr_read = -1;
1753 tlb_entry->addr_write = -1;
1754 tlb_entry->addr_code = -1;
1758 void tlb_flush_page(CPUState *env, target_ulong addr)
1760 int i;
1762 #if defined(DEBUG_TLB)
1763 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1764 #endif
1765 /* must reset current TB so that interrupts cannot modify the
1766 links while we are modifying them */
1767 env->current_tb = NULL;
1769 addr &= TARGET_PAGE_MASK;
1770 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1771 tlb_flush_entry(&env->tlb_table[0][i], addr);
1772 tlb_flush_entry(&env->tlb_table[1][i], addr);
1773 #if (NB_MMU_MODES >= 3)
1774 tlb_flush_entry(&env->tlb_table[2][i], addr);
1775 #if (NB_MMU_MODES == 4)
1776 tlb_flush_entry(&env->tlb_table[3][i], addr);
1777 #endif
1778 #endif
1780 tlb_flush_jmp_cache(env, addr);
1782 #ifdef USE_KQEMU
1783 if (env->kqemu_enabled) {
1784 kqemu_flush_page(env, addr);
1786 #endif
1789 /* update the TLBs so that writes to code in the virtual page 'addr'
1790 can be detected */
1791 static void tlb_protect_code(ram_addr_t ram_addr)
1793 cpu_physical_memory_reset_dirty(ram_addr,
1794 ram_addr + TARGET_PAGE_SIZE,
1795 CODE_DIRTY_FLAG);
1798 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1799 tested for self modifying code */
1800 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1801 target_ulong vaddr)
1803 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1806 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1807 unsigned long start, unsigned long length)
1809 unsigned long addr;
1810 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1811 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1812 if ((addr - start) < length) {
1813 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
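/* phys_ram_dirty keeps one byte of dirty flags per target page, indexed
   by ram_addr >> TARGET_PAGE_BITS.  cpu_physical_memory_reset_dirty()
   below clears the requested flag bits over a range and then re-arms the
   write TLB entries (TLB_NOTDIRTY) so that the flags get set again on
   the next write to those pages. */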
1818 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1819 int dirty_flags)
1821 CPUState *env;
1822 unsigned long length, start1;
1823 int i, mask, len;
1824 uint8_t *p;
1826 start &= TARGET_PAGE_MASK;
1827 end = TARGET_PAGE_ALIGN(end);
1829 length = end - start;
1830 if (length == 0)
1831 return;
1832 len = length >> TARGET_PAGE_BITS;
1833 #ifdef USE_KQEMU
1834 /* XXX: should not depend on cpu context */
1835 env = first_cpu;
1836 if (env->kqemu_enabled) {
1837 ram_addr_t addr;
1838 addr = start;
1839 for(i = 0; i < len; i++) {
1840 kqemu_set_notdirty(env, addr);
1841 addr += TARGET_PAGE_SIZE;
1844 #endif
1845 mask = ~dirty_flags;
1846 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1847 for(i = 0; i < len; i++)
1848 p[i] &= mask;
1850 /* we modify the TLB cache so that the dirty bit will be set again
1851 when accessing the range */
1852 start1 = start + (unsigned long)phys_ram_base;
1853 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1854 for(i = 0; i < CPU_TLB_SIZE; i++)
1855 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1856 for(i = 0; i < CPU_TLB_SIZE; i++)
1857 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1858 #if (NB_MMU_MODES >= 3)
1859 for(i = 0; i < CPU_TLB_SIZE; i++)
1860 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1861 #if (NB_MMU_MODES == 4)
1862 for(i = 0; i < CPU_TLB_SIZE; i++)
1863 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1864 #endif
1865 #endif
1869 int cpu_physical_memory_set_dirty_tracking(int enable)
1871 int r=0;
1873 if (kvm_enabled())
1874 r = kvm_physical_memory_set_dirty_tracking(enable);
1875 in_migration = enable;
1876 return r;
1879 int cpu_physical_memory_get_dirty_tracking(void)
1881 return in_migration;
1884 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1886 if (kvm_enabled())
1887 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1890 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1892 ram_addr_t ram_addr;
1894 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1895 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1896 tlb_entry->addend - (unsigned long)phys_ram_base;
1897 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1898 tlb_entry->addr_write |= TLB_NOTDIRTY;
1903 /* update the TLB according to the current state of the dirty bits */
1904 void cpu_tlb_update_dirty(CPUState *env)
1906 int i;
1907 for(i = 0; i < CPU_TLB_SIZE; i++)
1908 tlb_update_dirty(&env->tlb_table[0][i]);
1909 for(i = 0; i < CPU_TLB_SIZE; i++)
1910 tlb_update_dirty(&env->tlb_table[1][i]);
1911 #if (NB_MMU_MODES >= 3)
1912 for(i = 0; i < CPU_TLB_SIZE; i++)
1913 tlb_update_dirty(&env->tlb_table[2][i]);
1914 #if (NB_MMU_MODES == 4)
1915 for(i = 0; i < CPU_TLB_SIZE; i++)
1916 tlb_update_dirty(&env->tlb_table[3][i]);
1917 #endif
1918 #endif
1921 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1923 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1924 tlb_entry->addr_write = vaddr;
1927 /* update the TLB corresponding to virtual page vaddr
1928 so that it is no longer dirty */
1929 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1931 int i;
1933 vaddr &= TARGET_PAGE_MASK;
1934 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1935 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1936 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1937 #if (NB_MMU_MODES >= 3)
1938 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1939 #if (NB_MMU_MODES == 4)
1940 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1941 #endif
1942 #endif
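/* Rough sketch of the softmmu TLB entry fields filled in below (see the
   CPUTLBEntry definition): addr_read/addr_write/addr_code hold the
   page-aligned guest virtual address combined with flag bits in the low
   bits (TLB_INVALID_MASK, TLB_NOTDIRTY, TLB_MMIO), addend is
   "host address - guest vaddr" so that vaddr + addend yields the host
   pointer for a RAM page, and env->iotlb[] records which I/O slot the
   slow path should use for MMIO, ROM and not-dirty pages. */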
1945 /* add a new TLB entry. At most one entry for a given virtual address
1946 is permitted. Return 0 if OK or 2 if the page could not be mapped
1947 (can only happen in non SOFTMMU mode for I/O pages or pages
1948 conflicting with the host address space). */
1949 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1950 target_phys_addr_t paddr, int prot,
1951 int mmu_idx, int is_softmmu)
1953 PhysPageDesc *p;
1954 unsigned long pd;
1955 unsigned int index;
1956 target_ulong address;
1957 target_ulong code_address;
1958 target_phys_addr_t addend;
1959 int ret;
1960 CPUTLBEntry *te;
1961 CPUWatchpoint *wp;
1962 target_phys_addr_t iotlb;
1964 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1965 if (!p) {
1966 pd = IO_MEM_UNASSIGNED;
1967 } else {
1968 pd = p->phys_offset;
1970 #if defined(DEBUG_TLB)
1971 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1972 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1973 #endif
1975 ret = 0;
1976 address = vaddr;
1977 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1978 /* IO memory case (romd handled later) */
1979 address |= TLB_MMIO;
1981 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1982 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1983 /* Normal RAM. */
1984 iotlb = pd & TARGET_PAGE_MASK;
1985 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1986 iotlb |= IO_MEM_NOTDIRTY;
1987 else
1988 iotlb |= IO_MEM_ROM;
1989 } else {
1990         /* IO handlers are currently passed a physical address.
1991 It would be nice to pass an offset from the base address
1992 of that region. This would avoid having to special case RAM,
1993 and avoid full address decoding in every device.
1994 We can't use the high bits of pd for this because
1995 IO_MEM_ROMD uses these as a ram address. */
1996 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1999 code_address = address;
2000 /* Make accesses to pages with watchpoints go via the
2001 watchpoint trap routines. */
2002 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2003 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2004 iotlb = io_mem_watch + paddr;
2005 /* TODO: The memory case can be optimized by not trapping
2006 reads of pages with a write breakpoint. */
2007 address |= TLB_MMIO;
2011 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2012 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2013 te = &env->tlb_table[mmu_idx][index];
2014 te->addend = addend - vaddr;
2015 if (prot & PAGE_READ) {
2016 te->addr_read = address;
2017 } else {
2018 te->addr_read = -1;
2021 if (prot & PAGE_EXEC) {
2022 te->addr_code = code_address;
2023 } else {
2024 te->addr_code = -1;
2026 if (prot & PAGE_WRITE) {
2027 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2028 (pd & IO_MEM_ROMD)) {
2029 /* Write access calls the I/O callback. */
2030 te->addr_write = address | TLB_MMIO;
2031 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2032 !cpu_physical_memory_is_dirty(pd)) {
2033 te->addr_write = address | TLB_NOTDIRTY;
2034 } else {
2035 te->addr_write = address;
2037 } else {
2038 te->addr_write = -1;
2040 return ret;
2043 #else
2045 void tlb_flush(CPUState *env, int flush_global)
2049 void tlb_flush_page(CPUState *env, target_ulong addr)
2053 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2054 target_phys_addr_t paddr, int prot,
2055 int mmu_idx, int is_softmmu)
2057 return 0;
2060 /* dump memory mappings */
2061 void page_dump(FILE *f)
2063 unsigned long start, end;
2064 int i, j, prot, prot1;
2065 PageDesc *p;
2067 fprintf(f, "%-8s %-8s %-8s %s\n",
2068 "start", "end", "size", "prot");
2069 start = -1;
2070 end = -1;
2071 prot = 0;
2072 for(i = 0; i <= L1_SIZE; i++) {
2073 if (i < L1_SIZE)
2074 p = l1_map[i];
2075 else
2076 p = NULL;
2077 for(j = 0;j < L2_SIZE; j++) {
2078 if (!p)
2079 prot1 = 0;
2080 else
2081 prot1 = p[j].flags;
2082 if (prot1 != prot) {
2083 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2084 if (start != -1) {
2085 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2086 start, end, end - start,
2087 prot & PAGE_READ ? 'r' : '-',
2088 prot & PAGE_WRITE ? 'w' : '-',
2089 prot & PAGE_EXEC ? 'x' : '-');
2091 if (prot1 != 0)
2092 start = end;
2093 else
2094 start = -1;
2095 prot = prot1;
2097 if (!p)
2098 break;
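/* Illustrative output only (addresses invented for the example); each row is
 * start-end, size and r/w/x flags, much like /proc/self/maps:
 *
 *   start    end      size     prot
 *   00400000-00452000 00052000 r-x
 *   00651000-00652000 00001000 rw-
 */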
2103 int page_get_flags(target_ulong address)
2105 PageDesc *p;
2107 p = page_find(address >> TARGET_PAGE_BITS);
2108 if (!p)
2109 return 0;
2110 return p->flags;
2113 /* modify the flags of a page and invalidate the code if
2114 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2115 depending on PAGE_WRITE */
2116 void page_set_flags(target_ulong start, target_ulong end, int flags)
2118 PageDesc *p;
2119 target_ulong addr;
2121 /* mmap_lock should already be held. */
2122 start = start & TARGET_PAGE_MASK;
2123 end = TARGET_PAGE_ALIGN(end);
2124 if (flags & PAGE_WRITE)
2125 flags |= PAGE_WRITE_ORG;
2126 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2127 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2128 /* We may be called for host regions that are outside guest
2129 address space. */
2130 if (!p)
2131 return;
2132 /* if the write protection is set, then we invalidate the code
2133 inside */
2134 if (!(p->flags & PAGE_WRITE) &&
2135 (flags & PAGE_WRITE) &&
2136 p->first_tb) {
2137 tb_invalidate_phys_page(addr, 0, NULL);
2139 p->flags = flags;
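/* For illustration only: a user-mode mmap()/mprotect() emulation path would
 * record new guest protections roughly like this.  host_prot, guest_start and
 * guest_len are placeholders, not names from this tree's linux-user code:
 */
#if 0
    int page_flags = PAGE_VALID;
    if (host_prot & PROT_READ)
        page_flags |= PAGE_READ;
    if (host_prot & PROT_WRITE)
        page_flags |= PAGE_WRITE;   /* PAGE_WRITE_ORG is added for us above */
    if (host_prot & PROT_EXEC)
        page_flags |= PAGE_EXEC;
    page_set_flags(guest_start, guest_start + guest_len, page_flags);
#endif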
2143 int page_check_range(target_ulong start, target_ulong len, int flags)
2145 PageDesc *p;
2146 target_ulong end;
2147 target_ulong addr;
2149 if (start + len < start)
2150 /* we've wrapped around */
2151 return -1;
2153 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2154 start = start & TARGET_PAGE_MASK;
2156 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2157 p = page_find(addr >> TARGET_PAGE_BITS);
2158 if( !p )
2159 return -1;
2160 if( !(p->flags & PAGE_VALID) )
2161 return -1;
2163 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2164 return -1;
2165 if (flags & PAGE_WRITE) {
2166 if (!(p->flags & PAGE_WRITE_ORG))
2167 return -1;
2168 /* unprotect the page if it was put read-only because it
2169 contains translated code */
2170 if (!(p->flags & PAGE_WRITE)) {
2171 if (!page_unprotect(addr, 0, NULL))
2172 return -1;
2174 return 0;
2177 return 0;
2180 /* called from signal handler: invalidate the code and unprotect the
2181 page. Return TRUE if the fault was successfully handled. */
2182 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2184 unsigned int page_index, prot, pindex;
2185 PageDesc *p, *p1;
2186 target_ulong host_start, host_end, addr;
2188 /* Technically this isn't safe inside a signal handler. However, we
2189 know this only ever happens in a synchronous SEGV handler, so in
2190 practice it seems to be ok. */
2191 mmap_lock();
2193 host_start = address & qemu_host_page_mask;
2194 page_index = host_start >> TARGET_PAGE_BITS;
2195 p1 = page_find(page_index);
2196 if (!p1) {
2197 mmap_unlock();
2198 return 0;
2200 host_end = host_start + qemu_host_page_size;
2201 p = p1;
2202 prot = 0;
2203 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2204 prot |= p->flags;
2205 p++;
2207 /* if the page was really writable, then we change its
2208 protection back to writable */
2209 if (prot & PAGE_WRITE_ORG) {
2210 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2211 if (!(p1[pindex].flags & PAGE_WRITE)) {
2212 mprotect((void *)g2h(host_start), qemu_host_page_size,
2213 (prot & PAGE_BITS) | PAGE_WRITE);
2214 p1[pindex].flags |= PAGE_WRITE;
2215 /* and since the content will be modified, we must invalidate
2216 the corresponding translated code. */
2217 tb_invalidate_phys_page(address, pc, puc);
2218 #ifdef DEBUG_TB_CHECK
2219 tb_invalidate_check(address);
2220 #endif
2221 mmap_unlock();
2222 return 1;
2225 mmap_unlock();
2226 return 0;
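/* For illustration only: the host SEGV path that ends up here.  The guest
 * wrote to a page we write-protected because it contains translated code;
 * if page_unprotect() reports success, the faulting access is simply retried.
 * Shown schematically -- host_fault_addr is a placeholder for the address the
 * per-host signal code extracts from the signal context:
 */
#if 0
    /* inside the host SIGSEGV handling, after decoding the faulting address */
    if (page_unprotect(h2g(host_fault_addr), pc, puc)) {
        return 1;               /* protection restored, re-run the insn */
    }
    /* otherwise it is a real guest fault: hand it to the target MMU code */
#endif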
2229 static inline void tlb_set_dirty(CPUState *env,
2230 unsigned long addr, target_ulong vaddr)
2233 #endif /* defined(CONFIG_USER_ONLY) */
2235 #if !defined(CONFIG_USER_ONLY)
2236 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2237 ram_addr_t memory);
2238 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2239 ram_addr_t orig_memory);
2240 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2241 need_subpage) \
2242 do { \
2243 if (addr > start_addr) \
2244 start_addr2 = 0; \
2245 else { \
2246 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2247 if (start_addr2 > 0) \
2248 need_subpage = 1; \
2251 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2252 end_addr2 = TARGET_PAGE_SIZE - 1; \
2253 else { \
2254 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2255 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2256 need_subpage = 1; \
2258 } while (0)
2260 /* register physical memory. 'size' must be a multiple of the target
2261 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2262 io memory page */
2263 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2264 ram_addr_t size,
2265 ram_addr_t phys_offset)
2267 target_phys_addr_t addr, end_addr;
2268 PhysPageDesc *p;
2269 CPUState *env;
2270 ram_addr_t orig_size = size;
2271 void *subpage;
2273 #ifdef USE_KQEMU
2274 /* XXX: should not depend on cpu context */
2275 env = first_cpu;
2276 if (env->kqemu_enabled) {
2277 kqemu_set_phys_mem(start_addr, size, phys_offset);
2279 #endif
2280 if (kvm_enabled())
2281 kvm_set_phys_mem(start_addr, size, phys_offset);
2283 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2284 end_addr = start_addr + (target_phys_addr_t)size;
2285 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2286 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2287 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2288 ram_addr_t orig_memory = p->phys_offset;
2289 target_phys_addr_t start_addr2, end_addr2;
2290 int need_subpage = 0;
2292 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2293 need_subpage);
2294 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2295 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2296 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2297 &p->phys_offset, orig_memory);
2298 } else {
2299 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2300 >> IO_MEM_SHIFT];
2302 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2303 } else {
2304 p->phys_offset = phys_offset;
2305 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2306 (phys_offset & IO_MEM_ROMD))
2307 phys_offset += TARGET_PAGE_SIZE;
2309 } else {
2310 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2311 p->phys_offset = phys_offset;
2312 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2313 (phys_offset & IO_MEM_ROMD))
2314 phys_offset += TARGET_PAGE_SIZE;
2315 else {
2316 target_phys_addr_t start_addr2, end_addr2;
2317 int need_subpage = 0;
2319 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2320 end_addr2, need_subpage);
2322 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2323 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2324 &p->phys_offset, IO_MEM_UNASSIGNED);
2325 subpage_register(subpage, start_addr2, end_addr2,
2326 phys_offset);
2332 /* since each CPU stores ram addresses in its TLB cache, we must
2333 reset the modified entries */
2334 /* XXX: slow ! */
2335 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2336 tlb_flush(env, 1);
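/* For illustration only: what a board or device typically passes in.  The
 * base addresses, sizes and the mydev_* names are invented for the example:
 */
#if 0
    /* plain RAM: phys_offset is a RAM offset returned by qemu_ram_alloc() */
    ram_addr_t ram_off = qemu_ram_alloc(0x00100000);
    cpu_register_physical_memory(0x00000000, 0x00100000, ram_off | IO_MEM_RAM);

    /* MMIO: phys_offset carries the io index returned by
       cpu_register_io_memory(), so its low bits are non-zero */
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
#endif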
2340 /* XXX: temporary until new memory mapping API */
2341 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2343 PhysPageDesc *p;
2345 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2346 if (!p)
2347 return IO_MEM_UNASSIGNED;
2348 return p->phys_offset;
2351 /* XXX: better than nothing */
2352 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2354 ram_addr_t addr;
2355 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2356 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2357 (uint64_t)size, (uint64_t)phys_ram_size);
2358 abort();
2360 addr = phys_ram_alloc_offset;
2361 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2362 return addr;
2365 void qemu_ram_free(ram_addr_t addr)
2369 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2371 #ifdef DEBUG_UNASSIGNED
2372 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2373 #endif
2374 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2375 do_unassigned_access(addr, 0, 0, 0, 1);
2376 #endif
2377 return 0;
2380 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2382 #ifdef DEBUG_UNASSIGNED
2383 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2384 #endif
2385 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2386 do_unassigned_access(addr, 0, 0, 0, 2);
2387 #endif
2388 return 0;
2391 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2393 #ifdef DEBUG_UNASSIGNED
2394 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2395 #endif
2396 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2397 do_unassigned_access(addr, 0, 0, 0, 4);
2398 #endif
2399 return 0;
2402 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2404 #ifdef DEBUG_UNASSIGNED
2405 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2406 #endif
2407 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2408 do_unassigned_access(addr, 1, 0, 0, 1);
2409 #endif
2412 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2414 #ifdef DEBUG_UNASSIGNED
2415 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2416 #endif
2417 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2418 do_unassigned_access(addr, 1, 0, 0, 2);
2419 #endif
2422 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2424 #ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2426 #endif
2427 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2428 do_unassigned_access(addr, 1, 0, 0, 4);
2429 #endif
2432 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2433 unassigned_mem_readb,
2434 unassigned_mem_readw,
2435 unassigned_mem_readl,
2438 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2439 unassigned_mem_writeb,
2440 unassigned_mem_writew,
2441 unassigned_mem_writel,
2444 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2445 uint32_t val)
2447 int dirty_flags;
2448 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2449 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2450 #if !defined(CONFIG_USER_ONLY)
2451 tb_invalidate_phys_page_fast(ram_addr, 1);
2452 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2453 #endif
2455 stb_p(phys_ram_base + ram_addr, val);
2456 #ifdef USE_KQEMU
2457 if (cpu_single_env->kqemu_enabled &&
2458 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2459 kqemu_modify_page(cpu_single_env, ram_addr);
2460 #endif
2461 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2462 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2463 /* we remove the notdirty callback only if the code has been
2464 flushed */
2465 if (dirty_flags == 0xff)
2466 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2469 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2470 uint32_t val)
2472 int dirty_flags;
2473 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2474 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2475 #if !defined(CONFIG_USER_ONLY)
2476 tb_invalidate_phys_page_fast(ram_addr, 2);
2477 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2478 #endif
2480 stw_p(phys_ram_base + ram_addr, val);
2481 #ifdef USE_KQEMU
2482 if (cpu_single_env->kqemu_enabled &&
2483 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2484 kqemu_modify_page(cpu_single_env, ram_addr);
2485 #endif
2486 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2487 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2488 /* we remove the notdirty callback only if the code has been
2489 flushed */
2490 if (dirty_flags == 0xff)
2491 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2494 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2495 uint32_t val)
2497 int dirty_flags;
2498 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2499 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2500 #if !defined(CONFIG_USER_ONLY)
2501 tb_invalidate_phys_page_fast(ram_addr, 4);
2502 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2503 #endif
2505 stl_p(phys_ram_base + ram_addr, val);
2506 #ifdef USE_KQEMU
2507 if (cpu_single_env->kqemu_enabled &&
2508 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2509 kqemu_modify_page(cpu_single_env, ram_addr);
2510 #endif
2511 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2512 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2513 /* we remove the notdirty callback only if the code has been
2514 flushed */
2515 if (dirty_flags == 0xff)
2516 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
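/* For illustration only: the dirty-byte convention the three helpers above
 * rely on.  Each guest page owns one byte in phys_ram_dirty; separate clients
 * (VGA refresh, migration, code tracking) use separate bits, and 0xff means
 * "dirty for everyone", at which point the slow NOTDIRTY path can be dropped.
 * A sketch of the test (the real inline helper lives in cpu-all.h; treat this
 * as a paraphrase, not a quote):
 */
#if 0
static inline int page_is_fully_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif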
2519 static CPUReadMemoryFunc *error_mem_read[3] = {
2520 NULL, /* never used */
2521 NULL, /* never used */
2522 NULL, /* never used */
2525 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2526 notdirty_mem_writeb,
2527 notdirty_mem_writew,
2528 notdirty_mem_writel,
2531 /* Generate a debug exception if a watchpoint has been hit. */
2532 static void check_watchpoint(int offset, int len_mask, int flags)
2534 CPUState *env = cpu_single_env;
2535 target_ulong pc, cs_base;
2536 TranslationBlock *tb;
2537 target_ulong vaddr;
2538 CPUWatchpoint *wp;
2539 int cpu_flags;
2541 if (env->watchpoint_hit) {
2542 /* We re-entered the check after replacing the TB. Now raise
2543 * the debug interrupt so that it will trigger after the
2544 * current instruction. */
2545 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2546 return;
2548 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2549 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2550 if ((vaddr == (wp->vaddr & len_mask) ||
2551 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2552 wp->flags |= BP_WATCHPOINT_HIT;
2553 if (!env->watchpoint_hit) {
2554 env->watchpoint_hit = wp;
2555 tb = tb_find_pc(env->mem_io_pc);
2556 if (!tb) {
2557 cpu_abort(env, "check_watchpoint: could not find TB for "
2558 "pc=%p", (void *)env->mem_io_pc);
2560 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2561 tb_phys_invalidate(tb, -1);
2562 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2563 env->exception_index = EXCP_DEBUG;
2564 } else {
2565 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2566 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2568 cpu_resume_from_signal(env, NULL);
2570 } else {
2571 wp->flags &= ~BP_WATCHPOINT_HIT;
2576 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2577 so these check for a hit then pass through to the normal out-of-line
2578 phys routines. */
2579 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2581 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2582 return ldub_phys(addr);
2585 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2587 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2588 return lduw_phys(addr);
2591 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2593 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2594 return ldl_phys(addr);
2597 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2598 uint32_t val)
2600 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2601 stb_phys(addr, val);
2604 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2605 uint32_t val)
2607 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2608 stw_phys(addr, val);
2611 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2612 uint32_t val)
2614 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2615 stl_phys(addr, val);
2618 static CPUReadMemoryFunc *watch_mem_read[3] = {
2619 watch_mem_readb,
2620 watch_mem_readw,
2621 watch_mem_readl,
2624 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2625 watch_mem_writeb,
2626 watch_mem_writew,
2627 watch_mem_writel,
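/* Worked example (values invented): a watchpoint covering 0x1004-0x1007 has
 * wp->vaddr = 0x1004 and wp->len_mask = ~0x3.  For a 2-byte access at 0x1006,
 * check_watchpoint() computes vaddr = 0x1006 and 0x1006 & ~0x3 = 0x1004,
 * which equals wp->vaddr, so the watchpoint fires; an access at 0x1008 masks
 * to 0x1008 and does not match.
 */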
2630 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2631 unsigned int len)
2633 uint32_t ret;
2634 unsigned int idx;
2636 idx = SUBPAGE_IDX(addr - mmio->base);
2637 #if defined(DEBUG_SUBPAGE)
2638 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2639 mmio, len, addr, idx);
2640 #endif
2641 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2643 return ret;
2646 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2647 uint32_t value, unsigned int len)
2649 unsigned int idx;
2651 idx = SUBPAGE_IDX(addr - mmio->base);
2652 #if defined(DEBUG_SUBPAGE)
2653 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2654 mmio, len, addr, idx, value);
2655 #endif
2656 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2659 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2661 #if defined(DEBUG_SUBPAGE)
2662 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2663 #endif
2665 return subpage_readlen(opaque, addr, 0);
2668 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2669 uint32_t value)
2671 #if defined(DEBUG_SUBPAGE)
2672 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2673 #endif
2674 subpage_writelen(opaque, addr, value, 0);
2677 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2679 #if defined(DEBUG_SUBPAGE)
2680 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2681 #endif
2683 return subpage_readlen(opaque, addr, 1);
2686 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2687 uint32_t value)
2689 #if defined(DEBUG_SUBPAGE)
2690 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2691 #endif
2692 subpage_writelen(opaque, addr, value, 1);
2695 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2697 #if defined(DEBUG_SUBPAGE)
2698 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2699 #endif
2701 return subpage_readlen(opaque, addr, 2);
2704 static void subpage_writel (void *opaque,
2705 target_phys_addr_t addr, uint32_t value)
2707 #if defined(DEBUG_SUBPAGE)
2708 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2709 #endif
2710 subpage_writelen(opaque, addr, value, 2);
2713 static CPUReadMemoryFunc *subpage_read[] = {
2714 &subpage_readb,
2715 &subpage_readw,
2716 &subpage_readl,
2719 static CPUWriteMemoryFunc *subpage_write[] = {
2720 &subpage_writeb,
2721 &subpage_writew,
2722 &subpage_writel,
2725 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2726 ram_addr_t memory)
2728 int idx, eidx;
2729 unsigned int i;
2731 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2732 return -1;
2733 idx = SUBPAGE_IDX(start);
2734 eidx = SUBPAGE_IDX(end);
2735 #if defined(DEBUG_SUBPAGE)
2736 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2737 mmio, start, end, idx, eidx, memory);
2738 #endif
2739 memory >>= IO_MEM_SHIFT;
2740 for (; idx <= eidx; idx++) {
2741 for (i = 0; i < 4; i++) {
2742 if (io_mem_read[memory][i]) {
2743 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2744 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2746 if (io_mem_write[memory][i]) {
2747 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2748 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2753 return 0;
2756 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2757 ram_addr_t orig_memory)
2759 subpage_t *mmio;
2760 int subpage_memory;
2762 mmio = qemu_mallocz(sizeof(subpage_t));
2763 if (mmio != NULL) {
2764 mmio->base = base;
2765 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2766 #if defined(DEBUG_SUBPAGE)
2767 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2768 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2769 #endif
2770 *phys = subpage_memory | IO_MEM_SUBPAGE;
2771 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2774 return mmio;
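/* For illustration only: subpages are created by cpu_register_physical_memory()
 * above either when a registration does not line up with TARGET_PAGE_SIZE
 * boundaries, or when the io region is missing access widths
 * (IO_MEM_SUBWIDTH).  E.g. (the io index below is a placeholder):
 */
#if 0
    /* a byte-only device (NULL word/long handlers, so its io index carries
       IO_MEM_SUBWIDTH) is routed through a subpage_t for its page */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE,
                                 mydev_subwidth_io);
#endif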
2777 static int get_free_io_mem_idx(void)
2779 int i;
2781 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2782 if (!io_mem_used[i]) {
2783 io_mem_used[i] = 1;
2784 return i;
2787 return -1;
2790 static void io_mem_init(void)
2792 int i;
2794 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2795 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2796 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2797 for (i=0; i<5; i++)
2798 io_mem_used[i] = 1;
2800 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2801 watch_mem_write, NULL);
2802 /* alloc dirty bits array */
2803 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2804 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2807 /* mem_read and mem_write are arrays of functions containing the
2808 function to access byte (index 0), word (index 1) and dword (index
2809 2). Functions can be omitted with a NULL function pointer. The
2810 registered functions may be modified dynamically later.
2811 If io_index is non-zero, the corresponding io zone is
2812 modified. If it is zero, a new io zone is allocated. The return
2813 value can be used with cpu_register_physical_memory(). (-1) is
2814 returned on error. */
2815 int cpu_register_io_memory(int io_index,
2816 CPUReadMemoryFunc **mem_read,
2817 CPUWriteMemoryFunc **mem_write,
2818 void *opaque)
2820 int i, subwidth = 0;
2822 if (io_index <= 0) {
2823 io_index = get_free_io_mem_idx();
2824 if (io_index == -1)
2825 return io_index;
2826 } else {
2827 if (io_index >= IO_MEM_NB_ENTRIES)
2828 return -1;
2831 for(i = 0;i < 3; i++) {
2832 if (!mem_read[i] || !mem_write[i])
2833 subwidth = IO_MEM_SUBWIDTH;
2834 io_mem_read[io_index][i] = mem_read[i];
2835 io_mem_write[io_index][i] = mem_write[i];
2837 io_mem_opaque[io_index] = opaque;
2838 return (io_index << IO_MEM_SHIFT) | subwidth;
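/* For illustration only: what a caller passes for mem_read/mem_write.  The
 * mydev_* handlers and the opaque pointer s are placeholders:
 */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb,
    NULL,            /* no 16-bit handler: the result carries IO_MEM_SUBWIDTH */
    mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb,
    NULL,
    mydev_writel,
};
    /* io_index 0 -> allocate a fresh slot; register with the tables above */
    int mydev_io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
#endif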
2841 void cpu_unregister_io_memory(int io_table_address)
2843 int i;
2844 int io_index = io_table_address >> IO_MEM_SHIFT;
2846 for (i=0;i < 3; i++) {
2847 io_mem_read[io_index][i] = unassigned_mem_read[i];
2848 io_mem_write[io_index][i] = unassigned_mem_write[i];
2850 io_mem_opaque[io_index] = NULL;
2851 io_mem_used[io_index] = 0;
2854 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2856 return io_mem_write[io_index >> IO_MEM_SHIFT];
2859 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2861 return io_mem_read[io_index >> IO_MEM_SHIFT];
2864 #endif /* !defined(CONFIG_USER_ONLY) */
2866 /* physical memory access (slow version, mainly for debug) */
2867 #if defined(CONFIG_USER_ONLY)
2868 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2869 int len, int is_write)
2871 int l, flags;
2872 target_ulong page;
2873 void * p;
2875 while (len > 0) {
2876 page = addr & TARGET_PAGE_MASK;
2877 l = (page + TARGET_PAGE_SIZE) - addr;
2878 if (l > len)
2879 l = len;
2880 flags = page_get_flags(page);
2881 if (!(flags & PAGE_VALID))
2882 return;
2883 if (is_write) {
2884 if (!(flags & PAGE_WRITE))
2885 return;
2886 /* XXX: this code should not depend on lock_user */
2887 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2888 /* FIXME - should this return an error rather than just fail? */
2889 return;
2890 memcpy(p, buf, l);
2891 unlock_user(p, addr, l);
2892 } else {
2893 if (!(flags & PAGE_READ))
2894 return;
2895 /* XXX: this code should not depend on lock_user */
2896 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2897 /* FIXME - should this return an error rather than just fail? */
2898 return;
2899 memcpy(buf, p, l);
2900 unlock_user(p, addr, 0);
2902 len -= l;
2903 buf += l;
2904 addr += l;
2908 #else
2909 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2910 int len, int is_write)
2912 int l, io_index;
2913 uint8_t *ptr;
2914 uint32_t val;
2915 target_phys_addr_t page;
2916 unsigned long pd;
2917 PhysPageDesc *p;
2919 while (len > 0) {
2920 page = addr & TARGET_PAGE_MASK;
2921 l = (page + TARGET_PAGE_SIZE) - addr;
2922 if (l > len)
2923 l = len;
2924 p = phys_page_find(page >> TARGET_PAGE_BITS);
2925 if (!p) {
2926 pd = IO_MEM_UNASSIGNED;
2927 } else {
2928 pd = p->phys_offset;
2931 if (is_write) {
2932 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2933 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2934 /* XXX: could force cpu_single_env to NULL to avoid
2935 potential bugs */
2936 if (l >= 4 && ((addr & 3) == 0)) {
2937 /* 32 bit write access */
2938 val = ldl_p(buf);
2939 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2940 l = 4;
2941 } else if (l >= 2 && ((addr & 1) == 0)) {
2942 /* 16 bit write access */
2943 val = lduw_p(buf);
2944 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2945 l = 2;
2946 } else {
2947 /* 8 bit write access */
2948 val = ldub_p(buf);
2949 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2950 l = 1;
2952 } else {
2953 unsigned long addr1;
2954 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2955 /* RAM case */
2956 ptr = phys_ram_base + addr1;
2957 memcpy(ptr, buf, l);
2958 if (!cpu_physical_memory_is_dirty(addr1)) {
2959 /* invalidate code */
2960 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2961 /* set dirty bit */
2962 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2963 (0xff & ~CODE_DIRTY_FLAG);
2965 /* qemu doesn't execute guest code directly, but kvm does,
2966 so flush the instruction caches */
2967 if (kvm_enabled())
2968 flush_icache_range((unsigned long)ptr,
2969 ((unsigned long)ptr)+l);
2971 } else {
2972 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2973 !(pd & IO_MEM_ROMD)) {
2974 /* I/O case */
2975 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2976 if (l >= 4 && ((addr & 3) == 0)) {
2977 /* 32 bit read access */
2978 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2979 stl_p(buf, val);
2980 l = 4;
2981 } else if (l >= 2 && ((addr & 1) == 0)) {
2982 /* 16 bit read access */
2983 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2984 stw_p(buf, val);
2985 l = 2;
2986 } else {
2987 /* 8 bit read access */
2988 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2989 stb_p(buf, val);
2990 l = 1;
2992 } else {
2993 /* RAM case */
2994 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2995 (addr & ~TARGET_PAGE_MASK);
2996 memcpy(buf, ptr, l);
2999 len -= l;
3000 buf += l;
3001 addr += l;
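/* For illustration only: device emulation uses the cpu_physical_memory_read /
 * cpu_physical_memory_write wrappers (cpu-all.h) around this function for
 * DMA-style transfers.  desc_paddr and the 16-byte layout are invented:
 */
#if 0
    uint8_t desc[16], status[4];
    cpu_physical_memory_read(desc_paddr, desc, sizeof(desc));
    /* ... interpret the descriptor, fill in status ... */
    cpu_physical_memory_write(desc_paddr + 12, status, sizeof(status));
#endif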
3005 /* used for ROM loading: can write in RAM and ROM */
3006 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3007 const uint8_t *buf, int len)
3009 int l;
3010 uint8_t *ptr;
3011 target_phys_addr_t page;
3012 unsigned long pd;
3013 PhysPageDesc *p;
3015 while (len > 0) {
3016 page = addr & TARGET_PAGE_MASK;
3017 l = (page + TARGET_PAGE_SIZE) - addr;
3018 if (l > len)
3019 l = len;
3020 p = phys_page_find(page >> TARGET_PAGE_BITS);
3021 if (!p) {
3022 pd = IO_MEM_UNASSIGNED;
3023 } else {
3024 pd = p->phys_offset;
3027 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3028 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3029 !(pd & IO_MEM_ROMD)) {
3030 /* do nothing */
3031 } else {
3032 unsigned long addr1;
3033 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3034 /* ROM/RAM case */
3035 ptr = phys_ram_base + addr1;
3036 memcpy(ptr, buf, l);
3038 len -= l;
3039 buf += l;
3040 addr += l;
3045 /* warning: addr must be aligned */
3046 uint32_t ldl_phys(target_phys_addr_t addr)
3048 int io_index;
3049 uint8_t *ptr;
3050 uint32_t val;
3051 unsigned long pd;
3052 PhysPageDesc *p;
3054 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3055 if (!p) {
3056 pd = IO_MEM_UNASSIGNED;
3057 } else {
3058 pd = p->phys_offset;
3061 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3062 !(pd & IO_MEM_ROMD)) {
3063 /* I/O case */
3064 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3065 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3066 } else {
3067 /* RAM case */
3068 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3069 (addr & ~TARGET_PAGE_MASK);
3070 val = ldl_p(ptr);
3072 return val;
3075 /* warning: addr must be aligned */
3076 uint64_t ldq_phys(target_phys_addr_t addr)
3078 int io_index;
3079 uint8_t *ptr;
3080 uint64_t val;
3081 unsigned long pd;
3082 PhysPageDesc *p;
3084 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3085 if (!p) {
3086 pd = IO_MEM_UNASSIGNED;
3087 } else {
3088 pd = p->phys_offset;
3091 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3092 !(pd & IO_MEM_ROMD)) {
3093 /* I/O case */
3094 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3095 #ifdef TARGET_WORDS_BIGENDIAN
3096 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3097 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3098 #else
3099 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3100 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3101 #endif
3102 } else {
3103 /* RAM case */
3104 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3105 (addr & ~TARGET_PAGE_MASK);
3106 val = ldq_p(ptr);
3108 return val;
3111 /* XXX: optimize */
3112 uint32_t ldub_phys(target_phys_addr_t addr)
3114 uint8_t val;
3115 cpu_physical_memory_read(addr, &val, 1);
3116 return val;
3119 /* XXX: optimize */
3120 uint32_t lduw_phys(target_phys_addr_t addr)
3122 uint16_t val;
3123 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3124 return tswap16(val);
3127 #ifdef __GNUC__
3128 #define likely(x) __builtin_expect(!!(x), 1)
3129 #define unlikely(x) __builtin_expect(!!(x), 0)
3130 #else
3131 #define likely(x) x
3132 #define unlikely(x) x
3133 #endif
3135 /* warning: addr must be aligned. The ram page is not marked as dirty
3136 and the code inside is not invalidated. It is useful if the dirty
3137 bits are used to track modified PTEs */
3138 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3140 int io_index;
3141 uint8_t *ptr;
3142 unsigned long pd;
3143 PhysPageDesc *p;
3145 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3146 if (!p) {
3147 pd = IO_MEM_UNASSIGNED;
3148 } else {
3149 pd = p->phys_offset;
3152 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3153 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3154 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3155 } else {
3156 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3157 ptr = phys_ram_base + addr1;
3158 stl_p(ptr, val);
3160 if (unlikely(in_migration)) {
3161 if (!cpu_physical_memory_is_dirty(addr1)) {
3162 /* invalidate code */
3163 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3164 /* set dirty bit */
3165 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3166 (0xff & ~CODE_DIRTY_FLAG);
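/* For illustration only: the "track modified PTEs" use mentioned above.  A
 * target MMU walker that sets accessed/dirty bits in a guest page-table entry
 * goes through the notdirty variant so the write itself does not dirty the
 * page.  The PG_* names follow the x86 convention and are assumptions here:
 */
#if 0
    pte |= PG_ACCESSED_MASK;
    if (is_write)
        pte |= PG_DIRTY_MASK;
    stl_phys_notdirty(pte_addr, pte);
#endif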
3172 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3174 int io_index;
3175 uint8_t *ptr;
3176 unsigned long pd;
3177 PhysPageDesc *p;
3179 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3180 if (!p) {
3181 pd = IO_MEM_UNASSIGNED;
3182 } else {
3183 pd = p->phys_offset;
3186 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3187 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3188 #ifdef TARGET_WORDS_BIGENDIAN
3189 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3190 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3191 #else
3192 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3193 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3194 #endif
3195 } else {
3196 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3197 (addr & ~TARGET_PAGE_MASK);
3198 stq_p(ptr, val);
3202 /* warning: addr must be aligned */
3203 void stl_phys(target_phys_addr_t addr, uint32_t val)
3205 int io_index;
3206 uint8_t *ptr;
3207 unsigned long pd;
3208 PhysPageDesc *p;
3210 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3211 if (!p) {
3212 pd = IO_MEM_UNASSIGNED;
3213 } else {
3214 pd = p->phys_offset;
3217 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3218 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3219 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3220 } else {
3221 unsigned long addr1;
3222 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3223 /* RAM case */
3224 ptr = phys_ram_base + addr1;
3225 stl_p(ptr, val);
3226 if (!cpu_physical_memory_is_dirty(addr1)) {
3227 /* invalidate code */
3228 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3229 /* set dirty bit */
3230 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3231 (0xff & ~CODE_DIRTY_FLAG);
3236 /* XXX: optimize */
3237 void stb_phys(target_phys_addr_t addr, uint32_t val)
3239 uint8_t v = val;
3240 cpu_physical_memory_write(addr, &v, 1);
3243 /* XXX: optimize */
3244 void stw_phys(target_phys_addr_t addr, uint32_t val)
3246 uint16_t v = tswap16(val);
3247 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3250 /* XXX: optimize */
3251 void stq_phys(target_phys_addr_t addr, uint64_t val)
3253 val = tswap64(val);
3254 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3257 #endif
3259 /* virtual memory access for debug */
3260 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3261 uint8_t *buf, int len, int is_write)
3263 int l;
3264 target_phys_addr_t phys_addr;
3265 target_ulong page;
3267 while (len > 0) {
3268 page = addr & TARGET_PAGE_MASK;
3269 phys_addr = cpu_get_phys_page_debug(env, page);
3270 /* if no physical page mapped, return an error */
3271 if (phys_addr == -1)
3272 return -1;
3273 l = (page + TARGET_PAGE_SIZE) - addr;
3274 if (l > len)
3275 l = len;
3276 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3277 buf, l, is_write);
3278 len -= l;
3279 buf += l;
3280 addr += l;
3282 return 0;
3285 /* in deterministic execution mode, instructions doing device I/Os
3286 must be at the end of the TB */
3287 void cpu_io_recompile(CPUState *env, void *retaddr)
3289 TranslationBlock *tb;
3290 uint32_t n, cflags;
3291 target_ulong pc, cs_base;
3292 uint64_t flags;
3294 tb = tb_find_pc((unsigned long)retaddr);
3295 if (!tb) {
3296 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3297 retaddr);
3299 n = env->icount_decr.u16.low + tb->icount;
3300 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3301 /* Calculate how many instructions had been executed before the fault
3302 occurred. */
3303 n = n - env->icount_decr.u16.low;
3304 /* Generate a new TB ending on the I/O insn. */
3305 n++;
3306 /* On MIPS and SH, delay slot instructions can only be restarted if
3307 they were already the first instruction in the TB. If this is not
3308 the first instruction in a TB then re-execute the preceding
3309 branch. */
3310 #if defined(TARGET_MIPS)
3311 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3312 env->active_tc.PC -= 4;
3313 env->icount_decr.u16.low++;
3314 env->hflags &= ~MIPS_HFLAG_BMASK;
3316 #elif defined(TARGET_SH4)
3317 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3318 && n > 1) {
3319 env->pc -= 2;
3320 env->icount_decr.u16.low++;
3321 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3323 #endif
3324 /* This should never happen. */
3325 if (n > CF_COUNT_MASK)
3326 cpu_abort(env, "TB too big during recompile");
3328 cflags = n | CF_LAST_IO;
3329 pc = tb->pc;
3330 cs_base = tb->cs_base;
3331 flags = tb->flags;
3332 tb_phys_invalidate(tb, -1);
3333 /* FIXME: In theory this could raise an exception. In practice
3334 we have already translated the block once so it's probably ok. */
3335 tb_gen_code(env, pc, cs_base, flags, cflags);
3336 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3337 the first in the TB) then we end up generating a whole new TB and
3338 repeating the fault, which is horribly inefficient.
3339 Better would be to execute just this insn uncached, or generate a
3340 second new TB. */
3341 cpu_resume_from_signal(env, NULL);
3344 void dump_exec_info(FILE *f,
3345 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3347 int i, target_code_size, max_target_code_size;
3348 int direct_jmp_count, direct_jmp2_count, cross_page;
3349 TranslationBlock *tb;
3351 target_code_size = 0;
3352 max_target_code_size = 0;
3353 cross_page = 0;
3354 direct_jmp_count = 0;
3355 direct_jmp2_count = 0;
3356 for(i = 0; i < nb_tbs; i++) {
3357 tb = &tbs[i];
3358 target_code_size += tb->size;
3359 if (tb->size > max_target_code_size)
3360 max_target_code_size = tb->size;
3361 if (tb->page_addr[1] != -1)
3362 cross_page++;
3363 if (tb->tb_next_offset[0] != 0xffff) {
3364 direct_jmp_count++;
3365 if (tb->tb_next_offset[1] != 0xffff) {
3366 direct_jmp2_count++;
3370 /* XXX: avoid using doubles ? */
3371 cpu_fprintf(f, "Translation buffer state:\n");
3372 cpu_fprintf(f, "gen code size %ld/%ld\n",
3373 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3374 cpu_fprintf(f, "TB count %d/%d\n",
3375 nb_tbs, code_gen_max_blocks);
3376 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3377 nb_tbs ? target_code_size / nb_tbs : 0,
3378 max_target_code_size);
3379 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3380 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3381 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3382 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3383 cross_page,
3384 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3385 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3386 direct_jmp_count,
3387 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3388 direct_jmp2_count,
3389 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3390 cpu_fprintf(f, "\nStatistics:\n");
3391 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3392 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3393 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3394 tcg_dump_info(f, cpu_fprintf);
3397 #if !defined(CONFIG_USER_ONLY)
3399 #define MMUSUFFIX _cmmu
3400 #define GETPC() NULL
3401 #define env cpu_single_env
3402 #define SOFTMMU_CODE_ACCESS
3404 #define SHIFT 0
3405 #include "softmmu_template.h"
3407 #define SHIFT 1
3408 #include "softmmu_template.h"
3410 #define SHIFT 2
3411 #include "softmmu_template.h"
3413 #define SHIFT 3
3414 #include "softmmu_template.h"
3416 #undef env
3418 #endif