Device-assignment: free device if hotplug fails
[qemu-kvm/fedora.git] / exec.c
blob 8122d88fe856bb9d91ac047f647a2d89e6ce4ad8
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #include "osdep.h"
47 #include "kvm.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #endif
52 //#define DEBUG_TB_INVALIDATE
53 //#define DEBUG_FLUSH
54 //#define DEBUG_TLB
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
66 #undef DEBUG_TB_CHECK
67 #endif
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #define MMAP_AREA_START 0x00000000
72 #define MMAP_AREA_END 0xa8000000
74 #if defined(TARGET_SPARC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 41
76 #elif defined(TARGET_SPARC)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #elif defined(TARGET_ALPHA)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #define TARGET_VIRT_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_PPC64)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
89 #else
90 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
91 #define TARGET_PHYS_ADDR_SPACE_BITS 32
92 #endif
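/*
 * TARGET_PHYS_ADDR_SPACE_BITS bounds the guest physical address space
 * that l1_phys_map (below) has to cover; phys_page_find_alloc() adds an
 * extra indirection level whenever it exceeds 32 bits.  When USE_KQEMU
 * is defined the x86 targets fall through to the 32-bit default for
 * kqemu compatibility, as the note in the #else branch says.
 */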
94 static TranslationBlock *tbs;
95 int code_gen_max_blocks;
96 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
97 static int nb_tbs;
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
101 #if defined(__arm__) || defined(__sparc_v9__)
102 /* The prologue must be reachable with a direct jump. ARM and Sparc64
103 have limited branch ranges (possibly also PPC), so place it in a
104 section close to the code segment. */
105 #define code_gen_section \
106 __attribute__((__section__(".gen_code"))) \
107 __attribute__((aligned (32)))
108 #else
109 #define code_gen_section \
110 __attribute__((aligned (32)))
111 #endif
113 uint8_t code_gen_prologue[1024] code_gen_section;
114 static uint8_t *code_gen_buffer;
115 static unsigned long code_gen_buffer_size;
116 /* threshold to flush the translated code buffer */
117 static unsigned long code_gen_buffer_max_size;
118 uint8_t *code_gen_ptr;
120 #if !defined(CONFIG_USER_ONLY)
121 ram_addr_t phys_ram_size;
122 int phys_ram_fd;
123 uint8_t *phys_ram_base;
124 uint8_t *phys_ram_dirty;
125 uint8_t *bios_mem;
126 static int in_migration;
127 static ram_addr_t phys_ram_alloc_offset = 0;
128 #endif
130 CPUState *first_cpu;
131 /* current CPU in the current thread. It is only valid inside
132 cpu_exec() */
133 CPUState *cpu_single_env;
134 /* 0 = Do not count executed instructions.
135 1 = Precise instruction counting.
136 2 = Adaptive rate instruction counting. */
137 int use_icount = 0;
138 /* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
140 int64_t qemu_icount;
142 typedef struct PageDesc {
143 /* list of TBs intersecting this ram page */
144 TranslationBlock *first_tb;
145 /* in order to optimize self-modifying code handling, we count the
146 writes to a given page and switch to a bitmap once a threshold is hit */
147 unsigned int code_write_count;
148 uint8_t *code_bitmap;
149 #if defined(CONFIG_USER_ONLY)
150 unsigned long flags;
151 #endif
152 } PageDesc;
154 typedef struct PhysPageDesc {
155 /* offset in host memory of the page + io_index in the low bits */
156 ram_addr_t phys_offset;
157 } PhysPageDesc;
159 #define L2_BITS 10
160 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
161 /* XXX: this is a temporary hack for alpha target.
162 * In the future, this is to be replaced by a multi-level table
163 * to actually be able to handle the complete 64 bits address space.
164 */
165 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
166 #else
167 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
168 #endif
170 #define L1_SIZE (1 << L1_BITS)
171 #define L2_SIZE (1 << L2_BITS)
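/*
 * Worked example of the L1/L2 split, assuming the common
 * TARGET_PAGE_BITS == 12 case: L1_BITS == 32 - 10 - 12 == 10, so both
 * levels hold 1024 entries and a page index decomposes as
 *
 *     index = addr >> TARGET_PAGE_BITS;
 *     l1    = index >> L2_BITS;        selects the L2 table in l1_map[]
 *     l2    = index & (L2_SIZE - 1);   selects the PageDesc within it
 *
 * which is exactly how page_l1_map()/page_find_alloc() walk the table
 * further down.
 */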
173 unsigned long qemu_real_host_page_size;
174 unsigned long qemu_host_page_bits;
175 unsigned long qemu_host_page_size;
176 unsigned long qemu_host_page_mask;
178 /* XXX: for system emulation, it could just be an array */
179 static PageDesc *l1_map[L1_SIZE];
180 static PhysPageDesc **l1_phys_map;
182 #if !defined(CONFIG_USER_ONLY)
183 static void io_mem_init(void);
185 /* io memory support */
186 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
187 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
188 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
189 char io_mem_used[IO_MEM_NB_ENTRIES];
190 static int io_mem_watch;
191 #endif
193 /* log support */
194 static const char *logfilename = "/tmp/qemu.log";
195 FILE *logfile;
196 int loglevel;
197 static int log_append = 0;
199 /* statistics */
200 static int tlb_flush_count;
201 static int tb_flush_count;
202 static int tb_phys_invalidate_count;
204 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
205 typedef struct subpage_t {
206 target_phys_addr_t base;
207 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
208 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
209 void *opaque[TARGET_PAGE_SIZE][2][4];
210 } subpage_t;
212 #ifdef _WIN32
213 static void map_exec(void *addr, long size)
215 DWORD old_protect;
216 VirtualProtect(addr, size,
217 PAGE_EXECUTE_READWRITE, &old_protect);
220 #else
221 static void map_exec(void *addr, long size)
223 unsigned long start, end, page_size;
225 page_size = getpagesize();
226 start = (unsigned long)addr;
227 start &= ~(page_size - 1);
229 end = (unsigned long)addr + size;
230 end += page_size - 1;
231 end &= ~(page_size - 1);
233 mprotect((void *)start, end - start,
234 PROT_READ | PROT_WRITE | PROT_EXEC);
236 #endif
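/*
 * On the POSIX side, map_exec() widens [addr, addr + size) to whole
 * host pages before changing protections, since mprotect() works on
 * page granularity.  With 4 KiB pages, for instance, addr = 0x1234 and
 * size = 0x100 end up covering the single page [0x1000, 0x2000).  It
 * is used below to make code_gen_prologue and the static code buffer
 * executable.
 */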
238 static void page_init(void)
240 /* NOTE: we can always suppose that qemu_host_page_size >=
241 TARGET_PAGE_SIZE */
242 #ifdef _WIN32
244 SYSTEM_INFO system_info;
246 GetSystemInfo(&system_info);
247 qemu_real_host_page_size = system_info.dwPageSize;
249 #else
250 qemu_real_host_page_size = getpagesize();
251 #endif
252 if (qemu_host_page_size == 0)
253 qemu_host_page_size = qemu_real_host_page_size;
254 if (qemu_host_page_size < TARGET_PAGE_SIZE)
255 qemu_host_page_size = TARGET_PAGE_SIZE;
256 qemu_host_page_bits = 0;
257 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258 qemu_host_page_bits++;
259 qemu_host_page_mask = ~(qemu_host_page_size - 1);
260 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
263 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
265 long long startaddr, endaddr;
266 FILE *f;
267 int n;
269 mmap_lock();
270 last_brk = (unsigned long)sbrk(0);
271 f = fopen("/proc/self/maps", "r");
272 if (f) {
273 do {
274 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275 if (n == 2) {
276 startaddr = MIN(startaddr,
277 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278 endaddr = MIN(endaddr,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280 page_set_flags(startaddr & TARGET_PAGE_MASK,
281 TARGET_PAGE_ALIGN(endaddr),
282 PAGE_RESERVED);
284 } while (!feof(f));
285 fclose(f);
287 mmap_unlock();
289 #endif
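/*
 * In the Linux user-mode case page_init() also walks /proc/self/maps
 * and marks every range the host process already has mapped as
 * PAGE_RESERVED, so the mmap emulation will not hand those addresses
 * out to the guest.
 */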
292 static inline PageDesc **page_l1_map(target_ulong index)
294 #if TARGET_LONG_BITS > 32
295 /* Host memory outside guest VM. For 32-bit targets we have already
296 excluded high addresses. */
297 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298 return NULL;
299 #endif
300 return &l1_map[index >> L2_BITS];
303 static inline PageDesc *page_find_alloc(target_ulong index)
305 PageDesc **lp, *p;
306 lp = page_l1_map(index);
307 if (!lp)
308 return NULL;
310 p = *lp;
311 if (!p) {
312 /* allocate if not found */
313 #if defined(CONFIG_USER_ONLY)
314 unsigned long addr;
315 size_t len = sizeof(PageDesc) * L2_SIZE;
316 /* Don't use qemu_malloc because it may recurse. */
317 p = mmap(0, len, PROT_READ | PROT_WRITE,
318 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
319 *lp = p;
320 addr = h2g(p);
321 if (addr == (target_ulong)addr) {
322 page_set_flags(addr & TARGET_PAGE_MASK,
323 TARGET_PAGE_ALIGN(addr + len),
324 PAGE_RESERVED);
326 #else
327 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
328 *lp = p;
329 #endif
331 return p + (index & (L2_SIZE - 1));
334 static inline PageDesc *page_find(target_ulong index)
336 PageDesc **lp, *p;
337 lp = page_l1_map(index);
338 if (!lp)
339 return NULL;
341 p = *lp;
342 if (!p)
343 return 0;
344 return p + (index & (L2_SIZE - 1));
347 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
349 void **lp, **p;
350 PhysPageDesc *pd;
352 p = (void **)l1_phys_map;
353 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
355 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
356 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
357 #endif
358 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
359 p = *lp;
360 if (!p) {
361 /* allocate if not found */
362 if (!alloc)
363 return NULL;
364 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
365 memset(p, 0, sizeof(void *) * L1_SIZE);
366 *lp = p;
368 #endif
369 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
370 pd = *lp;
371 if (!pd) {
372 int i;
373 /* allocate if not found */
374 if (!alloc)
375 return NULL;
376 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
377 *lp = pd;
378 for (i = 0; i < L2_SIZE; i++)
379 pd[i].phys_offset = IO_MEM_UNASSIGNED;
381 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
384 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386 return phys_page_find_alloc(index, 0);
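/*
 * Typical lookup pattern used by callers further down (e.g.
 * breakpoint_invalidate() and tlb_set_page_exec()): a missing entry is
 * treated as unassigned memory.
 *
 *     PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
 *     unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
 */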
389 #if !defined(CONFIG_USER_ONLY)
390 static void tlb_protect_code(ram_addr_t ram_addr);
391 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
392 target_ulong vaddr);
393 #define mmap_lock() do { } while(0)
394 #define mmap_unlock() do { } while(0)
395 #endif
397 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399 #if defined(CONFIG_USER_ONLY)
400 /* Currently it is not recommended to allocate big chunks of data in
401 user mode. This will change once a dedicated libc is used */
402 #define USE_STATIC_CODE_GEN_BUFFER
403 #endif
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
406 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
407 #endif
409 static void code_gen_alloc(unsigned long tb_size)
411 if (kvm_enabled())
412 return;
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 code_gen_buffer = static_code_gen_buffer;
416 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
417 map_exec(code_gen_buffer, code_gen_buffer_size);
418 #else
419 code_gen_buffer_size = tb_size;
420 if (code_gen_buffer_size == 0) {
421 #if defined(CONFIG_USER_ONLY)
422 /* in user mode, phys_ram_size is not meaningful */
423 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
424 #else
425 /* XXX: needs adjustments */
426 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
427 #endif
429 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
430 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
431 /* The code gen buffer location may have constraints depending on
432 the host cpu and OS */
433 #if defined(__linux__)
435 int flags;
436 void *start = NULL;
438 flags = MAP_PRIVATE | MAP_ANONYMOUS;
439 #if defined(__x86_64__)
440 flags |= MAP_32BIT;
441 /* Cannot map more than that */
442 if (code_gen_buffer_size > (800 * 1024 * 1024))
443 code_gen_buffer_size = (800 * 1024 * 1024);
444 #elif defined(__sparc_v9__)
445 // Map the buffer below 2G, so we can use direct calls and branches
446 flags |= MAP_FIXED;
447 start = (void *) 0x60000000UL;
448 if (code_gen_buffer_size > (512 * 1024 * 1024))
449 code_gen_buffer_size = (512 * 1024 * 1024);
450 #endif
451 code_gen_buffer = mmap(start, code_gen_buffer_size,
452 PROT_WRITE | PROT_READ | PROT_EXEC,
453 flags, -1, 0);
454 if (code_gen_buffer == MAP_FAILED) {
455 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
456 exit(1);
459 #elif defined(__FreeBSD__)
461 int flags;
462 void *addr = NULL;
463 flags = MAP_PRIVATE | MAP_ANONYMOUS;
464 #if defined(__x86_64__)
465 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
466 * 0x40000000 is free */
467 flags |= MAP_FIXED;
468 addr = (void *)0x40000000;
469 /* Cannot map more than that */
470 if (code_gen_buffer_size > (800 * 1024 * 1024))
471 code_gen_buffer_size = (800 * 1024 * 1024);
472 #endif
473 code_gen_buffer = mmap(addr, code_gen_buffer_size,
474 PROT_WRITE | PROT_READ | PROT_EXEC,
475 flags, -1, 0);
476 if (code_gen_buffer == MAP_FAILED) {
477 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
478 exit(1);
481 #else
482 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
483 if (!code_gen_buffer) {
484 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485 exit(1);
487 map_exec(code_gen_buffer, code_gen_buffer_size);
488 #endif
489 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
490 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491 code_gen_buffer_max_size = code_gen_buffer_size -
492 code_gen_max_block_size();
493 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
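/*
 * Sizing note: code_gen_buffer_max_size leaves room for one worst-case
 * translation block at the end of the buffer, and code_gen_max_blocks
 * is an estimate based on the average block size, so either limit can
 * trigger the tb_flush() retry in tb_gen_code().  With kvm_enabled()
 * the function returns early and no TCG buffer is set up at all.
 */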
497 /* Must be called before using the QEMU cpus. 'tb_size' is the size
498 (in bytes) allocated to the translation buffer. Zero means default
499 size. */
500 void cpu_exec_init_all(unsigned long tb_size)
502 cpu_gen_init();
503 code_gen_alloc(tb_size);
504 code_gen_ptr = code_gen_buffer;
505 page_init();
506 #if !defined(CONFIG_USER_ONLY)
507 io_mem_init();
508 #endif
511 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
513 #define CPU_COMMON_SAVE_VERSION 1
515 static void cpu_common_save(QEMUFile *f, void *opaque)
517 CPUState *env = opaque;
519 qemu_put_be32s(f, &env->halted);
520 qemu_put_be32s(f, &env->interrupt_request);
523 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
525 CPUState *env = opaque;
527 if (version_id != CPU_COMMON_SAVE_VERSION)
528 return -EINVAL;
530 qemu_get_be32s(f, &env->halted);
531 qemu_get_be32s(f, &env->interrupt_request);
532 tlb_flush(env, 1);
534 return 0;
536 #endif
538 void cpu_exec_init(CPUState *env)
540 CPUState **penv;
541 int cpu_index;
543 env->next_cpu = NULL;
544 penv = &first_cpu;
545 cpu_index = 0;
546 while (*penv != NULL) {
547 penv = (CPUState **)&(*penv)->next_cpu;
548 cpu_index++;
550 env->cpu_index = cpu_index;
551 #ifdef _WIN32
552 env->thread_id = GetCurrentProcessId();
553 #else
554 env->thread_id = getpid();
555 #endif
556 *penv = env;
557 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
558 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
559 cpu_common_save, cpu_common_load, env);
560 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
561 cpu_save, cpu_load, env);
562 #endif
565 static inline void invalidate_page_bitmap(PageDesc *p)
567 if (p->code_bitmap) {
568 qemu_free(p->code_bitmap);
569 p->code_bitmap = NULL;
571 p->code_write_count = 0;
574 /* set to NULL all the 'first_tb' fields in all PageDescs */
575 static void page_flush_tb(void)
577 int i, j;
578 PageDesc *p;
580 for(i = 0; i < L1_SIZE; i++) {
581 p = l1_map[i];
582 if (p) {
583 for(j = 0; j < L2_SIZE; j++) {
584 p->first_tb = NULL;
585 invalidate_page_bitmap(p);
586 p++;
592 /* flush all the translation blocks */
593 /* XXX: tb_flush is currently not thread safe */
594 void tb_flush(CPUState *env1)
596 CPUState *env;
597 #if defined(DEBUG_FLUSH)
598 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
599 (unsigned long)(code_gen_ptr - code_gen_buffer),
600 nb_tbs, nb_tbs > 0 ?
601 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
602 #endif
603 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
604 cpu_abort(env1, "Internal error: code buffer overflow\n");
606 nb_tbs = 0;
608 for(env = first_cpu; env != NULL; env = env->next_cpu) {
609 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
612 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
613 page_flush_tb();
615 code_gen_ptr = code_gen_buffer;
616 /* XXX: flush processor icache at this point if cache flush is
617 expensive */
618 tb_flush_count++;
621 #ifdef DEBUG_TB_CHECK
623 static void tb_invalidate_check(target_ulong address)
625 TranslationBlock *tb;
626 int i;
627 address &= TARGET_PAGE_MASK;
628 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
629 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
630 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
631 address >= tb->pc + tb->size)) {
632 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
633 address, (long)tb->pc, tb->size);
639 /* verify that all the pages have correct rights for code */
640 static void tb_page_check(void)
642 TranslationBlock *tb;
643 int i, flags1, flags2;
645 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
646 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
647 flags1 = page_get_flags(tb->pc);
648 flags2 = page_get_flags(tb->pc + tb->size - 1);
649 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
650 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
651 (long)tb->pc, tb->size, flags1, flags2);
657 static void tb_jmp_check(TranslationBlock *tb)
659 TranslationBlock *tb1;
660 unsigned int n1;
662 /* suppress any remaining jumps to this TB */
663 tb1 = tb->jmp_first;
664 for(;;) {
665 n1 = (long)tb1 & 3;
666 tb1 = (TranslationBlock *)((long)tb1 & ~3);
667 if (n1 == 2)
668 break;
669 tb1 = tb1->jmp_next[n1];
671 /* check end of list */
672 if (tb1 != tb) {
673 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
677 #endif
679 /* invalidate one TB */
680 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
681 int next_offset)
683 TranslationBlock *tb1;
684 for(;;) {
685 tb1 = *ptb;
686 if (tb1 == tb) {
687 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
688 break;
690 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
694 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
696 TranslationBlock *tb1;
697 unsigned int n1;
699 for(;;) {
700 tb1 = *ptb;
701 n1 = (long)tb1 & 3;
702 tb1 = (TranslationBlock *)((long)tb1 & ~3);
703 if (tb1 == tb) {
704 *ptb = tb1->page_next[n1];
705 break;
707 ptb = &tb1->page_next[n1];
711 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
713 TranslationBlock *tb1, **ptb;
714 unsigned int n1;
716 ptb = &tb->jmp_next[n];
717 tb1 = *ptb;
718 if (tb1) {
719 /* find tb(n) in circular list */
720 for(;;) {
721 tb1 = *ptb;
722 n1 = (long)tb1 & 3;
723 tb1 = (TranslationBlock *)((long)tb1 & ~3);
724 if (n1 == n && tb1 == tb)
725 break;
726 if (n1 == 2) {
727 ptb = &tb1->jmp_first;
728 } else {
729 ptb = &tb1->jmp_next[n1];
732 /* now we can suppress tb(n) from the list */
733 *ptb = tb->jmp_next[n];
735 tb->jmp_next[n] = NULL;
739 /* reset the jump entry 'n' of a TB so that it is not chained to
740 another TB */
741 static inline void tb_reset_jump(TranslationBlock *tb, int n)
743 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
746 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
748 CPUState *env;
749 PageDesc *p;
750 unsigned int h, n1;
751 target_phys_addr_t phys_pc;
752 TranslationBlock *tb1, *tb2;
754 /* remove the TB from the hash list */
755 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
756 h = tb_phys_hash_func(phys_pc);
757 tb_remove(&tb_phys_hash[h], tb,
758 offsetof(TranslationBlock, phys_hash_next));
760 /* remove the TB from the page list */
761 if (tb->page_addr[0] != page_addr) {
762 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
763 tb_page_remove(&p->first_tb, tb);
764 invalidate_page_bitmap(p);
766 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
767 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
768 tb_page_remove(&p->first_tb, tb);
769 invalidate_page_bitmap(p);
772 tb_invalidated_flag = 1;
774 /* remove the TB from each CPU's tb_jmp_cache */
775 h = tb_jmp_cache_hash_func(tb->pc);
776 for(env = first_cpu; env != NULL; env = env->next_cpu) {
777 if (env->tb_jmp_cache[h] == tb)
778 env->tb_jmp_cache[h] = NULL;
781 /* suppress this TB from the two jump lists */
782 tb_jmp_remove(tb, 0);
783 tb_jmp_remove(tb, 1);
785 /* suppress any remaining jumps to this TB */
786 tb1 = tb->jmp_first;
787 for(;;) {
788 n1 = (long)tb1 & 3;
789 if (n1 == 2)
790 break;
791 tb1 = (TranslationBlock *)((long)tb1 & ~3);
792 tb2 = tb1->jmp_next[n1];
793 tb_reset_jump(tb1, n1);
794 tb1->jmp_next[n1] = NULL;
795 tb1 = tb2;
797 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
799 tb_phys_invalidate_count++;
802 static inline void set_bits(uint8_t *tab, int start, int len)
804 int end, mask, end1;
806 end = start + len;
807 tab += start >> 3;
808 mask = 0xff << (start & 7);
809 if ((start & ~7) == (end & ~7)) {
810 if (start < end) {
811 mask &= ~(0xff << (end & 7));
812 *tab |= mask;
814 } else {
815 *tab++ |= mask;
816 start = (start + 8) & ~7;
817 end1 = end & ~7;
818 while (start < end1) {
819 *tab++ = 0xff;
820 start += 8;
822 if (start < end) {
823 mask = ~(0xff << (end & 7));
824 *tab |= mask;
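/*
 * set_bits() marks bit range [start, start + len) in a byte-array
 * bitmap, least-significant bit first.  For example set_bits(tab, 10, 4)
 * only touches tab[1] and ORs in 0x3c (bits 10..13).
 * build_page_bitmap() below uses it to record which bytes of a guest
 * page are covered by translated code.
 */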
829 static void build_page_bitmap(PageDesc *p)
831 int n, tb_start, tb_end;
832 TranslationBlock *tb;
834 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
835 if (!p->code_bitmap)
836 return;
838 tb = p->first_tb;
839 while (tb != NULL) {
840 n = (long)tb & 3;
841 tb = (TranslationBlock *)((long)tb & ~3);
842 /* NOTE: this is subtle as a TB may span two physical pages */
843 if (n == 0) {
844 /* NOTE: tb_end may be after the end of the page, but
845 it is not a problem */
846 tb_start = tb->pc & ~TARGET_PAGE_MASK;
847 tb_end = tb_start + tb->size;
848 if (tb_end > TARGET_PAGE_SIZE)
849 tb_end = TARGET_PAGE_SIZE;
850 } else {
851 tb_start = 0;
852 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
854 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
855 tb = tb->page_next[n];
859 TranslationBlock *tb_gen_code(CPUState *env,
860 target_ulong pc, target_ulong cs_base,
861 int flags, int cflags)
863 TranslationBlock *tb;
864 uint8_t *tc_ptr;
865 target_ulong phys_pc, phys_page2, virt_page2;
866 int code_gen_size;
868 phys_pc = get_phys_addr_code(env, pc);
869 tb = tb_alloc(pc);
870 if (!tb) {
871 /* flush must be done */
872 tb_flush(env);
873 /* cannot fail at this point */
874 tb = tb_alloc(pc);
875 /* Don't forget to invalidate previous TB info. */
876 tb_invalidated_flag = 1;
878 tc_ptr = code_gen_ptr;
879 tb->tc_ptr = tc_ptr;
880 tb->cs_base = cs_base;
881 tb->flags = flags;
882 tb->cflags = cflags;
883 cpu_gen_code(env, tb, &code_gen_size);
884 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
886 /* check next page if needed */
887 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
888 phys_page2 = -1;
889 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
890 phys_page2 = get_phys_addr_code(env, virt_page2);
892 tb_link_phys(tb, phys_pc, phys_page2);
893 return tb;
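/*
 * tb_gen_code() in short: translate the code at (pc, cs_base, flags)
 * into the next free slot of the code buffer, then publish the TB
 * under the physical page(s) it covers via tb_link_phys().  If
 * tb_alloc() fails the whole buffer is flushed first, so the retry
 * cannot fail; phys_page2 stays -1 unless the block spills into a
 * second guest page.
 */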
896 /* invalidate all TBs which intersect with the target physical page
897 starting in range [start;end[. NOTE: start and end must refer to
898 the same physical page. 'is_cpu_write_access' should be true if called
899 from a real cpu write access: the virtual CPU will exit the current
900 TB if code is modified inside this TB. */
901 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
902 int is_cpu_write_access)
904 TranslationBlock *tb, *tb_next, *saved_tb;
905 CPUState *env = cpu_single_env;
906 target_ulong tb_start, tb_end;
907 PageDesc *p;
908 int n;
909 #ifdef TARGET_HAS_PRECISE_SMC
910 int current_tb_not_found = is_cpu_write_access;
911 TranslationBlock *current_tb = NULL;
912 int current_tb_modified = 0;
913 target_ulong current_pc = 0;
914 target_ulong current_cs_base = 0;
915 int current_flags = 0;
916 #endif /* TARGET_HAS_PRECISE_SMC */
918 p = page_find(start >> TARGET_PAGE_BITS);
919 if (!p)
920 return;
921 if (!p->code_bitmap &&
922 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
923 is_cpu_write_access) {
924 /* build code bitmap */
925 build_page_bitmap(p);
928 /* we remove all the TBs in the range [start, end[ */
929 /* XXX: see if in some cases it could be faster to invalidate all the code */
930 tb = p->first_tb;
931 while (tb != NULL) {
932 n = (long)tb & 3;
933 tb = (TranslationBlock *)((long)tb & ~3);
934 tb_next = tb->page_next[n];
935 /* NOTE: this is subtle as a TB may span two physical pages */
936 if (n == 0) {
937 /* NOTE: tb_end may be after the end of the page, but
938 it is not a problem */
939 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
940 tb_end = tb_start + tb->size;
941 } else {
942 tb_start = tb->page_addr[1];
943 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
945 if (!(tb_end <= start || tb_start >= end)) {
946 #ifdef TARGET_HAS_PRECISE_SMC
947 if (current_tb_not_found) {
948 current_tb_not_found = 0;
949 current_tb = NULL;
950 if (env->mem_io_pc) {
951 /* now we have a real cpu fault */
952 current_tb = tb_find_pc(env->mem_io_pc);
955 if (current_tb == tb &&
956 (current_tb->cflags & CF_COUNT_MASK) != 1) {
957 /* If we are modifying the current TB, we must stop
958 its execution. We could be more precise by checking
959 that the modification is after the current PC, but it
960 would require a specialized function to partially
961 restore the CPU state */
963 current_tb_modified = 1;
964 cpu_restore_state(current_tb, env,
965 env->mem_io_pc, NULL);
966 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
967 &current_flags);
969 #endif /* TARGET_HAS_PRECISE_SMC */
970 /* we need to do that to handle the case where a signal
971 occurs while doing tb_phys_invalidate() */
972 saved_tb = NULL;
973 if (env) {
974 saved_tb = env->current_tb;
975 env->current_tb = NULL;
977 tb_phys_invalidate(tb, -1);
978 if (env) {
979 env->current_tb = saved_tb;
980 if (env->interrupt_request && env->current_tb)
981 cpu_interrupt(env, env->interrupt_request);
984 tb = tb_next;
986 #if !defined(CONFIG_USER_ONLY)
987 /* if no code remaining, no need to continue to use slow writes */
988 if (!p->first_tb) {
989 invalidate_page_bitmap(p);
990 if (is_cpu_write_access) {
991 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
994 #endif
995 #ifdef TARGET_HAS_PRECISE_SMC
996 if (current_tb_modified) {
997 /* we generate a block containing just the instruction
998 modifying the memory. It will ensure that it cannot modify
999 itself */
1000 env->current_tb = NULL;
1001 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1002 cpu_resume_from_signal(env, NULL);
1004 #endif
1007 /* len must be <= 8 and start must be a multiple of len */
1008 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1010 PageDesc *p;
1011 int offset, b;
1012 #if 0
1013 if (1) {
1014 if (loglevel) {
1015 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1016 cpu_single_env->mem_io_vaddr, len,
1017 cpu_single_env->eip,
1018 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1021 #endif
1022 p = page_find(start >> TARGET_PAGE_BITS);
1023 if (!p)
1024 return;
1025 if (p->code_bitmap) {
1026 offset = start & ~TARGET_PAGE_MASK;
1027 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1028 if (b & ((1 << len) - 1))
1029 goto do_invalidate;
1030 } else {
1031 do_invalidate:
1032 tb_invalidate_phys_page_range(start, start + len, 1);
1036 #if !defined(CONFIG_SOFTMMU)
1037 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1038 unsigned long pc, void *puc)
1040 TranslationBlock *tb;
1041 PageDesc *p;
1042 int n;
1043 #ifdef TARGET_HAS_PRECISE_SMC
1044 TranslationBlock *current_tb = NULL;
1045 CPUState *env = cpu_single_env;
1046 int current_tb_modified = 0;
1047 target_ulong current_pc = 0;
1048 target_ulong current_cs_base = 0;
1049 int current_flags = 0;
1050 #endif
1052 addr &= TARGET_PAGE_MASK;
1053 p = page_find(addr >> TARGET_PAGE_BITS);
1054 if (!p)
1055 return;
1056 tb = p->first_tb;
1057 #ifdef TARGET_HAS_PRECISE_SMC
1058 if (tb && pc != 0) {
1059 current_tb = tb_find_pc(pc);
1061 #endif
1062 while (tb != NULL) {
1063 n = (long)tb & 3;
1064 tb = (TranslationBlock *)((long)tb & ~3);
1065 #ifdef TARGET_HAS_PRECISE_SMC
1066 if (current_tb == tb &&
1067 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1068 /* If we are modifying the current TB, we must stop
1069 its execution. We could be more precise by checking
1070 that the modification is after the current PC, but it
1071 would require a specialized function to partially
1072 restore the CPU state */
1074 current_tb_modified = 1;
1075 cpu_restore_state(current_tb, env, pc, puc);
1076 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1077 &current_flags);
1079 #endif /* TARGET_HAS_PRECISE_SMC */
1080 tb_phys_invalidate(tb, addr);
1081 tb = tb->page_next[n];
1083 p->first_tb = NULL;
1084 #ifdef TARGET_HAS_PRECISE_SMC
1085 if (current_tb_modified) {
1086 /* we generate a block containing just the instruction
1087 modifying the memory. It will ensure that it cannot modify
1088 itself */
1089 env->current_tb = NULL;
1090 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1091 cpu_resume_from_signal(env, puc);
1093 #endif
1095 #endif
1097 /* add the tb in the target page and protect it if necessary */
1098 static inline void tb_alloc_page(TranslationBlock *tb,
1099 unsigned int n, target_ulong page_addr)
1101 PageDesc *p;
1102 TranslationBlock *last_first_tb;
1104 tb->page_addr[n] = page_addr;
1105 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1106 tb->page_next[n] = p->first_tb;
1107 last_first_tb = p->first_tb;
1108 p->first_tb = (TranslationBlock *)((long)tb | n);
1109 invalidate_page_bitmap(p);
1111 #if defined(TARGET_HAS_SMC) || 1
1113 #if defined(CONFIG_USER_ONLY)
1114 if (p->flags & PAGE_WRITE) {
1115 target_ulong addr;
1116 PageDesc *p2;
1117 int prot;
1119 /* force the host page as non writable (writes will have a
1120 page fault + mprotect overhead) */
1121 page_addr &= qemu_host_page_mask;
1122 prot = 0;
1123 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1124 addr += TARGET_PAGE_SIZE) {
1126 p2 = page_find (addr >> TARGET_PAGE_BITS);
1127 if (!p2)
1128 continue;
1129 prot |= p2->flags;
1130 p2->flags &= ~PAGE_WRITE;
1131 page_get_flags(addr);
1133 mprotect(g2h(page_addr), qemu_host_page_size,
1134 (prot & PAGE_BITS) & ~PAGE_WRITE);
1135 #ifdef DEBUG_TB_INVALIDATE
1136 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1137 page_addr);
1138 #endif
1140 #else
1141 /* if some code is already present, then the pages are already
1142 protected. So we handle the case where only the first TB is
1143 allocated in a physical page */
1144 if (!last_first_tb) {
1145 tlb_protect_code(page_addr);
1147 #endif
1149 #endif /* TARGET_HAS_SMC */
1152 /* Allocate a new translation block. Flush the translation buffer if
1153 too many translation blocks or too much generated code. */
1154 TranslationBlock *tb_alloc(target_ulong pc)
1156 TranslationBlock *tb;
1158 if (nb_tbs >= code_gen_max_blocks ||
1159 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1160 return NULL;
1161 tb = &tbs[nb_tbs++];
1162 tb->pc = pc;
1163 tb->cflags = 0;
1164 return tb;
1167 void tb_free(TranslationBlock *tb)
1169 /* In practice this is mostly used for single-use temporary TBs.
1170 Ignore the hard cases and just back up if this TB happens to
1171 be the last one generated. */
1172 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1173 code_gen_ptr = tb->tc_ptr;
1174 nb_tbs--;
1178 /* add a new TB and link it to the physical page tables. phys_page2 is
1179 (-1) to indicate that only one page contains the TB. */
1180 void tb_link_phys(TranslationBlock *tb,
1181 target_ulong phys_pc, target_ulong phys_page2)
1183 unsigned int h;
1184 TranslationBlock **ptb;
1186 /* Grab the mmap lock to stop another thread invalidating this TB
1187 before we are done. */
1188 mmap_lock();
1189 /* add in the physical hash table */
1190 h = tb_phys_hash_func(phys_pc);
1191 ptb = &tb_phys_hash[h];
1192 tb->phys_hash_next = *ptb;
1193 *ptb = tb;
1195 /* add in the page list */
1196 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1197 if (phys_page2 != -1)
1198 tb_alloc_page(tb, 1, phys_page2);
1199 else
1200 tb->page_addr[1] = -1;
1202 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1203 tb->jmp_next[0] = NULL;
1204 tb->jmp_next[1] = NULL;
1206 /* init original jump addresses */
1207 if (tb->tb_next_offset[0] != 0xffff)
1208 tb_reset_jump(tb, 0);
1209 if (tb->tb_next_offset[1] != 0xffff)
1210 tb_reset_jump(tb, 1);
1212 #ifdef DEBUG_TB_CHECK
1213 tb_page_check();
1214 #endif
1215 mmap_unlock();
1218 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1219 tb[1].tc_ptr. Return NULL if not found */
1220 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1222 int m_min, m_max, m;
1223 unsigned long v;
1224 TranslationBlock *tb;
1226 if (nb_tbs <= 0)
1227 return NULL;
1228 if (tc_ptr < (unsigned long)code_gen_buffer ||
1229 tc_ptr >= (unsigned long)code_gen_ptr)
1230 return NULL;
1231 /* binary search (cf Knuth) */
1232 m_min = 0;
1233 m_max = nb_tbs - 1;
1234 while (m_min <= m_max) {
1235 m = (m_min + m_max) >> 1;
1236 tb = &tbs[m];
1237 v = (unsigned long)tb->tc_ptr;
1238 if (v == tc_ptr)
1239 return tb;
1240 else if (tc_ptr < v) {
1241 m_max = m - 1;
1242 } else {
1243 m_min = m + 1;
1246 return &tbs[m_max];
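/*
 * When tc_ptr is not an exact block start, the binary search above
 * exits with tbs[m_max] being the last block whose code starts at or
 * below tc_ptr, i.e. the block whose generated code contains tc_ptr.
 * Callers such as tb_invalidate_phys_page_range() use this to map a
 * faulting host PC (env->mem_io_pc) back to the guest TB.
 */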
1249 static void tb_reset_jump_recursive(TranslationBlock *tb);
1251 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1253 TranslationBlock *tb1, *tb_next, **ptb;
1254 unsigned int n1;
1256 tb1 = tb->jmp_next[n];
1257 if (tb1 != NULL) {
1258 /* find head of list */
1259 for(;;) {
1260 n1 = (long)tb1 & 3;
1261 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1262 if (n1 == 2)
1263 break;
1264 tb1 = tb1->jmp_next[n1];
1266 /* we are now sure that tb jumps to tb1 */
1267 tb_next = tb1;
1269 /* remove tb from the jmp_first list */
1270 ptb = &tb_next->jmp_first;
1271 for(;;) {
1272 tb1 = *ptb;
1273 n1 = (long)tb1 & 3;
1274 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1275 if (n1 == n && tb1 == tb)
1276 break;
1277 ptb = &tb1->jmp_next[n1];
1279 *ptb = tb->jmp_next[n];
1280 tb->jmp_next[n] = NULL;
1282 /* suppress the jump to next tb in generated code */
1283 tb_reset_jump(tb, n);
1285 /* suppress jumps in the tb on which we could have jumped */
1286 tb_reset_jump_recursive(tb_next);
1290 static void tb_reset_jump_recursive(TranslationBlock *tb)
1292 tb_reset_jump_recursive2(tb, 0);
1293 tb_reset_jump_recursive2(tb, 1);
1296 #if defined(TARGET_HAS_ICE)
1297 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1299 target_phys_addr_t addr;
1300 target_ulong pd;
1301 ram_addr_t ram_addr;
1302 PhysPageDesc *p;
1304 addr = cpu_get_phys_page_debug(env, pc);
1305 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1306 if (!p) {
1307 pd = IO_MEM_UNASSIGNED;
1308 } else {
1309 pd = p->phys_offset;
1311 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1312 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1314 #endif
1316 /* Add a watchpoint. */
1317 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1318 int flags, CPUWatchpoint **watchpoint)
1320 target_ulong len_mask = ~(len - 1);
1321 CPUWatchpoint *wp, *prev_wp;
1323 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1324 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1325 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1326 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1327 return -EINVAL;
1329 wp = qemu_malloc(sizeof(*wp));
1330 if (!wp)
1331 return -ENOMEM;
1333 wp->vaddr = addr;
1334 wp->len_mask = len_mask;
1335 wp->flags = flags;
1337 /* keep all GDB-injected watchpoints in front */
1338 if (!(flags & BP_GDB) && env->watchpoints) {
1339 prev_wp = env->watchpoints;
1340 while (prev_wp->next != NULL && (prev_wp->next->flags & BP_GDB))
1341 prev_wp = prev_wp->next;
1342 } else {
1343 prev_wp = NULL;
1346 /* Insert new watchpoint */
1347 if (prev_wp) {
1348 wp->next = prev_wp->next;
1349 prev_wp->next = wp;
1350 } else {
1351 wp->next = env->watchpoints;
1352 env->watchpoints = wp;
1354 if (wp->next)
1355 wp->next->prev = wp;
1356 wp->prev = prev_wp;
1358 tlb_flush_page(env, addr);
1360 if (watchpoint)
1361 *watchpoint = wp;
1362 return 0;
1365 /* Remove a specific watchpoint. */
1366 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1367 int flags)
1369 target_ulong len_mask = ~(len - 1);
1370 CPUWatchpoint *wp;
1372 for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
1373 if (addr == wp->vaddr && len_mask == wp->len_mask
1374 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1375 cpu_watchpoint_remove_by_ref(env, wp);
1376 return 0;
1379 return -ENOENT;
1382 /* Remove a specific watchpoint by reference. */
1383 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1385 if (watchpoint->next)
1386 watchpoint->next->prev = watchpoint->prev;
1387 if (watchpoint->prev)
1388 watchpoint->prev->next = watchpoint->next;
1389 else
1390 env->watchpoints = watchpoint->next;
1392 tlb_flush_page(env, watchpoint->vaddr);
1394 qemu_free(watchpoint);
1397 /* Remove all matching watchpoints. */
1398 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1400 CPUWatchpoint *wp;
1402 for (wp = env->watchpoints; wp != NULL; wp = wp->next)
1403 if (wp->flags & mask)
1404 cpu_watchpoint_remove_by_ref(env, wp);
1407 /* Add a breakpoint. */
1408 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1409 CPUBreakpoint **breakpoint)
1411 #if defined(TARGET_HAS_ICE)
1412 CPUBreakpoint *bp, *prev_bp;
1414 bp = qemu_malloc(sizeof(*bp));
1415 if (!bp)
1416 return -ENOMEM;
1418 bp->pc = pc;
1419 bp->flags = flags;
1421 /* keep all GDB-injected breakpoints in front */
1422 if (!(flags & BP_GDB) && env->breakpoints) {
1423 prev_bp = env->breakpoints;
1424 while (prev_bp->next != NULL && (prev_bp->next->flags & BP_GDB))
1425 prev_bp = prev_bp->next;
1426 } else {
1427 prev_bp = NULL;
1430 /* Insert new breakpoint */
1431 if (prev_bp) {
1432 bp->next = prev_bp->next;
1433 prev_bp->next = bp;
1434 } else {
1435 bp->next = env->breakpoints;
1436 env->breakpoints = bp;
1438 if (bp->next)
1439 bp->next->prev = bp;
1440 bp->prev = prev_bp;
1442 if (kvm_enabled())
1443 kvm_update_debugger(env);
1445 breakpoint_invalidate(env, pc);
1447 if (breakpoint)
1448 *breakpoint = bp;
1449 return 0;
1450 #else
1451 return -ENOSYS;
1452 #endif
1455 /* Remove a specific breakpoint. */
1456 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1458 #if defined(TARGET_HAS_ICE)
1459 CPUBreakpoint *bp;
1461 for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
1462 if (bp->pc == pc && bp->flags == flags) {
1463 cpu_breakpoint_remove_by_ref(env, bp);
1464 return 0;
1467 return -ENOENT;
1468 #else
1469 return -ENOSYS;
1470 #endif
1473 /* Remove a specific breakpoint by reference. */
1474 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1476 #if defined(TARGET_HAS_ICE)
1477 if (breakpoint->next)
1478 breakpoint->next->prev = breakpoint->prev;
1479 if (breakpoint->prev)
1480 breakpoint->prev->next = breakpoint->next;
1481 else
1482 env->breakpoints = breakpoint->next;
1484 if (kvm_enabled())
1485 kvm_update_debugger(env);
1487 breakpoint_invalidate(env, breakpoint->pc);
1489 qemu_free(breakpoint);
1490 #endif
1493 /* Remove all matching breakpoints. */
1494 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1496 #if defined(TARGET_HAS_ICE)
1497 CPUBreakpoint *bp;
1499 for (bp = env->breakpoints; bp != NULL; bp = bp->next)
1500 if (bp->flags & mask)
1501 cpu_breakpoint_remove_by_ref(env, bp);
1502 #endif
1505 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1506 CPU loop after each instruction */
1507 void cpu_single_step(CPUState *env, int enabled)
1509 #if defined(TARGET_HAS_ICE)
1510 if (env->singlestep_enabled != enabled) {
1511 env->singlestep_enabled = enabled;
1512 /* must flush all the translated code to avoid inconsistencies */
1513 /* XXX: only flush what is necessary */
1514 tb_flush(env);
1516 if (kvm_enabled())
1517 kvm_update_debugger(env);
1518 #endif
1521 /* enable or disable low-level logging */
1522 void cpu_set_log(int log_flags)
1524 loglevel = log_flags;
1525 if (loglevel && !logfile) {
1526 logfile = fopen(logfilename, log_append ? "a" : "w");
1527 if (!logfile) {
1528 perror(logfilename);
1529 _exit(1);
1531 #if !defined(CONFIG_SOFTMMU)
1532 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1534 static char logfile_buf[4096];
1535 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1537 #else
1538 setvbuf(logfile, NULL, _IOLBF, 0);
1539 #endif
1540 log_append = 1;
1542 if (!loglevel && logfile) {
1543 fclose(logfile);
1544 logfile = NULL;
1548 void cpu_set_log_filename(const char *filename)
1550 logfilename = strdup(filename);
1551 if (logfile) {
1552 fclose(logfile);
1553 logfile = NULL;
1555 cpu_set_log(loglevel);
1558 /* mask must never be zero, except for A20 change call */
1559 void cpu_interrupt(CPUState *env, int mask)
1561 #if !defined(USE_NPTL)
1562 TranslationBlock *tb;
1563 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1564 #endif
1565 int old_mask;
1567 old_mask = env->interrupt_request;
1568 /* FIXME: This is probably not threadsafe. A different thread could
1569 be in the middle of a read-modify-write operation. */
1570 env->interrupt_request |= mask;
1571 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1572 kvm_update_interrupt_request(env);
1573 #if defined(USE_NPTL)
1574 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1575 problem and hope the cpu will stop of its own accord. For userspace
1576 emulation this often isn't actually as bad as it sounds. Often
1577 signals are used primarily to interrupt blocking syscalls. */
1578 #else
1579 if (use_icount) {
1580 env->icount_decr.u16.high = 0xffff;
1581 #ifndef CONFIG_USER_ONLY
1582 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1583 an async event happened and we need to process it. */
1584 if (!can_do_io(env)
1585 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1586 cpu_abort(env, "Raised interrupt while not in I/O function");
1588 #endif
1589 } else {
1590 tb = env->current_tb;
1591 /* if the cpu is currently executing code, we must unlink it and
1592 all the potentially executing TBs */
1593 if (tb && !testandset(&interrupt_lock)) {
1594 env->current_tb = NULL;
1595 tb_reset_jump_recursive(tb);
1596 resetlock(&interrupt_lock);
1599 #endif
1602 void cpu_reset_interrupt(CPUState *env, int mask)
1604 env->interrupt_request &= ~mask;
1607 const CPULogItem cpu_log_items[] = {
1608 { CPU_LOG_TB_OUT_ASM, "out_asm",
1609 "show generated host assembly code for each compiled TB" },
1610 { CPU_LOG_TB_IN_ASM, "in_asm",
1611 "show target assembly code for each compiled TB" },
1612 { CPU_LOG_TB_OP, "op",
1613 "show micro ops for each compiled TB" },
1614 { CPU_LOG_TB_OP_OPT, "op_opt",
1615 "show micro ops "
1616 #ifdef TARGET_I386
1617 "before eflags optimization and "
1618 #endif
1619 "after liveness analysis" },
1620 { CPU_LOG_INT, "int",
1621 "show interrupts/exceptions in short format" },
1622 { CPU_LOG_EXEC, "exec",
1623 "show trace before each executed TB (lots of logs)" },
1624 { CPU_LOG_TB_CPU, "cpu",
1625 "show CPU state before block translation" },
1626 #ifdef TARGET_I386
1627 { CPU_LOG_PCALL, "pcall",
1628 "show protected mode far calls/returns/exceptions" },
1629 #endif
1630 #ifdef DEBUG_IOPORT
1631 { CPU_LOG_IOPORT, "ioport",
1632 "show all i/o ports accesses" },
1633 #endif
1634 { 0, NULL, NULL },
1637 static int cmp1(const char *s1, int n, const char *s2)
1639 if (strlen(s2) != n)
1640 return 0;
1641 return memcmp(s1, s2, n) == 0;
1644 /* takes a comma-separated list of log masks. Returns 0 on error. */
1645 int cpu_str_to_log_mask(const char *str)
1647 const CPULogItem *item;
1648 int mask;
1649 const char *p, *p1;
1651 p = str;
1652 mask = 0;
1653 for(;;) {
1654 p1 = strchr(p, ',');
1655 if (!p1)
1656 p1 = p + strlen(p);
1657 if(cmp1(p,p1-p,"all")) {
1658 for(item = cpu_log_items; item->mask != 0; item++) {
1659 mask |= item->mask;
1661 } else {
1662 for(item = cpu_log_items; item->mask != 0; item++) {
1663 if (cmp1(p, p1 - p, item->name))
1664 goto found;
1666 return 0;
1668 found:
1669 mask |= item->mask;
1670 if (*p1 != ',')
1671 break;
1672 p = p1 + 1;
1674 return mask;
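/*
 * Example: cpu_str_to_log_mask("in_asm,cpu") yields
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every entry of
 * cpu_log_items[], and any unknown name makes the whole call return 0.
 * This is what the -d command line option is parsed with.
 */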
1677 void cpu_abort(CPUState *env, const char *fmt, ...)
1679 va_list ap;
1680 va_list ap2;
1682 va_start(ap, fmt);
1683 va_copy(ap2, ap);
1684 fprintf(stderr, "qemu: fatal: ");
1685 vfprintf(stderr, fmt, ap);
1686 fprintf(stderr, "\n");
1687 #ifdef TARGET_I386
1688 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1689 #else
1690 cpu_dump_state(env, stderr, fprintf, 0);
1691 #endif
1692 if (logfile) {
1693 fprintf(logfile, "qemu: fatal: ");
1694 vfprintf(logfile, fmt, ap2);
1695 fprintf(logfile, "\n");
1696 #ifdef TARGET_I386
1697 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1698 #else
1699 cpu_dump_state(env, logfile, fprintf, 0);
1700 #endif
1701 fflush(logfile);
1702 fclose(logfile);
1704 va_end(ap2);
1705 va_end(ap);
1706 abort();
1709 CPUState *cpu_copy(CPUState *env)
1711 CPUState *new_env = cpu_init(env->cpu_model_str);
1712 /* preserve chaining and index */
1713 CPUState *next_cpu = new_env->next_cpu;
1714 int cpu_index = new_env->cpu_index;
1715 memcpy(new_env, env, sizeof(CPUState));
1716 new_env->next_cpu = next_cpu;
1717 new_env->cpu_index = cpu_index;
1718 return new_env;
1721 #if !defined(CONFIG_USER_ONLY)
1723 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1725 unsigned int i;
1727 /* Discard jump cache entries for any tb which might potentially
1728 overlap the flushed page. */
1729 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1730 memset (&env->tb_jmp_cache[i], 0,
1731 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1733 i = tb_jmp_cache_hash_page(addr);
1734 memset (&env->tb_jmp_cache[i], 0,
1735 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1738 /* NOTE: if flush_global is true, also flush global entries (not
1739 implemented yet) */
1740 void tlb_flush(CPUState *env, int flush_global)
1742 int i;
1744 #if defined(DEBUG_TLB)
1745 printf("tlb_flush:\n");
1746 #endif
1747 /* must reset current TB so that interrupts cannot modify the
1748 links while we are modifying them */
1749 env->current_tb = NULL;
1751 for(i = 0; i < CPU_TLB_SIZE; i++) {
1752 env->tlb_table[0][i].addr_read = -1;
1753 env->tlb_table[0][i].addr_write = -1;
1754 env->tlb_table[0][i].addr_code = -1;
1755 env->tlb_table[1][i].addr_read = -1;
1756 env->tlb_table[1][i].addr_write = -1;
1757 env->tlb_table[1][i].addr_code = -1;
1758 #if (NB_MMU_MODES >= 3)
1759 env->tlb_table[2][i].addr_read = -1;
1760 env->tlb_table[2][i].addr_write = -1;
1761 env->tlb_table[2][i].addr_code = -1;
1762 #if (NB_MMU_MODES == 4)
1763 env->tlb_table[3][i].addr_read = -1;
1764 env->tlb_table[3][i].addr_write = -1;
1765 env->tlb_table[3][i].addr_code = -1;
1766 #endif
1767 #endif
1770 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1772 #ifdef USE_KQEMU
1773 if (env->kqemu_enabled) {
1774 kqemu_flush(env, flush_global);
1776 #endif
1777 tlb_flush_count++;
1780 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1782 if (addr == (tlb_entry->addr_read &
1783 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1784 addr == (tlb_entry->addr_write &
1785 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1786 addr == (tlb_entry->addr_code &
1787 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1788 tlb_entry->addr_read = -1;
1789 tlb_entry->addr_write = -1;
1790 tlb_entry->addr_code = -1;
1794 void tlb_flush_page(CPUState *env, target_ulong addr)
1796 int i;
1798 #if defined(DEBUG_TLB)
1799 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1800 #endif
1801 /* must reset current TB so that interrupts cannot modify the
1802 links while we are modifying them */
1803 env->current_tb = NULL;
1805 addr &= TARGET_PAGE_MASK;
1806 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1807 tlb_flush_entry(&env->tlb_table[0][i], addr);
1808 tlb_flush_entry(&env->tlb_table[1][i], addr);
1809 #if (NB_MMU_MODES >= 3)
1810 tlb_flush_entry(&env->tlb_table[2][i], addr);
1811 #if (NB_MMU_MODES == 4)
1812 tlb_flush_entry(&env->tlb_table[3][i], addr);
1813 #endif
1814 #endif
1816 tlb_flush_jmp_cache(env, addr);
1818 #ifdef USE_KQEMU
1819 if (env->kqemu_enabled) {
1820 kqemu_flush_page(env, addr);
1822 #endif
1825 /* update the TLBs so that writes to code in the virtual page 'addr'
1826 can be detected */
1827 static void tlb_protect_code(ram_addr_t ram_addr)
1829 cpu_physical_memory_reset_dirty(ram_addr,
1830 ram_addr + TARGET_PAGE_SIZE,
1831 CODE_DIRTY_FLAG);
1834 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1835 tested for self modifying code */
1836 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1837 target_ulong vaddr)
1839 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1842 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1843 unsigned long start, unsigned long length)
1845 unsigned long addr;
1846 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1847 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1848 if ((addr - start) < length) {
1849 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1854 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1855 int dirty_flags)
1857 CPUState *env;
1858 unsigned long length, start1;
1859 int i, mask, len;
1860 uint8_t *p;
1862 start &= TARGET_PAGE_MASK;
1863 end = TARGET_PAGE_ALIGN(end);
1865 length = end - start;
1866 if (length == 0)
1867 return;
1868 len = length >> TARGET_PAGE_BITS;
1869 #ifdef USE_KQEMU
1870 /* XXX: should not depend on cpu context */
1871 env = first_cpu;
1872 if (env->kqemu_enabled) {
1873 ram_addr_t addr;
1874 addr = start;
1875 for(i = 0; i < len; i++) {
1876 kqemu_set_notdirty(env, addr);
1877 addr += TARGET_PAGE_SIZE;
1880 #endif
1881 mask = ~dirty_flags;
1882 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1883 for(i = 0; i < len; i++)
1884 p[i] &= mask;
1886 /* we modify the TLB cache so that the dirty bit will be set again
1887 when accessing the range */
1888 start1 = start + (unsigned long)phys_ram_base;
1889 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1890 for(i = 0; i < CPU_TLB_SIZE; i++)
1891 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1892 for(i = 0; i < CPU_TLB_SIZE; i++)
1893 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1894 #if (NB_MMU_MODES >= 3)
1895 for(i = 0; i < CPU_TLB_SIZE; i++)
1896 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1897 #if (NB_MMU_MODES == 4)
1898 for(i = 0; i < CPU_TLB_SIZE; i++)
1899 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1900 #endif
1901 #endif
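/*
 * Dirty tracking in brief: phys_ram_dirty keeps one flag byte per
 * target page.  Clearing a range here also rewrites the write TLB
 * entries of every CPU to TLB_NOTDIRTY, so the next store to such a
 * page takes the slow path and can set the dirty flags again.
 */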
1905 int cpu_physical_memory_set_dirty_tracking(int enable)
1907 int r=0;
1909 if (kvm_enabled())
1910 r = kvm_physical_memory_set_dirty_tracking(enable);
1911 in_migration = enable;
1912 return r;
1915 int cpu_physical_memory_get_dirty_tracking(void)
1917 return in_migration;
1920 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1922 ram_addr_t ram_addr;
1924 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1925 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1926 tlb_entry->addend - (unsigned long)phys_ram_base;
1927 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1928 tlb_entry->addr_write |= TLB_NOTDIRTY;
1933 /* update the TLB according to the current state of the dirty bits */
1934 void cpu_tlb_update_dirty(CPUState *env)
1936 int i;
1937 for(i = 0; i < CPU_TLB_SIZE; i++)
1938 tlb_update_dirty(&env->tlb_table[0][i]);
1939 for(i = 0; i < CPU_TLB_SIZE; i++)
1940 tlb_update_dirty(&env->tlb_table[1][i]);
1941 #if (NB_MMU_MODES >= 3)
1942 for(i = 0; i < CPU_TLB_SIZE; i++)
1943 tlb_update_dirty(&env->tlb_table[2][i]);
1944 #if (NB_MMU_MODES == 4)
1945 for(i = 0; i < CPU_TLB_SIZE; i++)
1946 tlb_update_dirty(&env->tlb_table[3][i]);
1947 #endif
1948 #endif
1951 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1953 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1954 tlb_entry->addr_write = vaddr;
1957 /* update the TLB corresponding to virtual page vaddr
1958 so that it is no longer dirty */
1959 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1961 int i;
1963 vaddr &= TARGET_PAGE_MASK;
1964 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1965 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1966 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1967 #if (NB_MMU_MODES >= 3)
1968 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1969 #if (NB_MMU_MODES == 4)
1970 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1971 #endif
1972 #endif
1975 /* add a new TLB entry. At most one entry for a given virtual address
1976 is permitted. Return 0 if OK or 2 if the page could not be mapped
1977 (can only happen in non SOFTMMU mode for I/O pages or pages
1978 conflicting with the host address space). */
1979 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1980 target_phys_addr_t paddr, int prot,
1981 int mmu_idx, int is_softmmu)
1983 PhysPageDesc *p;
1984 unsigned long pd;
1985 unsigned int index;
1986 target_ulong address;
1987 target_ulong code_address;
1988 target_phys_addr_t addend;
1989 int ret;
1990 CPUTLBEntry *te;
1991 CPUWatchpoint *wp;
1992 target_phys_addr_t iotlb;
1994 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1995 if (!p) {
1996 pd = IO_MEM_UNASSIGNED;
1997 } else {
1998 pd = p->phys_offset;
2000 #if defined(DEBUG_TLB)
2001 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2002 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2003 #endif
2005 ret = 0;
2006 address = vaddr;
2007 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2008 /* IO memory case (romd handled later) */
2009 address |= TLB_MMIO;
2011 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2012 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2013 /* Normal RAM. */
2014 iotlb = pd & TARGET_PAGE_MASK;
2015 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2016 iotlb |= IO_MEM_NOTDIRTY;
2017 else
2018 iotlb |= IO_MEM_ROM;
2019 } else {
2020 /* IO handlers are currently passed a physical address.
2021 It would be nice to pass an offset from the base address
2022 of that region. This would avoid having to special case RAM,
2023 and avoid full address decoding in every device.
2024 We can't use the high bits of pd for this because
2025 IO_MEM_ROMD uses these as a ram address. */
2026 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2029 code_address = address;
2030 /* Make accesses to pages with watchpoints go via the
2031 watchpoint trap routines. */
2032 for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
2033 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2034 iotlb = io_mem_watch + paddr;
2035 /* TODO: The memory case can be optimized by not trapping
2036 reads of pages with a write breakpoint. */
2037 address |= TLB_MMIO;
2041 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2042 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2043 te = &env->tlb_table[mmu_idx][index];
2044 te->addend = addend - vaddr;
2045 if (prot & PAGE_READ) {
2046 te->addr_read = address;
2047 } else {
2048 te->addr_read = -1;
2051 if (prot & PAGE_EXEC) {
2052 te->addr_code = code_address;
2053 } else {
2054 te->addr_code = -1;
2056 if (prot & PAGE_WRITE) {
2057 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2058 (pd & IO_MEM_ROMD)) {
2059 /* Write access calls the I/O callback. */
2060 te->addr_write = address | TLB_MMIO;
2061 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2062 !cpu_physical_memory_is_dirty(pd)) {
2063 te->addr_write = address | TLB_NOTDIRTY;
2064 } else {
2065 te->addr_write = address;
2067 } else {
2068 te->addr_write = -1;
2070 return ret;
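/*
 * Illustrative sketch, not part of this file: a target's MMU fault handler
 * (the tlb_fill path) typically resolves vaddr to paddr/prot via its page
 * tables and then installs the mapping with tlb_set_page_exec().
 * get_physical_address() and the locals below are placeholders.
 *
 *   target_phys_addr_t paddr;
 *   int prot, ret;
 *
 *   ret = get_physical_address(env, &paddr, &prot, vaddr, rw, mmu_idx);
 *   if (ret == 0) {
 *       ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
 *                               paddr & TARGET_PAGE_MASK, prot,
 *                               mmu_idx, is_softmmu);
 *   }
 */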
2073 #else
2075 void tlb_flush(CPUState *env, int flush_global)
2079 void tlb_flush_page(CPUState *env, target_ulong addr)
2083 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2084 target_phys_addr_t paddr, int prot,
2085 int mmu_idx, int is_softmmu)
2087 return 0;
2090 /* dump memory mappings */
2091 void page_dump(FILE *f)
2093 unsigned long start, end;
2094 int i, j, prot, prot1;
2095 PageDesc *p;
2097 fprintf(f, "%-8s %-8s %-8s %s\n",
2098 "start", "end", "size", "prot");
2099 start = -1;
2100 end = -1;
2101 prot = 0;
2102 for(i = 0; i <= L1_SIZE; i++) {
2103 if (i < L1_SIZE)
2104 p = l1_map[i];
2105 else
2106 p = NULL;
2107 for(j = 0;j < L2_SIZE; j++) {
2108 if (!p)
2109 prot1 = 0;
2110 else
2111 prot1 = p[j].flags;
2112 if (prot1 != prot) {
2113 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2114 if (start != -1) {
2115 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2116 start, end, end - start,
2117 prot & PAGE_READ ? 'r' : '-',
2118 prot & PAGE_WRITE ? 'w' : '-',
2119 prot & PAGE_EXEC ? 'x' : '-');
2121 if (prot1 != 0)
2122 start = end;
2123 else
2124 start = -1;
2125 prot = prot1;
2127 if (!p)
2128 break;
2133 int page_get_flags(target_ulong address)
2135 PageDesc *p;
2137 p = page_find(address >> TARGET_PAGE_BITS);
2138 if (!p)
2139 return 0;
2140 return p->flags;
2143 /* modify the flags of a page and invalidate the code if
2144 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2145 depending on PAGE_WRITE */
2146 void page_set_flags(target_ulong start, target_ulong end, int flags)
2148 PageDesc *p;
2149 target_ulong addr;
2151 /* mmap_lock should already be held. */
2152 start = start & TARGET_PAGE_MASK;
2153 end = TARGET_PAGE_ALIGN(end);
2154 if (flags & PAGE_WRITE)
2155 flags |= PAGE_WRITE_ORG;
2156 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2157 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2158 /* We may be called for host regions that are outside guest
2159 address space. */
2160 if (!p)
2161 return;
2162 /* if the write protection is set, then we invalidate the code
2163 inside */
2164 if (!(p->flags & PAGE_WRITE) &&
2165 (flags & PAGE_WRITE) &&
2166 p->first_tb) {
2167 tb_invalidate_phys_page(addr, 0, NULL);
2169 p->flags = flags;
2173 int page_check_range(target_ulong start, target_ulong len, int flags)
2175 PageDesc *p;
2176 target_ulong end;
2177 target_ulong addr;
2179 if (start + len < start)
2180 /* we've wrapped around */
2181 return -1;
2183 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2184 start = start & TARGET_PAGE_MASK;
2186 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2187 p = page_find(addr >> TARGET_PAGE_BITS);
2188 if( !p )
2189 return -1;
2190 if( !(p->flags & PAGE_VALID) )
2191 return -1;
2193 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2194 return -1;
2195 if (flags & PAGE_WRITE) {
2196 if (!(p->flags & PAGE_WRITE_ORG))
2197 return -1;
2198 /* unprotect the page if it was put read-only because it
2199 contains translated code */
2200 if (!(p->flags & PAGE_WRITE)) {
2201 if (!page_unprotect(addr, 0, NULL))
2202 return -1;
2204 return 0;
2207 return 0;
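/*
 * Illustrative sketch, not part of this file: user-mode emulation code can
 * validate a guest buffer with page_check_range() before touching it.
 * guest_addr, len and the error value returned are placeholders.
 *
 *   if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) != 0) {
 *       return -EFAULT;
 *   }
 */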
2210 /* called from signal handler: invalidate the code and unprotect the
2211 page. Return TRUE if the fault was successfully handled. */
2212 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2214 unsigned int page_index, prot, pindex;
2215 PageDesc *p, *p1;
2216 target_ulong host_start, host_end, addr;
2218 /* Technically this isn't safe inside a signal handler. However, we
2219 know this only ever happens in a synchronous SEGV handler, so in
2220 practice it seems to be ok. */
2221 mmap_lock();
2223 host_start = address & qemu_host_page_mask;
2224 page_index = host_start >> TARGET_PAGE_BITS;
2225 p1 = page_find(page_index);
2226 if (!p1) {
2227 mmap_unlock();
2228 return 0;
2230 host_end = host_start + qemu_host_page_size;
2231 p = p1;
2232 prot = 0;
2233 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2234 prot |= p->flags;
2235 p++;
2237 /* if the page was really writable, then we change its
2238 protection back to writable */
2239 if (prot & PAGE_WRITE_ORG) {
2240 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2241 if (!(p1[pindex].flags & PAGE_WRITE)) {
2242 mprotect((void *)g2h(host_start), qemu_host_page_size,
2243 (prot & PAGE_BITS) | PAGE_WRITE);
2244 p1[pindex].flags |= PAGE_WRITE;
2245 /* and since the content will be modified, we must invalidate
2246 the corresponding translated code. */
2247 tb_invalidate_phys_page(address, pc, puc);
2248 #ifdef DEBUG_TB_CHECK
2249 tb_invalidate_check(address);
2250 #endif
2251 mmap_unlock();
2252 return 1;
2255 mmap_unlock();
2256 return 0;
2259 static inline void tlb_set_dirty(CPUState *env,
2260 unsigned long addr, target_ulong vaddr)
2263 #endif /* defined(CONFIG_USER_ONLY) */
2265 #if !defined(CONFIG_USER_ONLY)
2266 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2267 ram_addr_t memory);
2268 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2269 ram_addr_t orig_memory);
2270 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2271 need_subpage) \
2272 do { \
2273 if (addr > start_addr) \
2274 start_addr2 = 0; \
2275 else { \
2276 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2277 if (start_addr2 > 0) \
2278 need_subpage = 1; \
2281 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2282 end_addr2 = TARGET_PAGE_SIZE - 1; \
2283 else { \
2284 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2285 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2286 need_subpage = 1; \
2288 } while (0)
2290 /* register physical memory. 'size' must be a multiple of the target
2291 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2292 io memory page */
2293 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2294 ram_addr_t size,
2295 ram_addr_t phys_offset)
2297 target_phys_addr_t addr, end_addr;
2298 PhysPageDesc *p;
2299 CPUState *env;
2300 ram_addr_t orig_size = size;
2301 void *subpage;
2303 #ifdef USE_KQEMU
2304 /* XXX: should not depend on cpu context */
2305 env = first_cpu;
2306 if (env->kqemu_enabled) {
2307 kqemu_set_phys_mem(start_addr, size, phys_offset);
2309 #endif
2310 if (kvm_enabled())
2311 kvm_set_phys_mem(start_addr, size, phys_offset);
2313 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2314 end_addr = start_addr + (target_phys_addr_t)size;
2315 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2316 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2317 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2318 ram_addr_t orig_memory = p->phys_offset;
2319 target_phys_addr_t start_addr2, end_addr2;
2320 int need_subpage = 0;
2322 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2323 need_subpage);
2324 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2325 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2326 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2327 &p->phys_offset, orig_memory);
2328 } else {
2329 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2330 >> IO_MEM_SHIFT];
2332 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2333 } else {
2334 p->phys_offset = phys_offset;
2335 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2336 (phys_offset & IO_MEM_ROMD))
2337 phys_offset += TARGET_PAGE_SIZE;
2339 } else {
2340 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2341 p->phys_offset = phys_offset;
2342 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2343 (phys_offset & IO_MEM_ROMD))
2344 phys_offset += TARGET_PAGE_SIZE;
2345 else {
2346 target_phys_addr_t start_addr2, end_addr2;
2347 int need_subpage = 0;
2349 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2350 end_addr2, need_subpage);
2352 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2353 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2354 &p->phys_offset, IO_MEM_UNASSIGNED);
2355 subpage_register(subpage, start_addr2, end_addr2,
2356 phys_offset);
2362 /* since each CPU stores ram addresses in its TLB cache, we must
2363 reset the modified entries */
2364 /* XXX: slow! */
2365 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2366 tlb_flush(env, 1);
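/*
 * Illustrative usage sketch, not part of this file: board code allocates
 * guest RAM and maps it, then maps an MMIO region backed by registered
 * callbacks. The addresses, sizes and the my_mmio_read/my_mmio_write names
 * are placeholders.
 *
 *   ram_addr_t ram_offset = qemu_ram_alloc(0x00800000);
 *   cpu_register_physical_memory(0x00000000, 0x00800000,
 *                                ram_offset | IO_MEM_RAM);
 *
 *   int io = cpu_register_io_memory(0, my_mmio_read, my_mmio_write, s);
 *   cpu_register_physical_memory(0x10000000, 0x1000, io);
 */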
2370 /* XXX: temporary until new memory mapping API */
2371 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2373 PhysPageDesc *p;
2375 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2376 if (!p)
2377 return IO_MEM_UNASSIGNED;
2378 return p->phys_offset;
2381 /* XXX: better than nothing */
2382 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2384 ram_addr_t addr;
2385 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2386 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2387 (uint64_t)size, (uint64_t)phys_ram_size);
2388 abort();
2390 addr = phys_ram_alloc_offset;
2391 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2392 return addr;
2395 void qemu_ram_free(ram_addr_t addr)
2399 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2401 #ifdef DEBUG_UNASSIGNED
2402 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2403 #endif
2404 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2405 do_unassigned_access(addr, 0, 0, 0, 1);
2406 #endif
2407 return 0;
2410 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2412 #ifdef DEBUG_UNASSIGNED
2413 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2414 #endif
2415 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2416 do_unassigned_access(addr, 0, 0, 0, 2);
2417 #endif
2418 return 0;
2421 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2423 #ifdef DEBUG_UNASSIGNED
2424 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2425 #endif
2426 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2427 do_unassigned_access(addr, 0, 0, 0, 4);
2428 #endif
2429 return 0;
2432 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2434 #ifdef DEBUG_UNASSIGNED
2435 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2436 #endif
2437 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2438 do_unassigned_access(addr, 1, 0, 0, 1);
2439 #endif
2442 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2444 #ifdef DEBUG_UNASSIGNED
2445 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2446 #endif
2447 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2448 do_unassigned_access(addr, 1, 0, 0, 2);
2449 #endif
2452 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2454 #ifdef DEBUG_UNASSIGNED
2455 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2456 #endif
2457 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2458 do_unassigned_access(addr, 1, 0, 0, 4);
2459 #endif
2462 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2463 unassigned_mem_readb,
2464 unassigned_mem_readw,
2465 unassigned_mem_readl,
2468 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2469 unassigned_mem_writeb,
2470 unassigned_mem_writew,
2471 unassigned_mem_writel,
2474 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2475 uint32_t val)
2477 int dirty_flags;
2478 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2479 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2480 #if !defined(CONFIG_USER_ONLY)
2481 tb_invalidate_phys_page_fast(ram_addr, 1);
2482 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2483 #endif
2485 stb_p(phys_ram_base + ram_addr, val);
2486 #ifdef USE_KQEMU
2487 if (cpu_single_env->kqemu_enabled &&
2488 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2489 kqemu_modify_page(cpu_single_env, ram_addr);
2490 #endif
2491 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2492 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2493 /* we remove the notdirty callback only if the code has been
2494 flushed */
2495 if (dirty_flags == 0xff)
2496 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2499 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2500 uint32_t val)
2502 int dirty_flags;
2503 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2504 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2505 #if !defined(CONFIG_USER_ONLY)
2506 tb_invalidate_phys_page_fast(ram_addr, 2);
2507 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2508 #endif
2510 stw_p(phys_ram_base + ram_addr, val);
2511 #ifdef USE_KQEMU
2512 if (cpu_single_env->kqemu_enabled &&
2513 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2514 kqemu_modify_page(cpu_single_env, ram_addr);
2515 #endif
2516 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2517 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2518 /* we remove the notdirty callback only if the code has been
2519 flushed */
2520 if (dirty_flags == 0xff)
2521 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2524 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2525 uint32_t val)
2527 int dirty_flags;
2528 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2529 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2530 #if !defined(CONFIG_USER_ONLY)
2531 tb_invalidate_phys_page_fast(ram_addr, 4);
2532 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2533 #endif
2535 stl_p(phys_ram_base + ram_addr, val);
2536 #ifdef USE_KQEMU
2537 if (cpu_single_env->kqemu_enabled &&
2538 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2539 kqemu_modify_page(cpu_single_env, ram_addr);
2540 #endif
2541 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2542 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2543 /* we remove the notdirty callback only if the code has been
2544 flushed */
2545 if (dirty_flags == 0xff)
2546 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2549 static CPUReadMemoryFunc *error_mem_read[3] = {
2550 NULL, /* never used */
2551 NULL, /* never used */
2552 NULL, /* never used */
2555 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2556 notdirty_mem_writeb,
2557 notdirty_mem_writew,
2558 notdirty_mem_writel,
2561 /* Generate a debug exception if a watchpoint has been hit. */
2562 static void check_watchpoint(int offset, int len_mask, int flags)
2564 CPUState *env = cpu_single_env;
2565 target_ulong pc, cs_base;
2566 TranslationBlock *tb;
2567 target_ulong vaddr;
2568 CPUWatchpoint *wp;
2569 int cpu_flags;
2571 if (env->watchpoint_hit) {
2572 /* We re-entered the check after replacing the TB. Now raise
2573 * the debug interrupt so that it will trigger after the
2574 * current instruction. */
2575 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2576 return;
2578 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2579 for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
2580 if ((vaddr == (wp->vaddr & len_mask) ||
2581 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2582 wp->flags |= BP_WATCHPOINT_HIT;
2583 if (!env->watchpoint_hit) {
2584 env->watchpoint_hit = wp;
2585 tb = tb_find_pc(env->mem_io_pc);
2586 if (!tb) {
2587 cpu_abort(env, "check_watchpoint: could not find TB for "
2588 "pc=%p", (void *)env->mem_io_pc);
2590 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2591 tb_phys_invalidate(tb, -1);
2592 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2593 env->exception_index = EXCP_DEBUG;
2594 } else {
2595 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2596 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2598 cpu_resume_from_signal(env, NULL);
2600 } else {
2601 wp->flags &= ~BP_WATCHPOINT_HIT;
2606 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2607 so these check for a hit then pass through to the normal out-of-line
2608 phys routines. */
2609 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2611 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2612 return ldub_phys(addr);
2615 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2617 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2618 return lduw_phys(addr);
2621 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2623 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2624 return ldl_phys(addr);
2627 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2628 uint32_t val)
2630 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2631 stb_phys(addr, val);
2634 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2635 uint32_t val)
2637 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2638 stw_phys(addr, val);
2641 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2642 uint32_t val)
2644 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2645 stl_phys(addr, val);
2648 static CPUReadMemoryFunc *watch_mem_read[3] = {
2649 watch_mem_readb,
2650 watch_mem_readw,
2651 watch_mem_readl,
2654 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2655 watch_mem_writeb,
2656 watch_mem_writew,
2657 watch_mem_writel,
2660 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2661 unsigned int len)
2663 uint32_t ret;
2664 unsigned int idx;
2666 idx = SUBPAGE_IDX(addr - mmio->base);
2667 #if defined(DEBUG_SUBPAGE)
2668 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2669 mmio, len, addr, idx);
2670 #endif
2671 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2673 return ret;
2676 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2677 uint32_t value, unsigned int len)
2679 unsigned int idx;
2681 idx = SUBPAGE_IDX(addr - mmio->base);
2682 #if defined(DEBUG_SUBPAGE)
2683 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2684 mmio, len, addr, idx, value);
2685 #endif
2686 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2689 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2691 #if defined(DEBUG_SUBPAGE)
2692 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2693 #endif
2695 return subpage_readlen(opaque, addr, 0);
2698 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2699 uint32_t value)
2701 #if defined(DEBUG_SUBPAGE)
2702 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2703 #endif
2704 subpage_writelen(opaque, addr, value, 0);
2707 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2709 #if defined(DEBUG_SUBPAGE)
2710 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2711 #endif
2713 return subpage_readlen(opaque, addr, 1);
2716 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2717 uint32_t value)
2719 #if defined(DEBUG_SUBPAGE)
2720 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2721 #endif
2722 subpage_writelen(opaque, addr, value, 1);
2725 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2727 #if defined(DEBUG_SUBPAGE)
2728 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2729 #endif
2731 return subpage_readlen(opaque, addr, 2);
2734 static void subpage_writel (void *opaque,
2735 target_phys_addr_t addr, uint32_t value)
2737 #if defined(DEBUG_SUBPAGE)
2738 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2739 #endif
2740 subpage_writelen(opaque, addr, value, 2);
2743 static CPUReadMemoryFunc *subpage_read[] = {
2744 &subpage_readb,
2745 &subpage_readw,
2746 &subpage_readl,
2749 static CPUWriteMemoryFunc *subpage_write[] = {
2750 &subpage_writeb,
2751 &subpage_writew,
2752 &subpage_writel,
2755 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2756 ram_addr_t memory)
2758 int idx, eidx;
2759 unsigned int i;
2761 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2762 return -1;
2763 idx = SUBPAGE_IDX(start);
2764 eidx = SUBPAGE_IDX(end);
2765 #if defined(DEBUG_SUBPAGE)
2766 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2767 mmio, start, end, idx, eidx, memory);
2768 #endif
2769 memory >>= IO_MEM_SHIFT;
2770 for (; idx <= eidx; idx++) {
2771 for (i = 0; i < 4; i++) {
2772 if (io_mem_read[memory][i]) {
2773 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2774 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2776 if (io_mem_write[memory][i]) {
2777 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2778 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2783 return 0;
2786 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2787 ram_addr_t orig_memory)
2789 subpage_t *mmio;
2790 int subpage_memory;
2792 mmio = qemu_mallocz(sizeof(subpage_t));
2793 if (mmio != NULL) {
2794 mmio->base = base;
2795 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2796 #if defined(DEBUG_SUBPAGE)
2797 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2798 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2799 #endif
2800 *phys = subpage_memory | IO_MEM_SUBPAGE;
2801 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2804 return mmio;
2807 static int get_free_io_mem_idx(void)
2809 int i;
2811 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2812 if (!io_mem_used[i]) {
2813 io_mem_used[i] = 1;
2814 return i;
2817 return -1;
2820 static void io_mem_init(void)
2822 int i;
2824 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2825 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2826 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2827 for (i=0; i<5; i++)
2828 io_mem_used[i] = 1;
2830 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2831 watch_mem_write, NULL);
2832 /* alloc dirty bits array */
2833 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2834 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2837 /* mem_read and mem_write are arrays of functions containing the
2838 function to access byte (index 0), word (index 1) and dword (index
2839 2). Functions can be omitted with a NULL function pointer. The
2840 registered functions may be modified dynamically later.
2841 If io_index is non-zero, the corresponding io zone is
2842 modified. If it is zero, a new io zone is allocated. The return
2843 value can be used with cpu_register_physical_memory(). -1 is
2844 returned on error. */
2845 int cpu_register_io_memory(int io_index,
2846 CPUReadMemoryFunc **mem_read,
2847 CPUWriteMemoryFunc **mem_write,
2848 void *opaque)
2850 int i, subwidth = 0;
2852 if (io_index <= 0) {
2853 io_index = get_free_io_mem_idx();
2854 if (io_index == -1)
2855 return io_index;
2856 } else {
2857 if (io_index >= IO_MEM_NB_ENTRIES)
2858 return -1;
2861 for(i = 0;i < 3; i++) {
2862 if (!mem_read[i] || !mem_write[i])
2863 subwidth = IO_MEM_SUBWIDTH;
2864 io_mem_read[io_index][i] = mem_read[i];
2865 io_mem_write[io_index][i] = mem_write[i];
2867 io_mem_opaque[io_index] = opaque;
2868 return (io_index << IO_MEM_SHIFT) | subwidth;
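/*
 * Illustrative sketch, not part of this file: the mem_read/mem_write tables
 * passed to cpu_register_io_memory() are indexed by access size (0 = byte,
 * 1 = word, 2 = dword). my_readb/... and the opaque pointer are placeholders.
 *
 *   static CPUReadMemoryFunc *my_read[3] = {
 *       my_readb, my_readw, my_readl,
 *   };
 *   static CPUWriteMemoryFunc *my_write[3] = {
 *       my_writeb, my_writew, my_writel,
 *   };
 *
 *   int io_index = cpu_register_io_memory(0, my_read, my_write, opaque);
 *   cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
 */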
2871 void cpu_unregister_io_memory(int io_table_address)
2873 int i;
2874 int io_index = io_table_address >> IO_MEM_SHIFT;
2876 for (i=0;i < 3; i++) {
2877 io_mem_read[io_index][i] = unassigned_mem_read[i];
2878 io_mem_write[io_index][i] = unassigned_mem_write[i];
2880 io_mem_opaque[io_index] = NULL;
2881 io_mem_used[io_index] = 0;
2884 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2886 return io_mem_write[io_index >> IO_MEM_SHIFT];
2889 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2891 return io_mem_read[io_index >> IO_MEM_SHIFT];
2894 #endif /* !defined(CONFIG_USER_ONLY) */
2896 /* physical memory access (slow version, mainly for debug) */
2897 #if defined(CONFIG_USER_ONLY)
2898 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2899 int len, int is_write)
2901 int l, flags;
2902 target_ulong page;
2903 void * p;
2905 while (len > 0) {
2906 page = addr & TARGET_PAGE_MASK;
2907 l = (page + TARGET_PAGE_SIZE) - addr;
2908 if (l > len)
2909 l = len;
2910 flags = page_get_flags(page);
2911 if (!(flags & PAGE_VALID))
2912 return;
2913 if (is_write) {
2914 if (!(flags & PAGE_WRITE))
2915 return;
2916 /* XXX: this code should not depend on lock_user */
2917 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2918 /* FIXME - should this return an error rather than just fail? */
2919 return;
2920 memcpy(p, buf, l);
2921 unlock_user(p, addr, l);
2922 } else {
2923 if (!(flags & PAGE_READ))
2924 return;
2925 /* XXX: this code should not depend on lock_user */
2926 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2927 /* FIXME - should this return an error rather than just fail? */
2928 return;
2929 memcpy(buf, p, l);
2930 unlock_user(p, addr, 0);
2932 len -= l;
2933 buf += l;
2934 addr += l;
2938 #else
2939 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2940 int len, int is_write)
2942 int l, io_index;
2943 uint8_t *ptr;
2944 uint32_t val;
2945 target_phys_addr_t page;
2946 unsigned long pd;
2947 PhysPageDesc *p;
2949 while (len > 0) {
2950 page = addr & TARGET_PAGE_MASK;
2951 l = (page + TARGET_PAGE_SIZE) - addr;
2952 if (l > len)
2953 l = len;
2954 p = phys_page_find(page >> TARGET_PAGE_BITS);
2955 if (!p) {
2956 pd = IO_MEM_UNASSIGNED;
2957 } else {
2958 pd = p->phys_offset;
2961 if (is_write) {
2962 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2963 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2964 /* XXX: could force cpu_single_env to NULL to avoid
2965 potential bugs */
2966 if (l >= 4 && ((addr & 3) == 0)) {
2967 /* 32 bit write access */
2968 val = ldl_p(buf);
2969 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2970 l = 4;
2971 } else if (l >= 2 && ((addr & 1) == 0)) {
2972 /* 16 bit write access */
2973 val = lduw_p(buf);
2974 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2975 l = 2;
2976 } else {
2977 /* 8 bit write access */
2978 val = ldub_p(buf);
2979 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2980 l = 1;
2982 } else {
2983 unsigned long addr1;
2984 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2985 /* RAM case */
2986 ptr = phys_ram_base + addr1;
2987 memcpy(ptr, buf, l);
2988 if (!cpu_physical_memory_is_dirty(addr1)) {
2989 /* invalidate code */
2990 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2991 /* set dirty bit */
2992 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2993 (0xff & ~CODE_DIRTY_FLAG);
2995 /* qemu doesn't execute guest code directly, but kvm does,
2996 therefore flush instruction caches */
2997 if (kvm_enabled())
2998 flush_icache_range((unsigned long)ptr,
2999 ((unsigned long)ptr)+l);
3001 } else {
3002 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3003 !(pd & IO_MEM_ROMD)) {
3004 /* I/O case */
3005 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3006 if (l >= 4 && ((addr & 3) == 0)) {
3007 /* 32 bit read access */
3008 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3009 stl_p(buf, val);
3010 l = 4;
3011 } else if (l >= 2 && ((addr & 1) == 0)) {
3012 /* 16 bit read access */
3013 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3014 stw_p(buf, val);
3015 l = 2;
3016 } else {
3017 /* 8 bit read access */
3018 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3019 stb_p(buf, val);
3020 l = 1;
3022 } else {
3023 /* RAM case */
3024 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3025 (addr & ~TARGET_PAGE_MASK);
3026 memcpy(buf, ptr, l);
3029 len -= l;
3030 buf += l;
3031 addr += l;
3035 /* used for ROM loading: can write in RAM and ROM */
3036 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3037 const uint8_t *buf, int len)
3039 int l;
3040 uint8_t *ptr;
3041 target_phys_addr_t page;
3042 unsigned long pd;
3043 PhysPageDesc *p;
3045 while (len > 0) {
3046 page = addr & TARGET_PAGE_MASK;
3047 l = (page + TARGET_PAGE_SIZE) - addr;
3048 if (l > len)
3049 l = len;
3050 p = phys_page_find(page >> TARGET_PAGE_BITS);
3051 if (!p) {
3052 pd = IO_MEM_UNASSIGNED;
3053 } else {
3054 pd = p->phys_offset;
3057 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3058 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3059 !(pd & IO_MEM_ROMD)) {
3060 /* do nothing */
3061 } else {
3062 unsigned long addr1;
3063 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3064 /* ROM/RAM case */
3065 ptr = phys_ram_base + addr1;
3066 memcpy(ptr, buf, l);
3068 len -= l;
3069 buf += l;
3070 addr += l;
3075 /* warning: addr must be aligned */
3076 uint32_t ldl_phys(target_phys_addr_t addr)
3078 int io_index;
3079 uint8_t *ptr;
3080 uint32_t val;
3081 unsigned long pd;
3082 PhysPageDesc *p;
3084 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3085 if (!p) {
3086 pd = IO_MEM_UNASSIGNED;
3087 } else {
3088 pd = p->phys_offset;
3091 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3092 !(pd & IO_MEM_ROMD)) {
3093 /* I/O case */
3094 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3095 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3096 } else {
3097 /* RAM case */
3098 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3099 (addr & ~TARGET_PAGE_MASK);
3100 val = ldl_p(ptr);
3102 return val;
3105 /* warning: addr must be aligned */
3106 uint64_t ldq_phys(target_phys_addr_t addr)
3108 int io_index;
3109 uint8_t *ptr;
3110 uint64_t val;
3111 unsigned long pd;
3112 PhysPageDesc *p;
3114 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3115 if (!p) {
3116 pd = IO_MEM_UNASSIGNED;
3117 } else {
3118 pd = p->phys_offset;
3121 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3122 !(pd & IO_MEM_ROMD)) {
3123 /* I/O case */
3124 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3125 #ifdef TARGET_WORDS_BIGENDIAN
3126 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3127 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3128 #else
3129 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3130 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3131 #endif
3132 } else {
3133 /* RAM case */
3134 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3135 (addr & ~TARGET_PAGE_MASK);
3136 val = ldq_p(ptr);
3138 return val;
3141 /* XXX: optimize */
3142 uint32_t ldub_phys(target_phys_addr_t addr)
3144 uint8_t val;
3145 cpu_physical_memory_read(addr, &val, 1);
3146 return val;
3149 /* XXX: optimize */
3150 uint32_t lduw_phys(target_phys_addr_t addr)
3152 uint16_t val;
3153 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3154 return tswap16(val);
3157 #ifdef __GNUC__
3158 #define likely(x) __builtin_expect(!!(x), 1)
3159 #define unlikely(x) __builtin_expect(!!(x), 0)
3160 #else
3161 #define likely(x) x
3162 #define unlikely(x) x
3163 #endif
3165 /* warning: addr must be aligned. The ram page is not marked as dirty
3166 and the code inside is not invalidated. It is useful if the dirty
3167 bits are used to track modified PTEs */
3168 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3170 int io_index;
3171 uint8_t *ptr;
3172 unsigned long pd;
3173 PhysPageDesc *p;
3175 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3176 if (!p) {
3177 pd = IO_MEM_UNASSIGNED;
3178 } else {
3179 pd = p->phys_offset;
3182 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3183 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3184 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3185 } else {
3186 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3187 ptr = phys_ram_base + addr1;
3188 stl_p(ptr, val);
3190 if (unlikely(in_migration)) {
3191 if (!cpu_physical_memory_is_dirty(addr1)) {
3192 /* invalidate code */
3193 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3194 /* set dirty bit */
3195 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3196 (0xff & ~CODE_DIRTY_FLAG);
3202 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3204 int io_index;
3205 uint8_t *ptr;
3206 unsigned long pd;
3207 PhysPageDesc *p;
3209 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3210 if (!p) {
3211 pd = IO_MEM_UNASSIGNED;
3212 } else {
3213 pd = p->phys_offset;
3216 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3217 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3218 #ifdef TARGET_WORDS_BIGENDIAN
3219 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3220 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3221 #else
3222 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3223 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3224 #endif
3225 } else {
3226 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3227 (addr & ~TARGET_PAGE_MASK);
3228 stq_p(ptr, val);
3232 /* warning: addr must be aligned */
3233 void stl_phys(target_phys_addr_t addr, uint32_t val)
3235 int io_index;
3236 uint8_t *ptr;
3237 unsigned long pd;
3238 PhysPageDesc *p;
3240 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3241 if (!p) {
3242 pd = IO_MEM_UNASSIGNED;
3243 } else {
3244 pd = p->phys_offset;
3247 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3248 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3249 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3250 } else {
3251 unsigned long addr1;
3252 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3253 /* RAM case */
3254 ptr = phys_ram_base + addr1;
3255 stl_p(ptr, val);
3256 if (!cpu_physical_memory_is_dirty(addr1)) {
3257 /* invalidate code */
3258 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3259 /* set dirty bit */
3260 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3261 (0xff & ~CODE_DIRTY_FLAG);
3266 /* XXX: optimize */
3267 void stb_phys(target_phys_addr_t addr, uint32_t val)
3269 uint8_t v = val;
3270 cpu_physical_memory_write(addr, &v, 1);
3273 /* XXX: optimize */
3274 void stw_phys(target_phys_addr_t addr, uint32_t val)
3276 uint16_t v = tswap16(val);
3277 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3280 /* XXX: optimize */
3281 void stq_phys(target_phys_addr_t addr, uint64_t val)
3283 val = tswap64(val);
3284 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3287 #endif
3289 /* virtual memory access for debug */
3290 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3291 uint8_t *buf, int len, int is_write)
3293 int l;
3294 target_phys_addr_t phys_addr;
3295 target_ulong page;
3297 while (len > 0) {
3298 page = addr & TARGET_PAGE_MASK;
3299 phys_addr = cpu_get_phys_page_debug(env, page);
3300 /* if no physical page mapped, return an error */
3301 if (phys_addr == -1)
3302 return -1;
3303 l = (page + TARGET_PAGE_SIZE) - addr;
3304 if (l > len)
3305 l = len;
3306 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3307 buf, l, is_write);
3308 len -= l;
3309 buf += l;
3310 addr += l;
3312 return 0;
3315 /* in deterministic execution mode, instructions doing device I/Os
3316 must be at the end of the TB */
3317 void cpu_io_recompile(CPUState *env, void *retaddr)
3319 TranslationBlock *tb;
3320 uint32_t n, cflags;
3321 target_ulong pc, cs_base;
3322 uint64_t flags;
3324 tb = tb_find_pc((unsigned long)retaddr);
3325 if (!tb) {
3326 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3327 retaddr);
3329 n = env->icount_decr.u16.low + tb->icount;
3330 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3331 /* Calculate how many instructions had been executed before the fault
3332 occurred. */
3333 n = n - env->icount_decr.u16.low;
3334 /* Generate a new TB ending on the I/O insn. */
3335 n++;
3336 /* On MIPS and SH, delay slot instructions can only be restarted if
3337 they were already the first instruction in the TB. If this is not
3338 the first instruction in a TB then re-execute the preceding
3339 branch. */
3340 #if defined(TARGET_MIPS)
3341 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3342 env->active_tc.PC -= 4;
3343 env->icount_decr.u16.low++;
3344 env->hflags &= ~MIPS_HFLAG_BMASK;
3346 #elif defined(TARGET_SH4)
3347 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3348 && n > 1) {
3349 env->pc -= 2;
3350 env->icount_decr.u16.low++;
3351 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3353 #endif
3354 /* This should never happen. */
3355 if (n > CF_COUNT_MASK)
3356 cpu_abort(env, "TB too big during recompile");
3358 cflags = n | CF_LAST_IO;
3359 pc = tb->pc;
3360 cs_base = tb->cs_base;
3361 flags = tb->flags;
3362 tb_phys_invalidate(tb, -1);
3363 /* FIXME: In theory this could raise an exception. In practice
3364 we have already translated the block once so it's probably ok. */
3365 tb_gen_code(env, pc, cs_base, flags, cflags);
3366 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3367 the first in the TB) then we end up generating a whole new TB and
3368 repeating the fault, which is horribly inefficient.
3369 Better would be to execute just this insn uncached, or generate a
3370 second new TB. */
3371 cpu_resume_from_signal(env, NULL);
3374 void dump_exec_info(FILE *f,
3375 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3377 int i, target_code_size, max_target_code_size;
3378 int direct_jmp_count, direct_jmp2_count, cross_page;
3379 TranslationBlock *tb;
3381 target_code_size = 0;
3382 max_target_code_size = 0;
3383 cross_page = 0;
3384 direct_jmp_count = 0;
3385 direct_jmp2_count = 0;
3386 for(i = 0; i < nb_tbs; i++) {
3387 tb = &tbs[i];
3388 target_code_size += tb->size;
3389 if (tb->size > max_target_code_size)
3390 max_target_code_size = tb->size;
3391 if (tb->page_addr[1] != -1)
3392 cross_page++;
3393 if (tb->tb_next_offset[0] != 0xffff) {
3394 direct_jmp_count++;
3395 if (tb->tb_next_offset[1] != 0xffff) {
3396 direct_jmp2_count++;
3400 /* XXX: avoid using doubles ? */
3401 cpu_fprintf(f, "Translation buffer state:\n");
3402 cpu_fprintf(f, "gen code size %ld/%ld\n",
3403 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3404 cpu_fprintf(f, "TB count %d/%d\n",
3405 nb_tbs, code_gen_max_blocks);
3406 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3407 nb_tbs ? target_code_size / nb_tbs : 0,
3408 max_target_code_size);
3409 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3410 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3411 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3412 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3413 cross_page,
3414 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3415 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3416 direct_jmp_count,
3417 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3418 direct_jmp2_count,
3419 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3420 cpu_fprintf(f, "\nStatistics:\n");
3421 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3422 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3423 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3424 tcg_dump_info(f, cpu_fprintf);
3427 #if !defined(CONFIG_USER_ONLY)
3429 #define MMUSUFFIX _cmmu
3430 #define GETPC() NULL
3431 #define env cpu_single_env
3432 #define SOFTMMU_CODE_ACCESS
3434 #define SHIFT 0
3435 #include "softmmu_template.h"
3437 #define SHIFT 1
3438 #include "softmmu_template.h"
3440 #define SHIFT 2
3441 #include "softmmu_template.h"
3443 #define SHIFT 3
3444 #include "softmmu_template.h"
3446 #undef env
3448 #endif