Avoid running audio ctls when the VM is not running
[qemu-kvm/fedora.git] / exec.c
blob7ed7e3ed32654ca512f5482f6f5d156a692a1028
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
115 int phys_ram_fd;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
120 #endif
122 CPUState *first_cpu;
123 /* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129 int use_icount = 0;
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132 int64_t qemu_icount;
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149 ram_addr_t region_offset;
150 } PhysPageDesc;
152 #define L2_BITS 10
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #else
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161 #endif
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
166 unsigned long qemu_real_host_page_size;
167 unsigned long qemu_host_page_bits;
168 unsigned long qemu_host_page_size;
169 unsigned long qemu_host_page_mask;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc *l1_map[L1_SIZE];
173 static PhysPageDesc **l1_phys_map;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182 char io_mem_used[IO_MEM_NB_ENTRIES];
183 static int io_mem_watch;
184 #endif
186 /* log support */
187 static const char *logfilename = "/tmp/qemu.log";
188 FILE *logfile;
189 int loglevel;
190 static int log_append = 0;
192 /* statistics */
193 static int tlb_flush_count;
194 static int tb_flush_count;
195 static int tb_phys_invalidate_count;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t {
199 target_phys_addr_t base;
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204 } subpage_t;
206 #ifdef _WIN32
207 static void map_exec(void *addr, long size)
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
214 #else
215 static void map_exec(void *addr, long size)
217 unsigned long start, end, page_size;
219 page_size = getpagesize();
220 start = (unsigned long)addr;
221 start &= ~(page_size - 1);
223 end = (unsigned long)addr + size;
224 end += page_size - 1;
225 end &= ~(page_size - 1);
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
230 #endif
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
235 TARGET_PAGE_SIZE */
236 #ifdef _WIN32
238 SYSTEM_INFO system_info;
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
243 #else
244 qemu_real_host_page_size = getpagesize();
245 #endif
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
263 mmap_lock();
264 last_brk = (unsigned long)sbrk(0);
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274 page_set_flags(startaddr & TARGET_PAGE_MASK,
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
278 } while (!feof(f));
279 fclose(f);
281 mmap_unlock();
283 #endif
286 static inline PageDesc **page_l1_map(target_ulong index)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292 return NULL;
293 #endif
294 return &l1_map[index >> L2_BITS];
297 static inline PageDesc *page_find_alloc(target_ulong index)
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 size_t len = sizeof(PageDesc) * L2_SIZE;
309 /* Don't use qemu_malloc because it may recurse. */
310 p = mmap(0, len, PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
312 *lp = p;
313 if (h2g_valid(p)) {
314 unsigned long addr = h2g(p);
315 page_set_flags(addr & TARGET_PAGE_MASK,
316 TARGET_PAGE_ALIGN(addr + len),
317 PAGE_RESERVED);
319 #else
320 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 *lp = p;
322 #endif
324 return p + (index & (L2_SIZE - 1));
327 static inline PageDesc *page_find(target_ulong index)
329 PageDesc **lp, *p;
330 lp = page_l1_map(index);
331 if (!lp)
332 return NULL;
334 p = *lp;
335 if (!p)
336 return 0;
337 return p + (index & (L2_SIZE - 1));
340 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
342 void **lp, **p;
343 PhysPageDesc *pd;
345 p = (void **)l1_phys_map;
346 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
348 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350 #endif
351 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352 p = *lp;
353 if (!p) {
354 /* allocate if not found */
355 if (!alloc)
356 return NULL;
357 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358 memset(p, 0, sizeof(void *) * L1_SIZE);
359 *lp = p;
361 #endif
362 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363 pd = *lp;
364 if (!pd) {
365 int i;
366 /* allocate if not found */
367 if (!alloc)
368 return NULL;
369 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 *lp = pd;
371 for (i = 0; i < L2_SIZE; i++)
372 pd[i].phys_offset = IO_MEM_UNASSIGNED;
374 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
377 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
379 return phys_page_find_alloc(index, 0);
382 #if !defined(CONFIG_USER_ONLY)
383 static void tlb_protect_code(ram_addr_t ram_addr);
384 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
385 target_ulong vaddr);
386 #define mmap_lock() do { } while(0)
387 #define mmap_unlock() do { } while(0)
388 #endif
390 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
392 #if defined(CONFIG_USER_ONLY)
393 /* Currently it is not recommanded to allocate big chunks of data in
394 user mode. It will change when a dedicated libc will be used */
395 #define USE_STATIC_CODE_GEN_BUFFER
396 #endif
398 #ifdef USE_STATIC_CODE_GEN_BUFFER
399 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
400 #endif
402 static void code_gen_alloc(unsigned long tb_size)
404 #ifdef USE_STATIC_CODE_GEN_BUFFER
405 code_gen_buffer = static_code_gen_buffer;
406 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
407 map_exec(code_gen_buffer, code_gen_buffer_size);
408 #else
409 code_gen_buffer_size = tb_size;
410 if (code_gen_buffer_size == 0) {
411 #if defined(CONFIG_USER_ONLY)
412 /* in user mode, phys_ram_size is not meaningful */
413 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
414 #else
415 /* XXX: needs ajustments */
416 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
417 #endif
419 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
420 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
421 /* The code gen buffer location may have constraints depending on
422 the host cpu and OS */
423 #if defined(__linux__)
425 int flags;
426 void *start = NULL;
428 flags = MAP_PRIVATE | MAP_ANONYMOUS;
429 #if defined(__x86_64__)
430 flags |= MAP_32BIT;
431 /* Cannot map more than that */
432 if (code_gen_buffer_size > (800 * 1024 * 1024))
433 code_gen_buffer_size = (800 * 1024 * 1024);
434 #elif defined(__sparc_v9__)
435 // Map the buffer below 2G, so we can use direct calls and branches
436 flags |= MAP_FIXED;
437 start = (void *) 0x60000000UL;
438 if (code_gen_buffer_size > (512 * 1024 * 1024))
439 code_gen_buffer_size = (512 * 1024 * 1024);
440 #elif defined(__arm__)
441 /* Map the buffer below 32M, so we can use direct calls and branches */
442 flags |= MAP_FIXED;
443 start = (void *) 0x01000000UL;
444 if (code_gen_buffer_size > 16 * 1024 * 1024)
445 code_gen_buffer_size = 16 * 1024 * 1024;
446 #endif
447 code_gen_buffer = mmap(start, code_gen_buffer_size,
448 PROT_WRITE | PROT_READ | PROT_EXEC,
449 flags, -1, 0);
450 if (code_gen_buffer == MAP_FAILED) {
451 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
452 exit(1);
455 #elif defined(__FreeBSD__)
457 int flags;
458 void *addr = NULL;
459 flags = MAP_PRIVATE | MAP_ANONYMOUS;
460 #if defined(__x86_64__)
461 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
462 * 0x40000000 is free */
463 flags |= MAP_FIXED;
464 addr = (void *)0x40000000;
465 /* Cannot map more than that */
466 if (code_gen_buffer_size > (800 * 1024 * 1024))
467 code_gen_buffer_size = (800 * 1024 * 1024);
468 #endif
469 code_gen_buffer = mmap(addr, code_gen_buffer_size,
470 PROT_WRITE | PROT_READ | PROT_EXEC,
471 flags, -1, 0);
472 if (code_gen_buffer == MAP_FAILED) {
473 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474 exit(1);
477 #else
478 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
479 map_exec(code_gen_buffer, code_gen_buffer_size);
480 #endif
481 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
482 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
483 code_gen_buffer_max_size = code_gen_buffer_size -
484 code_gen_max_block_size();
485 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
486 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
489 /* Must be called before using the QEMU cpus. 'tb_size' is the size
490 (in bytes) allocated to the translation buffer. Zero means default
491 size. */
492 void cpu_exec_init_all(unsigned long tb_size)
494 cpu_gen_init();
495 code_gen_alloc(tb_size);
496 code_gen_ptr = code_gen_buffer;
497 page_init();
498 #if !defined(CONFIG_USER_ONLY)
499 io_mem_init();
500 #endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

/* Serialise the architecture-independent CPU state (halted flag and
   pending interrupt mask) for savevm/migration. */
static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

/* Counterpart of cpu_common_save.  Rejects unknown versions and flushes
   the TLB since loaded state invalidates any cached translations. */
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif
530 void cpu_exec_init(CPUState *env)
532 CPUState **penv;
533 int cpu_index;
535 env->next_cpu = NULL;
536 penv = &first_cpu;
537 cpu_index = 0;
538 while (*penv != NULL) {
539 penv = (CPUState **)&(*penv)->next_cpu;
540 cpu_index++;
542 env->cpu_index = cpu_index;
543 TAILQ_INIT(&env->breakpoints);
544 TAILQ_INIT(&env->watchpoints);
545 *penv = env;
546 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
547 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
548 cpu_common_save, cpu_common_load, env);
549 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
550 cpu_save, cpu_load, env);
551 #endif
554 static inline void invalidate_page_bitmap(PageDesc *p)
556 if (p->code_bitmap) {
557 qemu_free(p->code_bitmap);
558 p->code_bitmap = NULL;
560 p->code_write_count = 0;
563 /* set to NULL all the 'first_tb' fields in all PageDescs */
564 static void page_flush_tb(void)
566 int i, j;
567 PageDesc *p;
569 for(i = 0; i < L1_SIZE; i++) {
570 p = l1_map[i];
571 if (p) {
572 for(j = 0; j < L2_SIZE; j++) {
573 p->first_tb = NULL;
574 invalidate_page_bitmap(p);
575 p++;
581 /* flush all the translation blocks */
582 /* XXX: tb_flush is currently not thread safe */
583 void tb_flush(CPUState *env1)
585 CPUState *env;
586 #if defined(DEBUG_FLUSH)
587 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
588 (unsigned long)(code_gen_ptr - code_gen_buffer),
589 nb_tbs, nb_tbs > 0 ?
590 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
591 #endif
592 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
593 cpu_abort(env1, "Internal error: code buffer overflow\n");
595 nb_tbs = 0;
597 for(env = first_cpu; env != NULL; env = env->next_cpu) {
598 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
601 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
602 page_flush_tb();
604 code_gen_ptr = code_gen_buffer;
605 /* XXX: flush processor icache at this point if cache flush is
606 expensive */
607 tb_flush_count++;
#ifdef DEBUG_TB_CHECK

/* Report any TB that still overlaps the given (supposedly invalidated)
   page.  Debug-only consistency check. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

/* Verify that tb's circular jump list terminates back at tb itself. */
static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the list of jumps into this TB; tag value 2 marks the end */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
668 /* invalidate one TB */
669 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
670 int next_offset)
672 TranslationBlock *tb1;
673 for(;;) {
674 tb1 = *ptb;
675 if (tb1 == tb) {
676 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
677 break;
679 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
683 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
685 TranslationBlock *tb1;
686 unsigned int n1;
688 for(;;) {
689 tb1 = *ptb;
690 n1 = (long)tb1 & 3;
691 tb1 = (TranslationBlock *)((long)tb1 & ~3);
692 if (tb1 == tb) {
693 *ptb = tb1->page_next[n1];
694 break;
696 ptb = &tb1->page_next[n1];
700 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
702 TranslationBlock *tb1, **ptb;
703 unsigned int n1;
705 ptb = &tb->jmp_next[n];
706 tb1 = *ptb;
707 if (tb1) {
708 /* find tb(n) in circular list */
709 for(;;) {
710 tb1 = *ptb;
711 n1 = (long)tb1 & 3;
712 tb1 = (TranslationBlock *)((long)tb1 & ~3);
713 if (n1 == n && tb1 == tb)
714 break;
715 if (n1 == 2) {
716 ptb = &tb1->jmp_first;
717 } else {
718 ptb = &tb1->jmp_next[n1];
721 /* now we can suppress tb(n) from the list */
722 *ptb = tb->jmp_next[n];
724 tb->jmp_next[n] = NULL;
728 /* reset the jump entry 'n' of a TB so that it is not chained to
729 another TB */
730 static inline void tb_reset_jump(TranslationBlock *tb, int n)
732 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
735 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
737 CPUState *env;
738 PageDesc *p;
739 unsigned int h, n1;
740 target_phys_addr_t phys_pc;
741 TranslationBlock *tb1, *tb2;
743 /* remove the TB from the hash list */
744 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
745 h = tb_phys_hash_func(phys_pc);
746 tb_remove(&tb_phys_hash[h], tb,
747 offsetof(TranslationBlock, phys_hash_next));
749 /* remove the TB from the page list */
750 if (tb->page_addr[0] != page_addr) {
751 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
752 tb_page_remove(&p->first_tb, tb);
753 invalidate_page_bitmap(p);
755 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
756 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
757 tb_page_remove(&p->first_tb, tb);
758 invalidate_page_bitmap(p);
761 tb_invalidated_flag = 1;
763 /* remove the TB from the hash list */
764 h = tb_jmp_cache_hash_func(tb->pc);
765 for(env = first_cpu; env != NULL; env = env->next_cpu) {
766 if (env->tb_jmp_cache[h] == tb)
767 env->tb_jmp_cache[h] = NULL;
770 /* suppress this TB from the two jump lists */
771 tb_jmp_remove(tb, 0);
772 tb_jmp_remove(tb, 1);
774 /* suppress any remaining jumps to this TB */
775 tb1 = tb->jmp_first;
776 for(;;) {
777 n1 = (long)tb1 & 3;
778 if (n1 == 2)
779 break;
780 tb1 = (TranslationBlock *)((long)tb1 & ~3);
781 tb2 = tb1->jmp_next[n1];
782 tb_reset_jump(tb1, n1);
783 tb1->jmp_next[n1] = NULL;
784 tb1 = tb2;
786 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
788 tb_phys_invalidate_count++;
/* Set bits [start, start+len) in the bitmap 'tab' (bit 0 of each byte
   is the lowest-numbered bit). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, last_full;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* range is contained in a single byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        /* head: remaining bits of the first byte */
        *tab++ |= mask;
        start = (start + 8) & ~7;
        /* middle: whole bytes */
        last_full = end & ~7;
        while (start < last_full) {
            *tab++ = 0xff;
            start += 8;
        }
        /* tail: leading bits of the last byte */
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
818 static void build_page_bitmap(PageDesc *p)
820 int n, tb_start, tb_end;
821 TranslationBlock *tb;
823 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
825 tb = p->first_tb;
826 while (tb != NULL) {
827 n = (long)tb & 3;
828 tb = (TranslationBlock *)((long)tb & ~3);
829 /* NOTE: this is subtle as a TB may span two physical pages */
830 if (n == 0) {
831 /* NOTE: tb_end may be after the end of the page, but
832 it is not a problem */
833 tb_start = tb->pc & ~TARGET_PAGE_MASK;
834 tb_end = tb_start + tb->size;
835 if (tb_end > TARGET_PAGE_SIZE)
836 tb_end = TARGET_PAGE_SIZE;
837 } else {
838 tb_start = 0;
839 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
841 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
842 tb = tb->page_next[n];
846 TranslationBlock *tb_gen_code(CPUState *env,
847 target_ulong pc, target_ulong cs_base,
848 int flags, int cflags)
850 TranslationBlock *tb;
851 uint8_t *tc_ptr;
852 target_ulong phys_pc, phys_page2, virt_page2;
853 int code_gen_size;
855 phys_pc = get_phys_addr_code(env, pc);
856 tb = tb_alloc(pc);
857 if (!tb) {
858 /* flush must be done */
859 tb_flush(env);
860 /* cannot fail at this point */
861 tb = tb_alloc(pc);
862 /* Don't forget to invalidate previous TB info. */
863 tb_invalidated_flag = 1;
865 tc_ptr = code_gen_ptr;
866 tb->tc_ptr = tc_ptr;
867 tb->cs_base = cs_base;
868 tb->flags = flags;
869 tb->cflags = cflags;
870 cpu_gen_code(env, tb, &code_gen_size);
871 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
873 /* check next page if needed */
874 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
875 phys_page2 = -1;
876 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
877 phys_page2 = get_phys_addr_code(env, virt_page2);
879 tb_link_phys(tb, phys_pc, phys_page2);
880 return tb;
883 /* invalidate all TBs which intersect with the target physical page
884 starting in range [start;end[. NOTE: start and end must refer to
885 the same physical page. 'is_cpu_write_access' should be true if called
886 from a real cpu write access: the virtual CPU will exit the current
887 TB if code is modified inside this TB. */
888 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
889 int is_cpu_write_access)
891 TranslationBlock *tb, *tb_next, *saved_tb;
892 CPUState *env = cpu_single_env;
893 target_ulong tb_start, tb_end;
894 PageDesc *p;
895 int n;
896 #ifdef TARGET_HAS_PRECISE_SMC
897 int current_tb_not_found = is_cpu_write_access;
898 TranslationBlock *current_tb = NULL;
899 int current_tb_modified = 0;
900 target_ulong current_pc = 0;
901 target_ulong current_cs_base = 0;
902 int current_flags = 0;
903 #endif /* TARGET_HAS_PRECISE_SMC */
905 p = page_find(start >> TARGET_PAGE_BITS);
906 if (!p)
907 return;
908 if (!p->code_bitmap &&
909 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
910 is_cpu_write_access) {
911 /* build code bitmap */
912 build_page_bitmap(p);
915 /* we remove all the TBs in the range [start, end[ */
916 /* XXX: see if in some cases it could be faster to invalidate all the code */
917 tb = p->first_tb;
918 while (tb != NULL) {
919 n = (long)tb & 3;
920 tb = (TranslationBlock *)((long)tb & ~3);
921 tb_next = tb->page_next[n];
922 /* NOTE: this is subtle as a TB may span two physical pages */
923 if (n == 0) {
924 /* NOTE: tb_end may be after the end of the page, but
925 it is not a problem */
926 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
927 tb_end = tb_start + tb->size;
928 } else {
929 tb_start = tb->page_addr[1];
930 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
932 if (!(tb_end <= start || tb_start >= end)) {
933 #ifdef TARGET_HAS_PRECISE_SMC
934 if (current_tb_not_found) {
935 current_tb_not_found = 0;
936 current_tb = NULL;
937 if (env->mem_io_pc) {
938 /* now we have a real cpu fault */
939 current_tb = tb_find_pc(env->mem_io_pc);
942 if (current_tb == tb &&
943 (current_tb->cflags & CF_COUNT_MASK) != 1) {
944 /* If we are modifying the current TB, we must stop
945 its execution. We could be more precise by checking
946 that the modification is after the current PC, but it
947 would require a specialized function to partially
948 restore the CPU state */
950 current_tb_modified = 1;
951 cpu_restore_state(current_tb, env,
952 env->mem_io_pc, NULL);
953 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
954 &current_flags);
956 #endif /* TARGET_HAS_PRECISE_SMC */
957 /* we need to do that to handle the case where a signal
958 occurs while doing tb_phys_invalidate() */
959 saved_tb = NULL;
960 if (env) {
961 saved_tb = env->current_tb;
962 env->current_tb = NULL;
964 tb_phys_invalidate(tb, -1);
965 if (env) {
966 env->current_tb = saved_tb;
967 if (env->interrupt_request && env->current_tb)
968 cpu_interrupt(env, env->interrupt_request);
971 tb = tb_next;
973 #if !defined(CONFIG_USER_ONLY)
974 /* if no code remaining, no need to continue to use slow writes */
975 if (!p->first_tb) {
976 invalidate_page_bitmap(p);
977 if (is_cpu_write_access) {
978 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
981 #endif
982 #ifdef TARGET_HAS_PRECISE_SMC
983 if (current_tb_modified) {
984 /* we generate a block containing just the instruction
985 modifying the memory. It will ensure that it cannot modify
986 itself */
987 env->current_tb = NULL;
988 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
989 cpu_resume_from_signal(env, NULL);
991 #endif
994 /* len must be <= 8 and start must be a multiple of len */
995 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
997 PageDesc *p;
998 int offset, b;
999 #if 0
1000 if (1) {
1001 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1002 cpu_single_env->mem_io_vaddr, len,
1003 cpu_single_env->eip,
1004 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1006 #endif
1007 p = page_find(start >> TARGET_PAGE_BITS);
1008 if (!p)
1009 return;
1010 if (p->code_bitmap) {
1011 offset = start & ~TARGET_PAGE_MASK;
1012 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1013 if (b & ((1 << len) - 1))
1014 goto do_invalidate;
1015 } else {
1016 do_invalidate:
1017 tb_invalidate_phys_page_range(start, start + len, 1);
1021 #if !defined(CONFIG_SOFTMMU)
1022 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1023 unsigned long pc, void *puc)
1025 TranslationBlock *tb;
1026 PageDesc *p;
1027 int n;
1028 #ifdef TARGET_HAS_PRECISE_SMC
1029 TranslationBlock *current_tb = NULL;
1030 CPUState *env = cpu_single_env;
1031 int current_tb_modified = 0;
1032 target_ulong current_pc = 0;
1033 target_ulong current_cs_base = 0;
1034 int current_flags = 0;
1035 #endif
1037 addr &= TARGET_PAGE_MASK;
1038 p = page_find(addr >> TARGET_PAGE_BITS);
1039 if (!p)
1040 return;
1041 tb = p->first_tb;
1042 #ifdef TARGET_HAS_PRECISE_SMC
1043 if (tb && pc != 0) {
1044 current_tb = tb_find_pc(pc);
1046 #endif
1047 while (tb != NULL) {
1048 n = (long)tb & 3;
1049 tb = (TranslationBlock *)((long)tb & ~3);
1050 #ifdef TARGET_HAS_PRECISE_SMC
1051 if (current_tb == tb &&
1052 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1053 /* If we are modifying the current TB, we must stop
1054 its execution. We could be more precise by checking
1055 that the modification is after the current PC, but it
1056 would require a specialized function to partially
1057 restore the CPU state */
1059 current_tb_modified = 1;
1060 cpu_restore_state(current_tb, env, pc, puc);
1061 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1062 &current_flags);
1064 #endif /* TARGET_HAS_PRECISE_SMC */
1065 tb_phys_invalidate(tb, addr);
1066 tb = tb->page_next[n];
1068 p->first_tb = NULL;
1069 #ifdef TARGET_HAS_PRECISE_SMC
1070 if (current_tb_modified) {
1071 /* we generate a block containing just the instruction
1072 modifying the memory. It will ensure that it cannot modify
1073 itself */
1074 env->current_tb = NULL;
1075 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1076 cpu_resume_from_signal(env, puc);
1078 #endif
1080 #endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    /* Link the TB at the head of the page's TB list.  The low bits of
       the stored pointer encode which of the TB's (up to two) pages this
       list entry is for; tb_page_remove() relies on this tagging. */
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;   /* non-NULL => page already held code */
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* A host page may span several target pages; collect the union
           of their flags and clear PAGE_WRITE on each of them. */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1137 /* Allocate a new translation block. Flush the translation buffer if
1138 too many translation blocks or too much generated code. */
1139 TranslationBlock *tb_alloc(target_ulong pc)
1141 TranslationBlock *tb;
1143 if (nb_tbs >= code_gen_max_blocks ||
1144 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1145 return NULL;
1146 tb = &tbs[nb_tbs++];
1147 tb->pc = pc;
1148 tb->cflags = 0;
1149 return tb;
1152 void tb_free(TranslationBlock *tb)
1154 /* In practice this is mostly used for single use temporary TB
1155 Ignore the hard cases and just back up if this TB happens to
1156 be the last one generated. */
1157 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1158 code_gen_ptr = tb->tc_ptr;
1159 nb_tbs--;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* jmp_first's low bits hold the tag 2, marking the head of the
       incoming-jump list; see tb_reset_jump_recursive2(). */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    /* reject host addresses outside the generated-code buffer */
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) — tbs[] entries are allocated in
       increasing tc_ptr order, so the array is sorted by tc_ptr. */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* no exact hit: m_max now indexes the last TB starting below tc_ptr,
       i.e. the TB containing tc_ptr */
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Unlink direction 'n' (0 or 1) of tb's outgoing direct jump, then
   recursively unchain the TB it pointed to.  The incoming-jump list is
   threaded through jmp_next[]/jmp_first with the low 2 pointer bits as
   tags: 0/1 = which jump slot of that TB, 2 = list head. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1275 static void tb_reset_jump_recursive(TranslationBlock *tb)
1277 tb_reset_jump_recursive2(tb, 0);
1278 tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
/* Invalidate any translated code containing the guest byte at virtual
   address 'pc', so the breakpoint takes effect on re-translation. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    /* translate pc to a physical page, then to a ram address */
    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint covering [addr, addr+len) with the given BP_* flags.
   Returns 0 on success (optionally storing the new watchpoint through
   'watchpoint') or -EINVAL for an unsupported length/alignment. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* drop the stale TLB entry so the next access re-checks watchpoints */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  Matches on address, length and flags
   (ignoring the transient BP_WATCHPOINT_HIT bit); returns -ENOENT when
   no such watchpoint exists. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
1350 /* Remove a specific watchpoint by reference. */
1351 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1353 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1355 tlb_flush_page(env, watchpoint->vaddr);
1357 qemu_free(watchpoint);
1360 /* Remove all matching watchpoints. */
1361 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1363 CPUWatchpoint *wp, *next;
1365 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1366 if (wp->flags & mask)
1367 cpu_watchpoint_remove_by_ref(env, wp);
/* Add a breakpoint at guest pc with the given BP_* flags.  Returns 0
   (optionally storing the new breakpoint through 'breakpoint'), or
   -ENOSYS on targets without in-circuit-emulation support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* force retranslation of any TB containing pc */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  Matches on pc and exact flags; returns
   -ENOENT when not found, -ENOSYS without TARGET_HAS_ICE. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference: unlink it, invalidate the
   translated code at its pc, and free it. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
/* Remove all breakpoints whose flags intersect 'mask'.  Uses the _SAFE
   iterator because removal frees the current node. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistancies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
/* Sets the global log mask; lazily opens the log file on first enable
   (append mode after the first open) and closes it when fully disabled. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* subsequent re-opens append rather than truncate */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
/* Switch logging to 'filename': closes any open log file and re-runs
   cpu_set_log() so logging resumes at the current level with the new
   file.  NOTE(review): the previous logfilename is not freed — verify
   that callers accept this (the default name is a string literal). */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
/* Post the interrupt bits in 'mask' to env and force the CPU out of the
   translated-code fast path: either via the icount decrementer, or by
   unchaining the currently executing TB. */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        /* make the instruction counter expire immediately */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
/* Table mapping "-d" log category names to CPU_LOG_* mask bits, with a
   help string for each; terminated by a zero-mask sentinel entry.
   Consumed by cpu_str_to_log_mask(). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
/* Return 1 iff the NUL-terminated string 's2' has length 'n' and its
   bytes equal the first 'n' bytes of 's1'; 0 otherwise.  Used to match
   a comma-delimited token against a category name. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0)
        return 1;
    return 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* [p, p1) is the current comma-delimited token */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            /* unknown category name */
            return 0;
        }
    found:
        /* after the "all" branch, item is the zero-mask sentinel, so
           this OR is a harmless no-op in that case */
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
/* Report a fatal emulation error: print the formatted message and CPU
   state to stderr (and to the qemu log when enabled), then abort().
   Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    /* two va_lists: one for stderr, one for the log file */
    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
1644 CPUState *cpu_copy(CPUState *env)
1646 CPUState *new_env = cpu_init(env->cpu_model_str);
1647 CPUState *next_cpu = new_env->next_cpu;
1648 int cpu_index = new_env->cpu_index;
1649 #if defined(TARGET_HAS_ICE)
1650 CPUBreakpoint *bp;
1651 CPUWatchpoint *wp;
1652 #endif
1654 memcpy(new_env, env, sizeof(CPUState));
1656 /* Preserve chaining and index. */
1657 new_env->next_cpu = next_cpu;
1658 new_env->cpu_index = cpu_index;
1660 /* Clone all break/watchpoints.
1661 Note: Once we support ptrace with hw-debug register access, make sure
1662 BP_CPU break/watchpoints are handled correctly on clone. */
1663 TAILQ_INIT(&env->breakpoints);
1664 TAILQ_INIT(&env->watchpoints);
1665 #if defined(TARGET_HAS_ICE)
1666 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1667 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1669 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1670 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1671 wp->flags, NULL);
1673 #endif
1675 return new_env;
1678 #if !defined(CONFIG_USER_ONLY)
/* Clear the per-CPU tb_jmp_cache slots that could refer to TBs on the
   flushed page.  The page before 'addr' is cleared too because a TB may
   start on the previous page and spill onto this one. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate every entry of every softmmu TLB mode by writing -1 into
   the read/write/code tags, and clear the TB jump cache. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
1737 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1739 if (addr == (tlb_entry->addr_read &
1740 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1741 addr == (tlb_entry->addr_write &
1742 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1743 addr == (tlb_entry->addr_code &
1744 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1745 tlb_entry->addr_read = -1;
1746 tlb_entry->addr_write = -1;
1747 tlb_entry->addr_code = -1;
/* Invalidate the TLB entries (all MMU modes) and jump-cache entries for
   the single page containing 'addr'. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* direct-mapped TLB: one slot per page hash */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG makes subsequent writes to this page go
       through the notdirty path, which invalidates the TBs */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
/* If this TLB entry maps plain RAM inside [start, start+length), mark
   its write tag TLB_NOTDIRTY so the next write takes the slow path and
   re-sets the dirty bit.  'start'/'addr' are host addresses here. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Clear the given dirty flag bits for the ram range [start, end), and
   patch every CPU's TLB so the next write to the range re-dirties it. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in the per-page dirty byte array */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
/* Enable/disable dirty-page tracking (used during migration).  Always
   succeeds and returns 0. */
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
/* Return nonzero when dirty-page tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
/* Pull the dirty bitmap for [start_addr, end_addr] from the hypervisor;
   only does anything when running under KVM. */
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    if (kvm_enabled())
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}
/* If this TLB entry maps plain RAM whose page is no longer marked dirty,
   tag the write entry TLB_NOTDIRTY so the next write is trapped. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* convert the cached host address back to a ram offset */
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
/* Drop the TLB_NOTDIRTY tag from one entry if it maps exactly 'vaddr',
   restoring the fast write path. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a phsical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* fill the direct-mapped slot for this page in the chosen MMU mode */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
#else

/* CONFIG_USER_ONLY: there is no softmmu TLB, so these are no-op stubs
   keeping the interface identical to the system-emulation build. */

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
/* Walks the two-level page table and prints one line per contiguous run
   of pages sharing the same protection flags, /proc/self/maps style. */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    /* iterate one slot past L1_SIZE so the final run is flushed */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: emit the run that just ended */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
2097 int page_get_flags(target_ulong address)
2099 PageDesc *p;
2101 p = page_find(address >> TARGET_PAGE_BITS);
2102 if (!p)
2103 return 0;
2104 return p->flags;
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held. */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
/* Verify that every page in [start, start+len) is valid and permits the
   accesses in 'flags'; write-protected code pages are unprotected on
   demand.  Returns 0 on success, -1 on failure. */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            /* NOTE(review): this returns success after validating only
               the FIRST page of a PAGE_WRITE request — confirm whether
               the remaining pages are meant to be checked too. */
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* union of target-page flags across the whole host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
/* CONFIG_USER_ONLY stub: no softmmu TLB, so nothing to mark dirty. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2227 #endif /* defined(CONFIG_USER_ONLY) */
2229 #if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);

/* Compute the in-page range [start_addr2, end_addr2] that the region
   [start_addr, start_addr+orig_size) occupies within the target page at
   'addr', and set need_subpage when the region only partially covers
   that page.  NOTE: relies on a variable named 'orig_size' being in
   scope at the expansion site (see
   cpu_register_physical_memory_offset). */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_region and regon_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;   /* also read by CHECK_SUBPAGE below */
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* page already mapped: may need to split it into a subpage */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM(D) pages advance the backing offset per page */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* fresh mapping */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           0);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2348 /* XXX: temporary until new memory mapping API */
2349 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2351 PhysPageDesc *p;
2353 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2354 if (!p)
2355 return IO_MEM_UNASSIGNED;
2356 return p->phys_offset;
2359 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2361 if (kvm_enabled())
2362 kvm_coalesce_mmio_region(addr, size);
2365 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2367 if (kvm_enabled())
2368 kvm_uncoalesce_mmio_region(addr, size);
2371 /* XXX: better than nothing */
2372 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2374 ram_addr_t addr;
2375 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2376 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2377 (uint64_t)size, (uint64_t)phys_ram_size);
2378 abort();
2380 addr = phys_ram_alloc_offset;
2381 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2382 return addr;
2385 void qemu_ram_free(ram_addr_t addr)
2389 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2391 #ifdef DEBUG_UNASSIGNED
2392 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2393 #endif
2394 #if defined(TARGET_SPARC)
2395 do_unassigned_access(addr, 0, 0, 0, 1);
2396 #endif
2397 return 0;
2400 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2402 #ifdef DEBUG_UNASSIGNED
2403 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2404 #endif
2405 #if defined(TARGET_SPARC)
2406 do_unassigned_access(addr, 0, 0, 0, 2);
2407 #endif
2408 return 0;
2411 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2413 #ifdef DEBUG_UNASSIGNED
2414 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2415 #endif
2416 #if defined(TARGET_SPARC)
2417 do_unassigned_access(addr, 0, 0, 0, 4);
2418 #endif
2419 return 0;
2422 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2424 #ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2426 #endif
2427 #if defined(TARGET_SPARC)
2428 do_unassigned_access(addr, 1, 0, 0, 1);
2429 #endif
2432 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2434 #ifdef DEBUG_UNASSIGNED
2435 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2436 #endif
2437 #if defined(TARGET_SPARC)
2438 do_unassigned_access(addr, 1, 0, 0, 2);
2439 #endif
2442 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2444 #ifdef DEBUG_UNASSIGNED
2445 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2446 #endif
2447 #if defined(TARGET_SPARC)
2448 do_unassigned_access(addr, 1, 0, 0, 4);
2449 #endif
2452 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2453 unassigned_mem_readb,
2454 unassigned_mem_readw,
2455 unassigned_mem_readl,
2458 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2459 unassigned_mem_writeb,
2460 unassigned_mem_writew,
2461 unassigned_mem_writel,
2464 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2465 uint32_t val)
2467 int dirty_flags;
2468 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2469 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2470 #if !defined(CONFIG_USER_ONLY)
2471 tb_invalidate_phys_page_fast(ram_addr, 1);
2472 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2473 #endif
2475 stb_p(phys_ram_base + ram_addr, val);
2476 #ifdef USE_KQEMU
2477 if (cpu_single_env->kqemu_enabled &&
2478 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2479 kqemu_modify_page(cpu_single_env, ram_addr);
2480 #endif
2481 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2482 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2483 /* we remove the notdirty callback only if the code has been
2484 flushed */
2485 if (dirty_flags == 0xff)
2486 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2489 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2490 uint32_t val)
2492 int dirty_flags;
2493 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2494 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2495 #if !defined(CONFIG_USER_ONLY)
2496 tb_invalidate_phys_page_fast(ram_addr, 2);
2497 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2498 #endif
2500 stw_p(phys_ram_base + ram_addr, val);
2501 #ifdef USE_KQEMU
2502 if (cpu_single_env->kqemu_enabled &&
2503 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2504 kqemu_modify_page(cpu_single_env, ram_addr);
2505 #endif
2506 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2507 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2508 /* we remove the notdirty callback only if the code has been
2509 flushed */
2510 if (dirty_flags == 0xff)
2511 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2514 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2515 uint32_t val)
2517 int dirty_flags;
2518 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2519 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2520 #if !defined(CONFIG_USER_ONLY)
2521 tb_invalidate_phys_page_fast(ram_addr, 4);
2522 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2523 #endif
2525 stl_p(phys_ram_base + ram_addr, val);
2526 #ifdef USE_KQEMU
2527 if (cpu_single_env->kqemu_enabled &&
2528 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2529 kqemu_modify_page(cpu_single_env, ram_addr);
2530 #endif
2531 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2532 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2533 /* we remove the notdirty callback only if the code has been
2534 flushed */
2535 if (dirty_flags == 0xff)
2536 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2539 static CPUReadMemoryFunc *error_mem_read[3] = {
2540 NULL, /* never used */
2541 NULL, /* never used */
2542 NULL, /* never used */
2545 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2546 notdirty_mem_writeb,
2547 notdirty_mem_writew,
2548 notdirty_mem_writel,
2551 /* Generate a debug exception if a watchpoint has been hit. */
2552 static void check_watchpoint(int offset, int len_mask, int flags)
2554 CPUState *env = cpu_single_env;
2555 target_ulong pc, cs_base;
2556 TranslationBlock *tb;
2557 target_ulong vaddr;
2558 CPUWatchpoint *wp;
2559 int cpu_flags;
2561 if (env->watchpoint_hit) {
2562 /* We re-entered the check after replacing the TB. Now raise
2563 * the debug interrupt so that is will trigger after the
2564 * current instruction. */
2565 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2566 return;
2568 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2569 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2570 if ((vaddr == (wp->vaddr & len_mask) ||
2571 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2572 wp->flags |= BP_WATCHPOINT_HIT;
2573 if (!env->watchpoint_hit) {
2574 env->watchpoint_hit = wp;
2575 tb = tb_find_pc(env->mem_io_pc);
2576 if (!tb) {
2577 cpu_abort(env, "check_watchpoint: could not find TB for "
2578 "pc=%p", (void *)env->mem_io_pc);
2580 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2581 tb_phys_invalidate(tb, -1);
2582 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2583 env->exception_index = EXCP_DEBUG;
2584 } else {
2585 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2586 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2588 cpu_resume_from_signal(env, NULL);
2590 } else {
2591 wp->flags &= ~BP_WATCHPOINT_HIT;
2596 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2597 so these check for a hit then pass through to the normal out-of-line
2598 phys routines. */
2599 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2601 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2602 return ldub_phys(addr);
2605 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2607 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2608 return lduw_phys(addr);
2611 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2613 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2614 return ldl_phys(addr);
2617 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2618 uint32_t val)
2620 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2621 stb_phys(addr, val);
2624 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2625 uint32_t val)
2627 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2628 stw_phys(addr, val);
2631 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2632 uint32_t val)
2634 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2635 stl_phys(addr, val);
2638 static CPUReadMemoryFunc *watch_mem_read[3] = {
2639 watch_mem_readb,
2640 watch_mem_readw,
2641 watch_mem_readl,
2644 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2645 watch_mem_writeb,
2646 watch_mem_writew,
2647 watch_mem_writel,
2650 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2651 unsigned int len)
2653 uint32_t ret;
2654 unsigned int idx;
2656 idx = SUBPAGE_IDX(addr);
2657 #if defined(DEBUG_SUBPAGE)
2658 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2659 mmio, len, addr, idx);
2660 #endif
2661 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2662 addr + mmio->region_offset[idx][0][len]);
2664 return ret;
2667 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2668 uint32_t value, unsigned int len)
2670 unsigned int idx;
2672 idx = SUBPAGE_IDX(addr);
2673 #if defined(DEBUG_SUBPAGE)
2674 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2675 mmio, len, addr, idx, value);
2676 #endif
2677 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2678 addr + mmio->region_offset[idx][1][len],
2679 value);
2682 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2684 #if defined(DEBUG_SUBPAGE)
2685 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2686 #endif
2688 return subpage_readlen(opaque, addr, 0);
2691 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2692 uint32_t value)
2694 #if defined(DEBUG_SUBPAGE)
2695 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2696 #endif
2697 subpage_writelen(opaque, addr, value, 0);
2700 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2702 #if defined(DEBUG_SUBPAGE)
2703 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2704 #endif
2706 return subpage_readlen(opaque, addr, 1);
2709 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2710 uint32_t value)
2712 #if defined(DEBUG_SUBPAGE)
2713 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2714 #endif
2715 subpage_writelen(opaque, addr, value, 1);
2718 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2720 #if defined(DEBUG_SUBPAGE)
2721 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2722 #endif
2724 return subpage_readlen(opaque, addr, 2);
2727 static void subpage_writel (void *opaque,
2728 target_phys_addr_t addr, uint32_t value)
2730 #if defined(DEBUG_SUBPAGE)
2731 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2732 #endif
2733 subpage_writelen(opaque, addr, value, 2);
2736 static CPUReadMemoryFunc *subpage_read[] = {
2737 &subpage_readb,
2738 &subpage_readw,
2739 &subpage_readl,
2742 static CPUWriteMemoryFunc *subpage_write[] = {
2743 &subpage_writeb,
2744 &subpage_writew,
2745 &subpage_writel,
2748 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2749 ram_addr_t memory, ram_addr_t region_offset)
2751 int idx, eidx;
2752 unsigned int i;
2754 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2755 return -1;
2756 idx = SUBPAGE_IDX(start);
2757 eidx = SUBPAGE_IDX(end);
2758 #if defined(DEBUG_SUBPAGE)
2759 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2760 mmio, start, end, idx, eidx, memory);
2761 #endif
2762 memory >>= IO_MEM_SHIFT;
2763 for (; idx <= eidx; idx++) {
2764 for (i = 0; i < 4; i++) {
2765 if (io_mem_read[memory][i]) {
2766 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2767 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2768 mmio->region_offset[idx][0][i] = region_offset;
2770 if (io_mem_write[memory][i]) {
2771 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2772 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2773 mmio->region_offset[idx][1][i] = region_offset;
2778 return 0;
2781 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2782 ram_addr_t orig_memory, ram_addr_t region_offset)
2784 subpage_t *mmio;
2785 int subpage_memory;
2787 mmio = qemu_mallocz(sizeof(subpage_t));
2789 mmio->base = base;
2790 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2791 #if defined(DEBUG_SUBPAGE)
2792 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2793 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2794 #endif
2795 *phys = subpage_memory | IO_MEM_SUBPAGE;
2796 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2797 region_offset);
2799 return mmio;
2802 static int get_free_io_mem_idx(void)
2804 int i;
2806 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2807 if (!io_mem_used[i]) {
2808 io_mem_used[i] = 1;
2809 return i;
2812 return -1;
2815 static void io_mem_init(void)
2817 int i;
2819 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2820 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2821 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2822 for (i=0; i<5; i++)
2823 io_mem_used[i] = 1;
2825 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2826 watch_mem_write, NULL);
2827 /* alloc dirty bits array */
2828 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2829 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2832 /* mem_read and mem_write are arrays of functions containing the
2833 function to access byte (index 0), word (index 1) and dword (index
2834 2). Functions can be omitted with a NULL function pointer. The
2835 registered functions may be modified dynamically later.
2836 If io_index is non zero, the corresponding io zone is
2837 modified. If it is zero, a new io zone is allocated. The return
2838 value can be used with cpu_register_physical_memory(). (-1) is
2839 returned if error. */
2840 int cpu_register_io_memory(int io_index,
2841 CPUReadMemoryFunc **mem_read,
2842 CPUWriteMemoryFunc **mem_write,
2843 void *opaque)
2845 int i, subwidth = 0;
2847 if (io_index <= 0) {
2848 io_index = get_free_io_mem_idx();
2849 if (io_index == -1)
2850 return io_index;
2851 } else {
2852 if (io_index >= IO_MEM_NB_ENTRIES)
2853 return -1;
2856 for(i = 0;i < 3; i++) {
2857 if (!mem_read[i] || !mem_write[i])
2858 subwidth = IO_MEM_SUBWIDTH;
2859 io_mem_read[io_index][i] = mem_read[i];
2860 io_mem_write[io_index][i] = mem_write[i];
2862 io_mem_opaque[io_index] = opaque;
2863 return (io_index << IO_MEM_SHIFT) | subwidth;
2866 void cpu_unregister_io_memory(int io_table_address)
2868 int i;
2869 int io_index = io_table_address >> IO_MEM_SHIFT;
2871 for (i=0;i < 3; i++) {
2872 io_mem_read[io_index][i] = unassigned_mem_read[i];
2873 io_mem_write[io_index][i] = unassigned_mem_write[i];
2875 io_mem_opaque[io_index] = NULL;
2876 io_mem_used[io_index] = 0;
2879 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2881 return io_mem_write[io_index >> IO_MEM_SHIFT];
2884 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2886 return io_mem_read[io_index >> IO_MEM_SHIFT];
2889 #endif /* !defined(CONFIG_USER_ONLY) */
2891 /* physical memory access (slow version, mainly for debug) */
2892 #if defined(CONFIG_USER_ONLY)
2893 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2894 int len, int is_write)
2896 int l, flags;
2897 target_ulong page;
2898 void * p;
2900 while (len > 0) {
2901 page = addr & TARGET_PAGE_MASK;
2902 l = (page + TARGET_PAGE_SIZE) - addr;
2903 if (l > len)
2904 l = len;
2905 flags = page_get_flags(page);
2906 if (!(flags & PAGE_VALID))
2907 return;
2908 if (is_write) {
2909 if (!(flags & PAGE_WRITE))
2910 return;
2911 /* XXX: this code should not depend on lock_user */
2912 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2913 /* FIXME - should this return an error rather than just fail? */
2914 return;
2915 memcpy(p, buf, l);
2916 unlock_user(p, addr, l);
2917 } else {
2918 if (!(flags & PAGE_READ))
2919 return;
2920 /* XXX: this code should not depend on lock_user */
2921 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2922 /* FIXME - should this return an error rather than just fail? */
2923 return;
2924 memcpy(buf, p, l);
2925 unlock_user(p, addr, 0);
2927 len -= l;
2928 buf += l;
2929 addr += l;
2933 #else
2934 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2935 int len, int is_write)
2937 int l, io_index;
2938 uint8_t *ptr;
2939 uint32_t val;
2940 target_phys_addr_t page;
2941 unsigned long pd;
2942 PhysPageDesc *p;
2944 while (len > 0) {
2945 page = addr & TARGET_PAGE_MASK;
2946 l = (page + TARGET_PAGE_SIZE) - addr;
2947 if (l > len)
2948 l = len;
2949 p = phys_page_find(page >> TARGET_PAGE_BITS);
2950 if (!p) {
2951 pd = IO_MEM_UNASSIGNED;
2952 } else {
2953 pd = p->phys_offset;
2956 if (is_write) {
2957 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2958 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2959 if (p)
2960 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2961 /* XXX: could force cpu_single_env to NULL to avoid
2962 potential bugs */
2963 if (l >= 4 && ((addr & 3) == 0)) {
2964 /* 32 bit write access */
2965 val = ldl_p(buf);
2966 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2967 l = 4;
2968 } else if (l >= 2 && ((addr & 1) == 0)) {
2969 /* 16 bit write access */
2970 val = lduw_p(buf);
2971 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2972 l = 2;
2973 } else {
2974 /* 8 bit write access */
2975 val = ldub_p(buf);
2976 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2977 l = 1;
2979 } else {
2980 unsigned long addr1;
2981 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2982 /* RAM case */
2983 ptr = phys_ram_base + addr1;
2984 memcpy(ptr, buf, l);
2985 if (!cpu_physical_memory_is_dirty(addr1)) {
2986 /* invalidate code */
2987 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2988 /* set dirty bit */
2989 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2990 (0xff & ~CODE_DIRTY_FLAG);
2993 } else {
2994 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2995 !(pd & IO_MEM_ROMD)) {
2996 /* I/O case */
2997 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2998 if (p)
2999 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3000 if (l >= 4 && ((addr & 3) == 0)) {
3001 /* 32 bit read access */
3002 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3003 stl_p(buf, val);
3004 l = 4;
3005 } else if (l >= 2 && ((addr & 1) == 0)) {
3006 /* 16 bit read access */
3007 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3008 stw_p(buf, val);
3009 l = 2;
3010 } else {
3011 /* 8 bit read access */
3012 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3013 stb_p(buf, val);
3014 l = 1;
3016 } else {
3017 /* RAM case */
3018 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3019 (addr & ~TARGET_PAGE_MASK);
3020 memcpy(buf, ptr, l);
3023 len -= l;
3024 buf += l;
3025 addr += l;
3029 /* used for ROM loading : can write in RAM and ROM */
3030 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3031 const uint8_t *buf, int len)
3033 int l;
3034 uint8_t *ptr;
3035 target_phys_addr_t page;
3036 unsigned long pd;
3037 PhysPageDesc *p;
3039 while (len > 0) {
3040 page = addr & TARGET_PAGE_MASK;
3041 l = (page + TARGET_PAGE_SIZE) - addr;
3042 if (l > len)
3043 l = len;
3044 p = phys_page_find(page >> TARGET_PAGE_BITS);
3045 if (!p) {
3046 pd = IO_MEM_UNASSIGNED;
3047 } else {
3048 pd = p->phys_offset;
3051 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3052 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3053 !(pd & IO_MEM_ROMD)) {
3054 /* do nothing */
3055 } else {
3056 unsigned long addr1;
3057 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3058 /* ROM/RAM case */
3059 ptr = phys_ram_base + addr1;
3060 memcpy(ptr, buf, l);
3062 len -= l;
3063 buf += l;
3064 addr += l;
/* Single global bounce buffer used by cpu_physical_memory_map() when the
   target is not directly-addressable RAM; at most one mapping may use it
   at a time (buffer == NULL means free). */
typedef struct {
    void *buffer;
    target_phys_addr_t addr;   /* guest physical address being bounced */
    target_phys_addr_t len;    /* length of the bounced region */
} BounceBuffer;

static BounceBuffer bounce;

/* Callback registration for callers waiting for the bounce buffer to
   become free so they can retry cpu_physical_memory_map(). */
typedef struct MapClient {
    void *opaque;                      /* passed back to callback */
    void (*callback)(void *opaque);    /* invoked when resources free up */
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
3085 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3087 MapClient *client = qemu_malloc(sizeof(*client));
3089 client->opaque = opaque;
3090 client->callback = callback;
3091 LIST_INSERT_HEAD(&map_client_list, client, link);
3092 return client;
3095 void cpu_unregister_map_client(void *_client)
3097 MapClient *client = (MapClient *)_client;
3099 LIST_REMOVE(client, link);
3102 static void cpu_notify_map_clients(void)
3104 MapClient *client;
3106 while (!LIST_EMPTY(&map_client_list)) {
3107 client = LIST_FIRST(&map_client_list);
3108 client->callback(client->opaque);
3109 LIST_REMOVE(client, link);
3113 /* Map a physical memory region into a host virtual address.
3114 * May map a subset of the requested range, given by and returned in *plen.
3115 * May return NULL if resources needed to perform the mapping are exhausted.
3116 * Use only for reads OR writes - not for read-modify-write operations.
3117 * Use cpu_register_map_client() to know when retrying the map operation is
3118 * likely to succeed.
3120 void *cpu_physical_memory_map(target_phys_addr_t addr,
3121 target_phys_addr_t *plen,
3122 int is_write)
3124 target_phys_addr_t len = *plen;
3125 target_phys_addr_t done = 0;
3126 int l;
3127 uint8_t *ret = NULL;
3128 uint8_t *ptr;
3129 target_phys_addr_t page;
3130 unsigned long pd;
3131 PhysPageDesc *p;
3132 unsigned long addr1;
3134 while (len > 0) {
3135 page = addr & TARGET_PAGE_MASK;
3136 l = (page + TARGET_PAGE_SIZE) - addr;
3137 if (l > len)
3138 l = len;
3139 p = phys_page_find(page >> TARGET_PAGE_BITS);
3140 if (!p) {
3141 pd = IO_MEM_UNASSIGNED;
3142 } else {
3143 pd = p->phys_offset;
3146 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3147 if (done || bounce.buffer) {
3148 break;
3150 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3151 bounce.addr = addr;
3152 bounce.len = l;
3153 if (!is_write) {
3154 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3156 ptr = bounce.buffer;
3157 } else {
3158 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3159 ptr = phys_ram_base + addr1;
3161 if (!done) {
3162 ret = ptr;
3163 } else if (ret + done != ptr) {
3164 break;
3167 len -= l;
3168 addr += l;
3169 done += l;
3171 *plen = done;
3172 return ret;
3175 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3176 * Will also mark the memory as dirty if is_write == 1. access_len gives
3177 * the amount of memory that was actually read or written by the caller.
3179 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3180 int is_write, target_phys_addr_t access_len)
3182 if (buffer != bounce.buffer) {
3183 if (is_write) {
3184 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3185 while (access_len) {
3186 unsigned l;
3187 l = TARGET_PAGE_SIZE;
3188 if (l > access_len)
3189 l = access_len;
3190 if (!cpu_physical_memory_is_dirty(addr1)) {
3191 /* invalidate code */
3192 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3193 /* set dirty bit */
3194 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3195 (0xff & ~CODE_DIRTY_FLAG);
3197 addr1 += l;
3198 access_len -= l;
3201 return;
3203 if (is_write) {
3204 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3206 qemu_free(bounce.buffer);
3207 bounce.buffer = NULL;
3208 cpu_notify_map_clients();
3211 /* warning: addr must be aligned */
3212 uint32_t ldl_phys(target_phys_addr_t addr)
3214 int io_index;
3215 uint8_t *ptr;
3216 uint32_t val;
3217 unsigned long pd;
3218 PhysPageDesc *p;
3220 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3221 if (!p) {
3222 pd = IO_MEM_UNASSIGNED;
3223 } else {
3224 pd = p->phys_offset;
3227 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3228 !(pd & IO_MEM_ROMD)) {
3229 /* I/O case */
3230 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3231 if (p)
3232 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3233 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3234 } else {
3235 /* RAM case */
3236 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3237 (addr & ~TARGET_PAGE_MASK);
3238 val = ldl_p(ptr);
3240 return val;
3243 /* warning: addr must be aligned */
3244 uint64_t ldq_phys(target_phys_addr_t addr)
3246 int io_index;
3247 uint8_t *ptr;
3248 uint64_t val;
3249 unsigned long pd;
3250 PhysPageDesc *p;
3252 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3253 if (!p) {
3254 pd = IO_MEM_UNASSIGNED;
3255 } else {
3256 pd = p->phys_offset;
3259 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3260 !(pd & IO_MEM_ROMD)) {
3261 /* I/O case */
3262 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3263 if (p)
3264 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3265 #ifdef TARGET_WORDS_BIGENDIAN
3266 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3267 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3268 #else
3269 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3270 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3271 #endif
3272 } else {
3273 /* RAM case */
3274 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3275 (addr & ~TARGET_PAGE_MASK);
3276 val = ldq_p(ptr);
3278 return val;
3281 /* XXX: optimize */
3282 uint32_t ldub_phys(target_phys_addr_t addr)
3284 uint8_t val;
3285 cpu_physical_memory_read(addr, &val, 1);
3286 return val;
3289 /* XXX: optimize */
3290 uint32_t lduw_phys(target_phys_addr_t addr)
3292 uint16_t val;
3293 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3294 return tswap16(val);
3297 /* warning: addr must be aligned. The ram page is not masked as dirty
3298 and the code inside is not invalidated. It is useful if the dirty
3299 bits are used to track modified PTEs */
3300 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3302 int io_index;
3303 uint8_t *ptr;
3304 unsigned long pd;
3305 PhysPageDesc *p;
3307 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3308 if (!p) {
3309 pd = IO_MEM_UNASSIGNED;
3310 } else {
3311 pd = p->phys_offset;
3314 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3315 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3316 if (p)
3317 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3318 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3319 } else {
3320 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3321 ptr = phys_ram_base + addr1;
3322 stl_p(ptr, val);
3324 if (unlikely(in_migration)) {
3325 if (!cpu_physical_memory_is_dirty(addr1)) {
3326 /* invalidate code */
3327 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3328 /* set dirty bit */
3329 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3330 (0xff & ~CODE_DIRTY_FLAG);
3336 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3338 int io_index;
3339 uint8_t *ptr;
3340 unsigned long pd;
3341 PhysPageDesc *p;
3343 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3344 if (!p) {
3345 pd = IO_MEM_UNASSIGNED;
3346 } else {
3347 pd = p->phys_offset;
3350 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3351 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3352 if (p)
3353 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3354 #ifdef TARGET_WORDS_BIGENDIAN
3355 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3356 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3357 #else
3358 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3359 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3360 #endif
3361 } else {
3362 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3363 (addr & ~TARGET_PAGE_MASK);
3364 stq_p(ptr, val);
3368 /* warning: addr must be aligned */
3369 void stl_phys(target_phys_addr_t addr, uint32_t val)
3371 int io_index;
3372 uint8_t *ptr;
3373 unsigned long pd;
3374 PhysPageDesc *p;
3376 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3377 if (!p) {
3378 pd = IO_MEM_UNASSIGNED;
3379 } else {
3380 pd = p->phys_offset;
3383 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3384 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3385 if (p)
3386 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3387 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3388 } else {
3389 unsigned long addr1;
3390 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3391 /* RAM case */
3392 ptr = phys_ram_base + addr1;
3393 stl_p(ptr, val);
3394 if (!cpu_physical_memory_is_dirty(addr1)) {
3395 /* invalidate code */
3396 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3397 /* set dirty bit */
3398 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3399 (0xff & ~CODE_DIRTY_FLAG);
3404 /* XXX: optimize */
3405 void stb_phys(target_phys_addr_t addr, uint32_t val)
3407 uint8_t v = val;
3408 cpu_physical_memory_write(addr, &v, 1);
3411 /* XXX: optimize */
3412 void stw_phys(target_phys_addr_t addr, uint32_t val)
3414 uint16_t v = tswap16(val);
3415 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3418 /* XXX: optimize */
3419 void stq_phys(target_phys_addr_t addr, uint64_t val)
3421 val = tswap64(val);
3422 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3425 #endif
3427 /* virtual memory access for debug */
3428 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3429 uint8_t *buf, int len, int is_write)
3431 int l;
3432 target_phys_addr_t phys_addr;
3433 target_ulong page;
3435 while (len > 0) {
3436 page = addr & TARGET_PAGE_MASK;
3437 phys_addr = cpu_get_phys_page_debug(env, page);
3438 /* if no physical page mapped, return an error */
3439 if (phys_addr == -1)
3440 return -1;
3441 l = (page + TARGET_PAGE_SIZE) - addr;
3442 if (l > len)
3443 l = len;
3444 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3445 buf, l, is_write);
3446 len -= l;
3447 buf += l;
3448 addr += l;
3450 return 0;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called from an I/O access that happened mid-TB while icount is
   active: restore the CPU state to the faulting instruction, then
   retranslate the block so it ends exactly on that I/O instruction
   (CF_LAST_IO) and resume execution.  'retaddr' is the host return
   address inside the generated code, used to locate the current TB. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* Instructions remaining in the TB before the state restore... */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Step back to the branch and credit one instruction back to
           the icount budget so it is not double-counted. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO marks the final insn as the I/O one; n is the new
       instruction count for the regenerated block. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
3512 void dump_exec_info(FILE *f,
3513 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3515 int i, target_code_size, max_target_code_size;
3516 int direct_jmp_count, direct_jmp2_count, cross_page;
3517 TranslationBlock *tb;
3519 target_code_size = 0;
3520 max_target_code_size = 0;
3521 cross_page = 0;
3522 direct_jmp_count = 0;
3523 direct_jmp2_count = 0;
3524 for(i = 0; i < nb_tbs; i++) {
3525 tb = &tbs[i];
3526 target_code_size += tb->size;
3527 if (tb->size > max_target_code_size)
3528 max_target_code_size = tb->size;
3529 if (tb->page_addr[1] != -1)
3530 cross_page++;
3531 if (tb->tb_next_offset[0] != 0xffff) {
3532 direct_jmp_count++;
3533 if (tb->tb_next_offset[1] != 0xffff) {
3534 direct_jmp2_count++;
3538 /* XXX: avoid using doubles ? */
3539 cpu_fprintf(f, "Translation buffer state:\n");
3540 cpu_fprintf(f, "gen code size %ld/%ld\n",
3541 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3542 cpu_fprintf(f, "TB count %d/%d\n",
3543 nb_tbs, code_gen_max_blocks);
3544 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3545 nb_tbs ? target_code_size / nb_tbs : 0,
3546 max_target_code_size);
3547 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3548 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3549 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3550 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3551 cross_page,
3552 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3553 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3554 direct_jmp_count,
3555 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3556 direct_jmp2_count,
3557 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3558 cpu_fprintf(f, "\nStatistics:\n");
3559 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3560 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3561 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3562 tcg_dump_info(f, cpu_fprintf);
3565 #if !defined(CONFIG_USER_ONLY)
3567 #define MMUSUFFIX _cmmu
3568 #define GETPC() NULL
3569 #define env cpu_single_env
3570 #define SOFTMMU_CODE_ACCESS
3572 #define SHIFT 0
3573 #include "softmmu_template.h"
3575 #define SHIFT 1
3576 #include "softmmu_template.h"
3578 #define SHIFT 2
3579 #include "softmmu_template.h"
3581 #define SHIFT 3
3582 #include "softmmu_template.h"
3584 #undef env
3586 #endif