1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
115 int phys_ram_fd;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
120 #endif
122 CPUState *first_cpu;
123 /* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129 int use_icount = 0;
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132 int64_t qemu_icount;
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149 ram_addr_t region_offset;
150 } PhysPageDesc;
152 #define L2_BITS 10
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64-bit address space.
157 */
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #else
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161 #endif
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
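/* Illustrative note: assuming 4 KiB target pages (TARGET_PAGE_BITS == 12),
   the 32-bit split above gives L1_BITS == L2_BITS == 10, so a page index
   decomposes as
       index >> L2_BITS         selects the slot in l1_map[] (top 10 bits)
       index & (L2_SIZE - 1)    selects the PageDesc in that leaf (low 10 bits)
   page_find() and page_find_alloc() below perform exactly this split. */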
166 unsigned long qemu_real_host_page_size;
167 unsigned long qemu_host_page_bits;
168 unsigned long qemu_host_page_size;
169 unsigned long qemu_host_page_mask;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc *l1_map[L1_SIZE];
173 static PhysPageDesc **l1_phys_map;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182 char io_mem_used[IO_MEM_NB_ENTRIES];
183 static int io_mem_watch;
184 #endif
186 /* log support */
187 static const char *logfilename = "/tmp/qemu.log";
188 FILE *logfile;
189 int loglevel;
190 static int log_append = 0;
192 /* statistics */
193 static int tlb_flush_count;
194 static int tb_flush_count;
195 static int tb_phys_invalidate_count;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t {
199 target_phys_addr_t base;
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204 } subpage_t;
206 #ifdef _WIN32
207 static void map_exec(void *addr, long size)
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
214 #else
215 static void map_exec(void *addr, long size)
217 unsigned long start, end, page_size;
219 page_size = getpagesize();
220 start = (unsigned long)addr;
221 start &= ~(page_size - 1);
223 end = (unsigned long)addr + size;
224 end += page_size - 1;
225 end &= ~(page_size - 1);
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
230 #endif
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
235 TARGET_PAGE_SIZE */
236 #ifdef _WIN32
238 SYSTEM_INFO system_info;
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
243 #else
244 qemu_real_host_page_size = getpagesize();
245 #endif
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
263 mmap_lock();
264 last_brk = (unsigned long)sbrk(0);
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274 page_set_flags(startaddr & TARGET_PAGE_MASK,
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
278 } while (!feof(f));
279 fclose(f);
281 mmap_unlock();
283 #endif
286 static inline PageDesc **page_l1_map(target_ulong index)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292 return NULL;
293 #endif
294 return &l1_map[index >> L2_BITS];
297 static inline PageDesc *page_find_alloc(target_ulong index)
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 size_t len = sizeof(PageDesc) * L2_SIZE;
309 /* Don't use qemu_malloc because it may recurse. */
310 p = mmap(0, len, PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
312 *lp = p;
313 if (h2g_valid(p)) {
314 unsigned long addr = h2g(p);
315 page_set_flags(addr & TARGET_PAGE_MASK,
316 TARGET_PAGE_ALIGN(addr + len),
317 PAGE_RESERVED);
319 #else
320 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 *lp = p;
322 #endif
324 return p + (index & (L2_SIZE - 1));
327 static inline PageDesc *page_find(target_ulong index)
329 PageDesc **lp, *p;
330 lp = page_l1_map(index);
331 if (!lp)
332 return NULL;
334 p = *lp;
335 if (!p)
336 return 0;
337 return p + (index & (L2_SIZE - 1));
340 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
342 void **lp, **p;
343 PhysPageDesc *pd;
345 p = (void **)l1_phys_map;
346 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
348 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350 #endif
351 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352 p = *lp;
353 if (!p) {
354 /* allocate if not found */
355 if (!alloc)
356 return NULL;
357 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358 memset(p, 0, sizeof(void *) * L1_SIZE);
359 *lp = p;
361 #endif
362 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363 pd = *lp;
364 if (!pd) {
365 int i;
366 /* allocate if not found */
367 if (!alloc)
368 return NULL;
369 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 *lp = pd;
371 for (i = 0; i < L2_SIZE; i++) {
372 pd[i].phys_offset = IO_MEM_UNASSIGNED;
373 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
376 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
379 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
381 return phys_page_find_alloc(index, 0);
384 #if !defined(CONFIG_USER_ONLY)
385 static void tlb_protect_code(ram_addr_t ram_addr);
386 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
387 target_ulong vaddr);
388 #define mmap_lock() do { } while(0)
389 #define mmap_unlock() do { } while(0)
390 #endif
392 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
394 #if defined(CONFIG_USER_ONLY)
395 /* Currently it is not recommended to allocate big chunks of data in
396 user mode. This will change when a dedicated libc is used */
397 #define USE_STATIC_CODE_GEN_BUFFER
398 #endif
400 #ifdef USE_STATIC_CODE_GEN_BUFFER
401 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
402 #endif
404 static void code_gen_alloc(unsigned long tb_size)
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer = static_code_gen_buffer;
408 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
409 map_exec(code_gen_buffer, code_gen_buffer_size);
410 #else
411 code_gen_buffer_size = tb_size;
412 if (code_gen_buffer_size == 0) {
413 #if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416 #else
417 /* XXX: needs adjustments */
418 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
419 #endif
421 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
422 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425 #if defined(__linux__)
427 int flags;
428 void *start = NULL;
430 flags = MAP_PRIVATE | MAP_ANONYMOUS;
431 #if defined(__x86_64__)
432 flags |= MAP_32BIT;
433 /* Cannot map more than that */
434 if (code_gen_buffer_size > (800 * 1024 * 1024))
435 code_gen_buffer_size = (800 * 1024 * 1024);
436 #elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
438 flags |= MAP_FIXED;
439 start = (void *) 0x60000000UL;
440 if (code_gen_buffer_size > (512 * 1024 * 1024))
441 code_gen_buffer_size = (512 * 1024 * 1024);
442 #elif defined(__arm__)
443 /* Map the buffer below 32M, so we can use direct calls and branches */
444 flags |= MAP_FIXED;
445 start = (void *) 0x01000000UL;
446 if (code_gen_buffer_size > 16 * 1024 * 1024)
447 code_gen_buffer_size = 16 * 1024 * 1024;
448 #endif
449 code_gen_buffer = mmap(start, code_gen_buffer_size,
450 PROT_WRITE | PROT_READ | PROT_EXEC,
451 flags, -1, 0);
452 if (code_gen_buffer == MAP_FAILED) {
453 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 exit(1);
457 #elif defined(__FreeBSD__)
459 int flags;
460 void *addr = NULL;
461 flags = MAP_PRIVATE | MAP_ANONYMOUS;
462 #if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
465 flags |= MAP_FIXED;
466 addr = (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size > (800 * 1024 * 1024))
469 code_gen_buffer_size = (800 * 1024 * 1024);
470 #endif
471 code_gen_buffer = mmap(addr, code_gen_buffer_size,
472 PROT_WRITE | PROT_READ | PROT_EXEC,
473 flags, -1, 0);
474 if (code_gen_buffer == MAP_FAILED) {
475 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 exit(1);
479 #else
480 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482 #endif
483 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
484 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
485 code_gen_buffer_max_size = code_gen_buffer_size -
486 code_gen_max_block_size();
487 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
488 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
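/* Note: code_gen_buffer_max_size is kept one maximum-sized block short of the
   real buffer end, so the TB currently being translated always fits before a
   flush is forced in tb_alloc(); code_gen_max_blocks only sizes the tbs[]
   descriptor array from an average block size. */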
491 /* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
493 size. */
494 void cpu_exec_init_all(unsigned long tb_size)
496 cpu_gen_init();
497 code_gen_alloc(tb_size);
498 code_gen_ptr = code_gen_buffer;
499 page_init();
500 #if !defined(CONFIG_USER_ONLY)
501 io_mem_init();
502 #endif
505 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
507 #define CPU_COMMON_SAVE_VERSION 1
509 static void cpu_common_save(QEMUFile *f, void *opaque)
511 CPUState *env = opaque;
513 qemu_put_be32s(f, &env->halted);
514 qemu_put_be32s(f, &env->interrupt_request);
517 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
519 CPUState *env = opaque;
521 if (version_id != CPU_COMMON_SAVE_VERSION)
522 return -EINVAL;
524 qemu_get_be32s(f, &env->halted);
525 qemu_get_be32s(f, &env->interrupt_request);
526 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
527 tlb_flush(env, 1);
529 return 0;
531 #endif
533 void cpu_exec_init(CPUState *env)
535 CPUState **penv;
536 int cpu_index;
538 env->next_cpu = NULL;
539 penv = &first_cpu;
540 cpu_index = 0;
541 while (*penv != NULL) {
542 penv = (CPUState **)&(*penv)->next_cpu;
543 cpu_index++;
545 env->cpu_index = cpu_index;
546 TAILQ_INIT(&env->breakpoints);
547 TAILQ_INIT(&env->watchpoints);
548 *penv = env;
549 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
550 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
551 cpu_common_save, cpu_common_load, env);
552 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
553 cpu_save, cpu_load, env);
554 #endif
557 static inline void invalidate_page_bitmap(PageDesc *p)
559 if (p->code_bitmap) {
560 qemu_free(p->code_bitmap);
561 p->code_bitmap = NULL;
563 p->code_write_count = 0;
566 /* set to NULL all the 'first_tb' fields in all PageDescs */
567 static void page_flush_tb(void)
569 int i, j;
570 PageDesc *p;
572 for(i = 0; i < L1_SIZE; i++) {
573 p = l1_map[i];
574 if (p) {
575 for(j = 0; j < L2_SIZE; j++) {
576 p->first_tb = NULL;
577 invalidate_page_bitmap(p);
578 p++;
584 /* flush all the translation blocks */
585 /* XXX: tb_flush is currently not thread safe */
586 void tb_flush(CPUState *env1)
588 CPUState *env;
589 #if defined(DEBUG_FLUSH)
590 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
591 (unsigned long)(code_gen_ptr - code_gen_buffer),
592 nb_tbs, nb_tbs > 0 ?
593 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
594 #endif
595 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
596 cpu_abort(env1, "Internal error: code buffer overflow\n");
598 nb_tbs = 0;
600 for(env = first_cpu; env != NULL; env = env->next_cpu) {
601 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
604 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
605 page_flush_tb();
607 code_gen_ptr = code_gen_buffer;
608 /* XXX: flush processor icache at this point if cache flush is
609 expensive */
610 tb_flush_count++;
613 #ifdef DEBUG_TB_CHECK
615 static void tb_invalidate_check(target_ulong address)
617 TranslationBlock *tb;
618 int i;
619 address &= TARGET_PAGE_MASK;
620 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
621 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
622 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
623 address >= tb->pc + tb->size)) {
624 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
625 address, (long)tb->pc, tb->size);
631 /* verify that all the pages have correct rights for code */
632 static void tb_page_check(void)
634 TranslationBlock *tb;
635 int i, flags1, flags2;
637 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
638 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
639 flags1 = page_get_flags(tb->pc);
640 flags2 = page_get_flags(tb->pc + tb->size - 1);
641 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
642 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
643 (long)tb->pc, tb->size, flags1, flags2);
649 static void tb_jmp_check(TranslationBlock *tb)
651 TranslationBlock *tb1;
652 unsigned int n1;
654 /* suppress any remaining jumps to this TB */
655 tb1 = tb->jmp_first;
656 for(;;) {
657 n1 = (long)tb1 & 3;
658 tb1 = (TranslationBlock *)((long)tb1 & ~3);
659 if (n1 == 2)
660 break;
661 tb1 = tb1->jmp_next[n1];
663 /* check end of list */
664 if (tb1 != tb) {
665 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
669 #endif
671 /* invalidate one TB */
672 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
673 int next_offset)
675 TranslationBlock *tb1;
676 for(;;) {
677 tb1 = *ptb;
678 if (tb1 == tb) {
679 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
680 break;
682 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
686 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
688 TranslationBlock *tb1;
689 unsigned int n1;
691 for(;;) {
692 tb1 = *ptb;
693 n1 = (long)tb1 & 3;
694 tb1 = (TranslationBlock *)((long)tb1 & ~3);
695 if (tb1 == tb) {
696 *ptb = tb1->page_next[n1];
697 break;
699 ptb = &tb1->page_next[n1];
703 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
705 TranslationBlock *tb1, **ptb;
706 unsigned int n1;
708 ptb = &tb->jmp_next[n];
709 tb1 = *ptb;
710 if (tb1) {
711 /* find tb(n) in circular list */
712 for(;;) {
713 tb1 = *ptb;
714 n1 = (long)tb1 & 3;
715 tb1 = (TranslationBlock *)((long)tb1 & ~3);
716 if (n1 == n && tb1 == tb)
717 break;
718 if (n1 == 2) {
719 ptb = &tb1->jmp_first;
720 } else {
721 ptb = &tb1->jmp_next[n1];
724 /* now we can suppress tb(n) from the list */
725 *ptb = tb->jmp_next[n];
727 tb->jmp_next[n] = NULL;
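/* Note: the page_next[] and jmp list pointers manipulated above are tagged in
   their low two bits: 0 or 1 gives the slot of the source TB the link came
   from, and 2 marks the head of the circular jump list (tb_link_phys() sets
   tb->jmp_first to (long)tb | 2).  Hence the recurring
   "n1 = (long)tb1 & 3; tb1 = (TranslationBlock *)((long)tb1 & ~3)" idiom. */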
731 /* reset the jump entry 'n' of a TB so that it is not chained to
732 another TB */
733 static inline void tb_reset_jump(TranslationBlock *tb, int n)
735 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
738 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
740 CPUState *env;
741 PageDesc *p;
742 unsigned int h, n1;
743 target_phys_addr_t phys_pc;
744 TranslationBlock *tb1, *tb2;
746 /* remove the TB from the hash list */
747 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
748 h = tb_phys_hash_func(phys_pc);
749 tb_remove(&tb_phys_hash[h], tb,
750 offsetof(TranslationBlock, phys_hash_next));
752 /* remove the TB from the page list */
753 if (tb->page_addr[0] != page_addr) {
754 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
755 tb_page_remove(&p->first_tb, tb);
756 invalidate_page_bitmap(p);
758 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
759 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
764 tb_invalidated_flag = 1;
766 /* remove the TB from the hash list */
767 h = tb_jmp_cache_hash_func(tb->pc);
768 for(env = first_cpu; env != NULL; env = env->next_cpu) {
769 if (env->tb_jmp_cache[h] == tb)
770 env->tb_jmp_cache[h] = NULL;
773 /* suppress this TB from the two jump lists */
774 tb_jmp_remove(tb, 0);
775 tb_jmp_remove(tb, 1);
777 /* suppress any remaining jumps to this TB */
778 tb1 = tb->jmp_first;
779 for(;;) {
780 n1 = (long)tb1 & 3;
781 if (n1 == 2)
782 break;
783 tb1 = (TranslationBlock *)((long)tb1 & ~3);
784 tb2 = tb1->jmp_next[n1];
785 tb_reset_jump(tb1, n1);
786 tb1->jmp_next[n1] = NULL;
787 tb1 = tb2;
789 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
791 tb_phys_invalidate_count++;
794 static inline void set_bits(uint8_t *tab, int start, int len)
796 int end, mask, end1;
798 end = start + len;
799 tab += start >> 3;
800 mask = 0xff << (start & 7);
801 if ((start & ~7) == (end & ~7)) {
802 if (start < end) {
803 mask &= ~(0xff << (end & 7));
804 *tab |= mask;
806 } else {
807 *tab++ |= mask;
808 start = (start + 8) & ~7;
809 end1 = end & ~7;
810 while (start < end1) {
811 *tab++ = 0xff;
812 start += 8;
814 if (start < end) {
815 mask = ~(0xff << (end & 7));
816 *tab |= mask;
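/* Worked example: set_bits(tab, 3, 7) marks bits 3..9, i.e. tab[0] |= 0xf8 and
   tab[1] |= 0x03.  build_page_bitmap() below uses this to record which bytes
   of a page are covered by translated code, so that
   tb_invalidate_phys_page_fast() can ignore writes that miss all TBs. */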
821 static void build_page_bitmap(PageDesc *p)
823 int n, tb_start, tb_end;
824 TranslationBlock *tb;
826 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
828 tb = p->first_tb;
829 while (tb != NULL) {
830 n = (long)tb & 3;
831 tb = (TranslationBlock *)((long)tb & ~3);
832 /* NOTE: this is subtle as a TB may span two physical pages */
833 if (n == 0) {
834 /* NOTE: tb_end may be after the end of the page, but
835 it is not a problem */
836 tb_start = tb->pc & ~TARGET_PAGE_MASK;
837 tb_end = tb_start + tb->size;
838 if (tb_end > TARGET_PAGE_SIZE)
839 tb_end = TARGET_PAGE_SIZE;
840 } else {
841 tb_start = 0;
842 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
844 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
845 tb = tb->page_next[n];
849 TranslationBlock *tb_gen_code(CPUState *env,
850 target_ulong pc, target_ulong cs_base,
851 int flags, int cflags)
853 TranslationBlock *tb;
854 uint8_t *tc_ptr;
855 target_ulong phys_pc, phys_page2, virt_page2;
856 int code_gen_size;
858 phys_pc = get_phys_addr_code(env, pc);
859 tb = tb_alloc(pc);
860 if (!tb) {
861 /* flush must be done */
862 tb_flush(env);
863 /* cannot fail at this point */
864 tb = tb_alloc(pc);
865 /* Don't forget to invalidate previous TB info. */
866 tb_invalidated_flag = 1;
868 tc_ptr = code_gen_ptr;
869 tb->tc_ptr = tc_ptr;
870 tb->cs_base = cs_base;
871 tb->flags = flags;
872 tb->cflags = cflags;
873 cpu_gen_code(env, tb, &code_gen_size);
874 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
876 /* check next page if needed */
877 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
878 phys_page2 = -1;
879 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
880 phys_page2 = get_phys_addr_code(env, virt_page2);
882 tb_link_phys(tb, phys_pc, phys_page2);
883 return tb;
886 /* invalidate all TBs which intersect with the target physical page
887 starting in range [start;end[. NOTE: start and end must refer to
888 the same physical page. 'is_cpu_write_access' should be true if called
889 from a real cpu write access: the virtual CPU will exit the current
890 TB if code is modified inside this TB. */
891 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
892 int is_cpu_write_access)
894 TranslationBlock *tb, *tb_next, *saved_tb;
895 CPUState *env = cpu_single_env;
896 target_ulong tb_start, tb_end;
897 PageDesc *p;
898 int n;
899 #ifdef TARGET_HAS_PRECISE_SMC
900 int current_tb_not_found = is_cpu_write_access;
901 TranslationBlock *current_tb = NULL;
902 int current_tb_modified = 0;
903 target_ulong current_pc = 0;
904 target_ulong current_cs_base = 0;
905 int current_flags = 0;
906 #endif /* TARGET_HAS_PRECISE_SMC */
908 p = page_find(start >> TARGET_PAGE_BITS);
909 if (!p)
910 return;
911 if (!p->code_bitmap &&
912 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
913 is_cpu_write_access) {
914 /* build code bitmap */
915 build_page_bitmap(p);
918 /* we remove all the TBs in the range [start, end[ */
919 /* XXX: see if in some cases it could be faster to invalidate all the code */
920 tb = p->first_tb;
921 while (tb != NULL) {
922 n = (long)tb & 3;
923 tb = (TranslationBlock *)((long)tb & ~3);
924 tb_next = tb->page_next[n];
925 /* NOTE: this is subtle as a TB may span two physical pages */
926 if (n == 0) {
927 /* NOTE: tb_end may be after the end of the page, but
928 it is not a problem */
929 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
930 tb_end = tb_start + tb->size;
931 } else {
932 tb_start = tb->page_addr[1];
933 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
935 if (!(tb_end <= start || tb_start >= end)) {
936 #ifdef TARGET_HAS_PRECISE_SMC
937 if (current_tb_not_found) {
938 current_tb_not_found = 0;
939 current_tb = NULL;
940 if (env->mem_io_pc) {
941 /* now we have a real cpu fault */
942 current_tb = tb_find_pc(env->mem_io_pc);
945 if (current_tb == tb &&
946 (current_tb->cflags & CF_COUNT_MASK) != 1) {
947 /* If we are modifying the current TB, we must stop
948 its execution. We could be more precise by checking
949 that the modification is after the current PC, but it
950 would require a specialized function to partially
951 restore the CPU state */
953 current_tb_modified = 1;
954 cpu_restore_state(current_tb, env,
955 env->mem_io_pc, NULL);
956 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
957 &current_flags);
959 #endif /* TARGET_HAS_PRECISE_SMC */
960 /* we need to do that to handle the case where a signal
961 occurs while doing tb_phys_invalidate() */
962 saved_tb = NULL;
963 if (env) {
964 saved_tb = env->current_tb;
965 env->current_tb = NULL;
967 tb_phys_invalidate(tb, -1);
968 if (env) {
969 env->current_tb = saved_tb;
970 if (env->interrupt_request && env->current_tb)
971 cpu_interrupt(env, env->interrupt_request);
974 tb = tb_next;
976 #if !defined(CONFIG_USER_ONLY)
977 /* if no code remaining, no need to continue to use slow writes */
978 if (!p->first_tb) {
979 invalidate_page_bitmap(p);
980 if (is_cpu_write_access) {
981 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
984 #endif
985 #ifdef TARGET_HAS_PRECISE_SMC
986 if (current_tb_modified) {
987 /* we generate a block containing just the instruction
988 modifying the memory. It will ensure that it cannot modify
989 itself */
990 env->current_tb = NULL;
991 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
992 cpu_resume_from_signal(env, NULL);
994 #endif
997 /* len must be <= 8 and start must be a multiple of len */
998 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1000 PageDesc *p;
1001 int offset, b;
1002 #if 0
1003 if (1) {
1004 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1005 cpu_single_env->mem_io_vaddr, len,
1006 cpu_single_env->eip,
1007 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1009 #endif
1010 p = page_find(start >> TARGET_PAGE_BITS);
1011 if (!p)
1012 return;
1013 if (p->code_bitmap) {
1014 offset = start & ~TARGET_PAGE_MASK;
1015 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1016 if (b & ((1 << len) - 1))
1017 goto do_invalidate;
1018 } else {
1019 do_invalidate:
1020 tb_invalidate_phys_page_range(start, start + len, 1);
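/* Worked example: for a 4-byte write at page offset 0x24, b is
   code_bitmap[4] >> 4; if any of its low four bits is set, translated code
   overlaps bytes 0x24..0x27 of the page and the slow
   tb_invalidate_phys_page_range() path is taken. */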
1024 #if !defined(CONFIG_SOFTMMU)
1025 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1026 unsigned long pc, void *puc)
1028 TranslationBlock *tb;
1029 PageDesc *p;
1030 int n;
1031 #ifdef TARGET_HAS_PRECISE_SMC
1032 TranslationBlock *current_tb = NULL;
1033 CPUState *env = cpu_single_env;
1034 int current_tb_modified = 0;
1035 target_ulong current_pc = 0;
1036 target_ulong current_cs_base = 0;
1037 int current_flags = 0;
1038 #endif
1040 addr &= TARGET_PAGE_MASK;
1041 p = page_find(addr >> TARGET_PAGE_BITS);
1042 if (!p)
1043 return;
1044 tb = p->first_tb;
1045 #ifdef TARGET_HAS_PRECISE_SMC
1046 if (tb && pc != 0) {
1047 current_tb = tb_find_pc(pc);
1049 #endif
1050 while (tb != NULL) {
1051 n = (long)tb & 3;
1052 tb = (TranslationBlock *)((long)tb & ~3);
1053 #ifdef TARGET_HAS_PRECISE_SMC
1054 if (current_tb == tb &&
1055 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1056 /* If we are modifying the current TB, we must stop
1057 its execution. We could be more precise by checking
1058 that the modification is after the current PC, but it
1059 would require a specialized function to partially
1060 restore the CPU state */
1062 current_tb_modified = 1;
1063 cpu_restore_state(current_tb, env, pc, puc);
1064 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1065 &current_flags);
1067 #endif /* TARGET_HAS_PRECISE_SMC */
1068 tb_phys_invalidate(tb, addr);
1069 tb = tb->page_next[n];
1071 p->first_tb = NULL;
1072 #ifdef TARGET_HAS_PRECISE_SMC
1073 if (current_tb_modified) {
1074 /* we generate a block containing just the instruction
1075 modifying the memory. It will ensure that it cannot modify
1076 itself */
1077 env->current_tb = NULL;
1078 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1079 cpu_resume_from_signal(env, puc);
1081 #endif
1083 #endif
1085 /* add the tb in the target page and protect it if necessary */
1086 static inline void tb_alloc_page(TranslationBlock *tb,
1087 unsigned int n, target_ulong page_addr)
1089 PageDesc *p;
1090 TranslationBlock *last_first_tb;
1092 tb->page_addr[n] = page_addr;
1093 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1094 tb->page_next[n] = p->first_tb;
1095 last_first_tb = p->first_tb;
1096 p->first_tb = (TranslationBlock *)((long)tb | n);
1097 invalidate_page_bitmap(p);
1099 #if defined(TARGET_HAS_SMC) || 1
1101 #if defined(CONFIG_USER_ONLY)
1102 if (p->flags & PAGE_WRITE) {
1103 target_ulong addr;
1104 PageDesc *p2;
1105 int prot;
1107 /* force the host page as non writable (writes will have a
1108 page fault + mprotect overhead) */
1109 page_addr &= qemu_host_page_mask;
1110 prot = 0;
1111 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1112 addr += TARGET_PAGE_SIZE) {
1114 p2 = page_find (addr >> TARGET_PAGE_BITS);
1115 if (!p2)
1116 continue;
1117 prot |= p2->flags;
1118 p2->flags &= ~PAGE_WRITE;
1119 page_get_flags(addr);
1121 mprotect(g2h(page_addr), qemu_host_page_size,
1122 (prot & PAGE_BITS) & ~PAGE_WRITE);
1123 #ifdef DEBUG_TB_INVALIDATE
1124 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1125 page_addr);
1126 #endif
1128 #else
1129 /* if some code is already present, then the pages are already
1130 protected. So we handle the case where only the first TB is
1131 allocated in a physical page */
1132 if (!last_first_tb) {
1133 tlb_protect_code(page_addr);
1135 #endif
1137 #endif /* TARGET_HAS_SMC */
1140 /* Allocate a new translation block. Flush the translation buffer if
1141 too many translation blocks or too much generated code. */
1142 TranslationBlock *tb_alloc(target_ulong pc)
1144 TranslationBlock *tb;
1146 if (nb_tbs >= code_gen_max_blocks ||
1147 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1148 return NULL;
1149 tb = &tbs[nb_tbs++];
1150 tb->pc = pc;
1151 tb->cflags = 0;
1152 return tb;
1155 void tb_free(TranslationBlock *tb)
1157 /* In practice this is mostly used for single-use temporary TBs.
1158 Ignore the hard cases and just back up if this TB happens to
1159 be the last one generated. */
1160 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1161 code_gen_ptr = tb->tc_ptr;
1162 nb_tbs--;
1166 /* add a new TB and link it to the physical page tables. phys_page2 is
1167 (-1) to indicate that only one page contains the TB. */
1168 void tb_link_phys(TranslationBlock *tb,
1169 target_ulong phys_pc, target_ulong phys_page2)
1171 unsigned int h;
1172 TranslationBlock **ptb;
1174 /* Grab the mmap lock to stop another thread invalidating this TB
1175 before we are done. */
1176 mmap_lock();
1177 /* add in the physical hash table */
1178 h = tb_phys_hash_func(phys_pc);
1179 ptb = &tb_phys_hash[h];
1180 tb->phys_hash_next = *ptb;
1181 *ptb = tb;
1183 /* add in the page list */
1184 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1185 if (phys_page2 != -1)
1186 tb_alloc_page(tb, 1, phys_page2);
1187 else
1188 tb->page_addr[1] = -1;
1190 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1191 tb->jmp_next[0] = NULL;
1192 tb->jmp_next[1] = NULL;
1194 /* init original jump addresses */
1195 if (tb->tb_next_offset[0] != 0xffff)
1196 tb_reset_jump(tb, 0);
1197 if (tb->tb_next_offset[1] != 0xffff)
1198 tb_reset_jump(tb, 1);
1200 #ifdef DEBUG_TB_CHECK
1201 tb_page_check();
1202 #endif
1203 mmap_unlock();
1206 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1207 tb[1].tc_ptr. Return NULL if not found */
1208 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1210 int m_min, m_max, m;
1211 unsigned long v;
1212 TranslationBlock *tb;
1214 if (nb_tbs <= 0)
1215 return NULL;
1216 if (tc_ptr < (unsigned long)code_gen_buffer ||
1217 tc_ptr >= (unsigned long)code_gen_ptr)
1218 return NULL;
1219 /* binary search (cf Knuth) */
1220 m_min = 0;
1221 m_max = nb_tbs - 1;
1222 while (m_min <= m_max) {
1223 m = (m_min + m_max) >> 1;
1224 tb = &tbs[m];
1225 v = (unsigned long)tb->tc_ptr;
1226 if (v == tc_ptr)
1227 return tb;
1228 else if (tc_ptr < v) {
1229 m_max = m - 1;
1230 } else {
1231 m_min = m + 1;
1234 return &tbs[m_max];
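/* Note: tbs[] entries are created in the same order as code_gen_ptr advances,
   so their tc_ptr values are sorted by construction and the binary search
   above returns the TB with the largest tc_ptr that is <= the given host PC,
   i.e. the block containing it. */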
1237 static void tb_reset_jump_recursive(TranslationBlock *tb);
1239 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1241 TranslationBlock *tb1, *tb_next, **ptb;
1242 unsigned int n1;
1244 tb1 = tb->jmp_next[n];
1245 if (tb1 != NULL) {
1246 /* find head of list */
1247 for(;;) {
1248 n1 = (long)tb1 & 3;
1249 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1250 if (n1 == 2)
1251 break;
1252 tb1 = tb1->jmp_next[n1];
1254 /* we are now sure that tb jumps to tb1 */
1255 tb_next = tb1;
1257 /* remove tb from the jmp_first list */
1258 ptb = &tb_next->jmp_first;
1259 for(;;) {
1260 tb1 = *ptb;
1261 n1 = (long)tb1 & 3;
1262 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1263 if (n1 == n && tb1 == tb)
1264 break;
1265 ptb = &tb1->jmp_next[n1];
1267 *ptb = tb->jmp_next[n];
1268 tb->jmp_next[n] = NULL;
1270 /* suppress the jump to next tb in generated code */
1271 tb_reset_jump(tb, n);
1273 /* suppress jumps in the tb on which we could have jumped */
1274 tb_reset_jump_recursive(tb_next);
1278 static void tb_reset_jump_recursive(TranslationBlock *tb)
1280 tb_reset_jump_recursive2(tb, 0);
1281 tb_reset_jump_recursive2(tb, 1);
1284 #if defined(TARGET_HAS_ICE)
1285 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1287 target_phys_addr_t addr;
1288 target_ulong pd;
1289 ram_addr_t ram_addr;
1290 PhysPageDesc *p;
1292 addr = cpu_get_phys_page_debug(env, pc);
1293 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1294 if (!p) {
1295 pd = IO_MEM_UNASSIGNED;
1296 } else {
1297 pd = p->phys_offset;
1299 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1300 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1302 #endif
1304 /* Add a watchpoint. */
1305 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1306 int flags, CPUWatchpoint **watchpoint)
1308 target_ulong len_mask = ~(len - 1);
1309 CPUWatchpoint *wp;
1311 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1312 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1313 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1314 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1315 return -EINVAL;
1317 wp = qemu_malloc(sizeof(*wp));
1319 wp->vaddr = addr;
1320 wp->len_mask = len_mask;
1321 wp->flags = flags;
1323 /* keep all GDB-injected watchpoints in front */
1324 if (flags & BP_GDB)
1325 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1326 else
1327 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1329 tlb_flush_page(env, addr);
1331 if (watchpoint)
1332 *watchpoint = wp;
1333 return 0;
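/* Usage sketch (illustrative only; BP_MEM_WRITE is assumed to come from
   cpu-defs.h): this is roughly how a debugger front end such as the gdbstub
   installs a 4-byte write watchpoint. */
#if 0
{
    CPUWatchpoint *wp;
    if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        /* rejected: len must be 1, 2, 4 or 8 and addr aligned to len */
    }
}
#endif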
1336 /* Remove a specific watchpoint. */
1337 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1338 int flags)
1340 target_ulong len_mask = ~(len - 1);
1341 CPUWatchpoint *wp;
1343 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1344 if (addr == wp->vaddr && len_mask == wp->len_mask
1345 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1346 cpu_watchpoint_remove_by_ref(env, wp);
1347 return 0;
1350 return -ENOENT;
1353 /* Remove a specific watchpoint by reference. */
1354 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1356 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1358 tlb_flush_page(env, watchpoint->vaddr);
1360 qemu_free(watchpoint);
1363 /* Remove all matching watchpoints. */
1364 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1366 CPUWatchpoint *wp, *next;
1368 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1369 if (wp->flags & mask)
1370 cpu_watchpoint_remove_by_ref(env, wp);
1374 /* Add a breakpoint. */
1375 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1376 CPUBreakpoint **breakpoint)
1378 #if defined(TARGET_HAS_ICE)
1379 CPUBreakpoint *bp;
1381 bp = qemu_malloc(sizeof(*bp));
1383 bp->pc = pc;
1384 bp->flags = flags;
1386 /* keep all GDB-injected breakpoints in front */
1387 if (flags & BP_GDB)
1388 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1389 else
1390 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1392 breakpoint_invalidate(env, pc);
1394 if (breakpoint)
1395 *breakpoint = bp;
1396 return 0;
1397 #else
1398 return -ENOSYS;
1399 #endif
1402 /* Remove a specific breakpoint. */
1403 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1405 #if defined(TARGET_HAS_ICE)
1406 CPUBreakpoint *bp;
1408 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1409 if (bp->pc == pc && bp->flags == flags) {
1410 cpu_breakpoint_remove_by_ref(env, bp);
1411 return 0;
1414 return -ENOENT;
1415 #else
1416 return -ENOSYS;
1417 #endif
1420 /* Remove a specific breakpoint by reference. */
1421 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1423 #if defined(TARGET_HAS_ICE)
1424 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1426 breakpoint_invalidate(env, breakpoint->pc);
1428 qemu_free(breakpoint);
1429 #endif
1432 /* Remove all matching breakpoints. */
1433 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1435 #if defined(TARGET_HAS_ICE)
1436 CPUBreakpoint *bp, *next;
1438 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1439 if (bp->flags & mask)
1440 cpu_breakpoint_remove_by_ref(env, bp);
1442 #endif
1445 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1446 CPU loop after each instruction */
1447 void cpu_single_step(CPUState *env, int enabled)
1449 #if defined(TARGET_HAS_ICE)
1450 if (env->singlestep_enabled != enabled) {
1451 env->singlestep_enabled = enabled;
1452 /* must flush all the translated code to avoid inconsistencies */
1453 /* XXX: only flush what is necessary */
1454 tb_flush(env);
1456 #endif
1459 /* enable or disable low levels log */
1460 void cpu_set_log(int log_flags)
1462 loglevel = log_flags;
1463 if (loglevel && !logfile) {
1464 logfile = fopen(logfilename, log_append ? "a" : "w");
1465 if (!logfile) {
1466 perror(logfilename);
1467 _exit(1);
1469 #if !defined(CONFIG_SOFTMMU)
1470 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1472 static char logfile_buf[4096];
1473 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1475 #else
1476 setvbuf(logfile, NULL, _IOLBF, 0);
1477 #endif
1478 log_append = 1;
1480 if (!loglevel && logfile) {
1481 fclose(logfile);
1482 logfile = NULL;
1486 void cpu_set_log_filename(const char *filename)
1488 logfilename = strdup(filename);
1489 if (logfile) {
1490 fclose(logfile);
1491 logfile = NULL;
1493 cpu_set_log(loglevel);
1496 /* mask must never be zero, except for A20 change call */
1497 void cpu_interrupt(CPUState *env, int mask)
1499 #if !defined(USE_NPTL)
1500 TranslationBlock *tb;
1501 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1502 #endif
1503 int old_mask;
1505 if (mask & CPU_INTERRUPT_EXIT) {
1506 env->exit_request = 1;
1507 mask &= ~CPU_INTERRUPT_EXIT;
1510 old_mask = env->interrupt_request;
1511 env->interrupt_request |= mask;
1512 #if defined(USE_NPTL)
1513 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1514 problem and hope the cpu will stop of its own accord. For userspace
1515 emulation this often isn't actually as bad as it sounds. Often
1516 signals are used primarily to interrupt blocking syscalls. */
1517 #else
1518 if (use_icount) {
1519 env->icount_decr.u16.high = 0xffff;
1520 #ifndef CONFIG_USER_ONLY
1521 if (!can_do_io(env)
1522 && (mask & ~old_mask) != 0) {
1523 cpu_abort(env, "Raised interrupt while not in I/O function");
1525 #endif
1526 } else {
1527 tb = env->current_tb;
1528 /* if the cpu is currently executing code, we must unlink it and
1529 all the potentially executing TB */
1530 if (tb && !testandset(&interrupt_lock)) {
1531 env->current_tb = NULL;
1532 tb_reset_jump_recursive(tb);
1533 resetlock(&interrupt_lock);
1536 #endif
1539 void cpu_reset_interrupt(CPUState *env, int mask)
1541 env->interrupt_request &= ~mask;
1544 const CPULogItem cpu_log_items[] = {
1545 { CPU_LOG_TB_OUT_ASM, "out_asm",
1546 "show generated host assembly code for each compiled TB" },
1547 { CPU_LOG_TB_IN_ASM, "in_asm",
1548 "show target assembly code for each compiled TB" },
1549 { CPU_LOG_TB_OP, "op",
1550 "show micro ops for each compiled TB" },
1551 { CPU_LOG_TB_OP_OPT, "op_opt",
1552 "show micro ops "
1553 #ifdef TARGET_I386
1554 "before eflags optimization and "
1555 #endif
1556 "after liveness analysis" },
1557 { CPU_LOG_INT, "int",
1558 "show interrupts/exceptions in short format" },
1559 { CPU_LOG_EXEC, "exec",
1560 "show trace before each executed TB (lots of logs)" },
1561 { CPU_LOG_TB_CPU, "cpu",
1562 "show CPU state before block translation" },
1563 #ifdef TARGET_I386
1564 { CPU_LOG_PCALL, "pcall",
1565 "show protected mode far calls/returns/exceptions" },
1566 { CPU_LOG_RESET, "cpu_reset",
1567 "show CPU state before CPU resets" },
1568 #endif
1569 #ifdef DEBUG_IOPORT
1570 { CPU_LOG_IOPORT, "ioport",
1571 "show all i/o ports accesses" },
1572 #endif
1573 { 0, NULL, NULL },
1576 static int cmp1(const char *s1, int n, const char *s2)
1578 if (strlen(s2) != n)
1579 return 0;
1580 return memcmp(s1, s2, n) == 0;
1583 /* takes a comma-separated list of log masks. Returns 0 on error. */
1584 int cpu_str_to_log_mask(const char *str)
1586 const CPULogItem *item;
1587 int mask;
1588 const char *p, *p1;
1590 p = str;
1591 mask = 0;
1592 for(;;) {
1593 p1 = strchr(p, ',');
1594 if (!p1)
1595 p1 = p + strlen(p);
1596 if(cmp1(p,p1-p,"all")) {
1597 for(item = cpu_log_items; item->mask != 0; item++) {
1598 mask |= item->mask;
1600 } else {
1601 for(item = cpu_log_items; item->mask != 0; item++) {
1602 if (cmp1(p, p1 - p, item->name))
1603 goto found;
1605 return 0;
1607 found:
1608 mask |= item->mask;
1609 if (*p1 != ',')
1610 break;
1611 p = p1 + 1;
1613 return mask;
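/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every entry of
   cpu_log_items[], and any unknown name makes the whole call return 0. */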
1616 void cpu_abort(CPUState *env, const char *fmt, ...)
1618 va_list ap;
1619 va_list ap2;
1621 va_start(ap, fmt);
1622 va_copy(ap2, ap);
1623 fprintf(stderr, "qemu: fatal: ");
1624 vfprintf(stderr, fmt, ap);
1625 fprintf(stderr, "\n");
1626 #ifdef TARGET_I386
1627 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1628 #else
1629 cpu_dump_state(env, stderr, fprintf, 0);
1630 #endif
1631 if (qemu_log_enabled()) {
1632 qemu_log("qemu: fatal: ");
1633 qemu_log_vprintf(fmt, ap2);
1634 qemu_log("\n");
1635 #ifdef TARGET_I386
1636 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1637 #else
1638 log_cpu_state(env, 0);
1639 #endif
1640 qemu_log_flush();
1641 qemu_log_close();
1643 va_end(ap2);
1644 va_end(ap);
1645 abort();
1648 CPUState *cpu_copy(CPUState *env)
1650 CPUState *new_env = cpu_init(env->cpu_model_str);
1651 CPUState *next_cpu = new_env->next_cpu;
1652 int cpu_index = new_env->cpu_index;
1653 #if defined(TARGET_HAS_ICE)
1654 CPUBreakpoint *bp;
1655 CPUWatchpoint *wp;
1656 #endif
1658 memcpy(new_env, env, sizeof(CPUState));
1660 /* Preserve chaining and index. */
1661 new_env->next_cpu = next_cpu;
1662 new_env->cpu_index = cpu_index;
1664 /* Clone all break/watchpoints.
1665 Note: Once we support ptrace with hw-debug register access, make sure
1666 BP_CPU break/watchpoints are handled correctly on clone. */
1667 TAILQ_INIT(&new_env->breakpoints);
1668 TAILQ_INIT(&new_env->watchpoints);
1669 #if defined(TARGET_HAS_ICE)
1670 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1671 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1673 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1674 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1675 wp->flags, NULL);
1677 #endif
1679 return new_env;
1682 #if !defined(CONFIG_USER_ONLY)
1684 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1686 unsigned int i;
1688 /* Discard jump cache entries for any tb which might potentially
1689 overlap the flushed page. */
1690 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1691 memset (&env->tb_jmp_cache[i], 0,
1692 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1694 i = tb_jmp_cache_hash_page(addr);
1695 memset (&env->tb_jmp_cache[i], 0,
1696 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1699 /* NOTE: if flush_global is true, also flush global entries (not
1700 implemented yet) */
1701 void tlb_flush(CPUState *env, int flush_global)
1703 int i;
1705 #if defined(DEBUG_TLB)
1706 printf("tlb_flush:\n");
1707 #endif
1708 /* must reset current TB so that interrupts cannot modify the
1709 links while we are modifying them */
1710 env->current_tb = NULL;
1712 for(i = 0; i < CPU_TLB_SIZE; i++) {
1713 env->tlb_table[0][i].addr_read = -1;
1714 env->tlb_table[0][i].addr_write = -1;
1715 env->tlb_table[0][i].addr_code = -1;
1716 env->tlb_table[1][i].addr_read = -1;
1717 env->tlb_table[1][i].addr_write = -1;
1718 env->tlb_table[1][i].addr_code = -1;
1719 #if (NB_MMU_MODES >= 3)
1720 env->tlb_table[2][i].addr_read = -1;
1721 env->tlb_table[2][i].addr_write = -1;
1722 env->tlb_table[2][i].addr_code = -1;
1723 #if (NB_MMU_MODES == 4)
1724 env->tlb_table[3][i].addr_read = -1;
1725 env->tlb_table[3][i].addr_write = -1;
1726 env->tlb_table[3][i].addr_code = -1;
1727 #endif
1728 #endif
1731 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1733 #ifdef USE_KQEMU
1734 if (env->kqemu_enabled) {
1735 kqemu_flush(env, flush_global);
1737 #endif
1738 tlb_flush_count++;
1741 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1743 if (addr == (tlb_entry->addr_read &
1744 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1745 addr == (tlb_entry->addr_write &
1746 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1747 addr == (tlb_entry->addr_code &
1748 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1749 tlb_entry->addr_read = -1;
1750 tlb_entry->addr_write = -1;
1751 tlb_entry->addr_code = -1;
1755 void tlb_flush_page(CPUState *env, target_ulong addr)
1757 int i;
1759 #if defined(DEBUG_TLB)
1760 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1761 #endif
1762 /* must reset current TB so that interrupts cannot modify the
1763 links while we are modifying them */
1764 env->current_tb = NULL;
1766 addr &= TARGET_PAGE_MASK;
1767 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1768 tlb_flush_entry(&env->tlb_table[0][i], addr);
1769 tlb_flush_entry(&env->tlb_table[1][i], addr);
1770 #if (NB_MMU_MODES >= 3)
1771 tlb_flush_entry(&env->tlb_table[2][i], addr);
1772 #if (NB_MMU_MODES == 4)
1773 tlb_flush_entry(&env->tlb_table[3][i], addr);
1774 #endif
1775 #endif
1777 tlb_flush_jmp_cache(env, addr);
1779 #ifdef USE_KQEMU
1780 if (env->kqemu_enabled) {
1781 kqemu_flush_page(env, addr);
1783 #endif
1786 /* update the TLBs so that writes to code in the virtual page 'addr'
1787 can be detected */
1788 static void tlb_protect_code(ram_addr_t ram_addr)
1790 cpu_physical_memory_reset_dirty(ram_addr,
1791 ram_addr + TARGET_PAGE_SIZE,
1792 CODE_DIRTY_FLAG);
1795 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1796 tested for self modifying code */
1797 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1798 target_ulong vaddr)
1800 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
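/* Note: tlb_protect_code() clears CODE_DIRTY_FLAG for the page, so write TLB
   entries for it are flagged TLB_NOTDIRTY and take the IO_MEM_NOTDIRTY slow
   path, which invalidates overlapping TBs via tb_invalidate_phys_page_fast().
   Once no translated code remains on the page, tlb_unprotect_code_phys()
   above sets CODE_DIRTY_FLAG again and fast RAM writes resume.  This is how
   self-modifying code is detected in system emulation without mprotect(). */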
1803 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1804 unsigned long start, unsigned long length)
1806 unsigned long addr;
1807 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1808 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1809 if ((addr - start) < length) {
1810 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1815 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1816 int dirty_flags)
1818 CPUState *env;
1819 unsigned long length, start1;
1820 int i, mask, len;
1821 uint8_t *p;
1823 start &= TARGET_PAGE_MASK;
1824 end = TARGET_PAGE_ALIGN(end);
1826 length = end - start;
1827 if (length == 0)
1828 return;
1829 len = length >> TARGET_PAGE_BITS;
1830 #ifdef USE_KQEMU
1831 /* XXX: should not depend on cpu context */
1832 env = first_cpu;
1833 if (env->kqemu_enabled) {
1834 ram_addr_t addr;
1835 addr = start;
1836 for(i = 0; i < len; i++) {
1837 kqemu_set_notdirty(env, addr);
1838 addr += TARGET_PAGE_SIZE;
1841 #endif
1842 mask = ~dirty_flags;
1843 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1844 for(i = 0; i < len; i++)
1845 p[i] &= mask;
1847 /* we modify the TLB cache so that the dirty bit will be set again
1848 when accessing the range */
1849 start1 = start + (unsigned long)phys_ram_base;
1850 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1851 for(i = 0; i < CPU_TLB_SIZE; i++)
1852 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1853 for(i = 0; i < CPU_TLB_SIZE; i++)
1854 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1855 #if (NB_MMU_MODES >= 3)
1856 for(i = 0; i < CPU_TLB_SIZE; i++)
1857 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1858 #if (NB_MMU_MODES == 4)
1859 for(i = 0; i < CPU_TLB_SIZE; i++)
1860 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1861 #endif
1862 #endif
1866 int cpu_physical_memory_set_dirty_tracking(int enable)
1868 in_migration = enable;
1869 return 0;
1872 int cpu_physical_memory_get_dirty_tracking(void)
1874 return in_migration;
1877 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1879 if (kvm_enabled())
1880 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1883 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1885 ram_addr_t ram_addr;
1887 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1888 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1889 tlb_entry->addend - (unsigned long)phys_ram_base;
1890 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1891 tlb_entry->addr_write |= TLB_NOTDIRTY;
1896 /* update the TLB according to the current state of the dirty bits */
1897 void cpu_tlb_update_dirty(CPUState *env)
1899 int i;
1900 for(i = 0; i < CPU_TLB_SIZE; i++)
1901 tlb_update_dirty(&env->tlb_table[0][i]);
1902 for(i = 0; i < CPU_TLB_SIZE; i++)
1903 tlb_update_dirty(&env->tlb_table[1][i]);
1904 #if (NB_MMU_MODES >= 3)
1905 for(i = 0; i < CPU_TLB_SIZE; i++)
1906 tlb_update_dirty(&env->tlb_table[2][i]);
1907 #if (NB_MMU_MODES == 4)
1908 for(i = 0; i < CPU_TLB_SIZE; i++)
1909 tlb_update_dirty(&env->tlb_table[3][i]);
1910 #endif
1911 #endif
1914 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1916 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1917 tlb_entry->addr_write = vaddr;
1920 /* update the TLB corresponding to virtual page vaddr
1921 so that it is no longer dirty */
1922 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1924 int i;
1926 vaddr &= TARGET_PAGE_MASK;
1927 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1928 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1929 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1930 #if (NB_MMU_MODES >= 3)
1931 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1932 #if (NB_MMU_MODES == 4)
1933 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1934 #endif
1935 #endif
1938 /* add a new TLB entry. At most one entry for a given virtual address
1939 is permitted. Return 0 if OK or 2 if the page could not be mapped
1940 (can only happen in non SOFTMMU mode for I/O pages or pages
1941 conflicting with the host address space). */
1942 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1943 target_phys_addr_t paddr, int prot,
1944 int mmu_idx, int is_softmmu)
1946 PhysPageDesc *p;
1947 unsigned long pd;
1948 unsigned int index;
1949 target_ulong address;
1950 target_ulong code_address;
1951 target_phys_addr_t addend;
1952 int ret;
1953 CPUTLBEntry *te;
1954 CPUWatchpoint *wp;
1955 target_phys_addr_t iotlb;
1957 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1958 if (!p) {
1959 pd = IO_MEM_UNASSIGNED;
1960 } else {
1961 pd = p->phys_offset;
1963 #if defined(DEBUG_TLB)
1964 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1965 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1966 #endif
1968 ret = 0;
1969 address = vaddr;
1970 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1971 /* IO memory case (romd handled later) */
1972 address |= TLB_MMIO;
1974 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1975 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1976 /* Normal RAM. */
1977 iotlb = pd & TARGET_PAGE_MASK;
1978 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1979 iotlb |= IO_MEM_NOTDIRTY;
1980 else
1981 iotlb |= IO_MEM_ROM;
1982 } else {
1983 /* IO handlers are currently passed a physical address.
1984 It would be nice to pass an offset from the base address
1985 of that region. This would avoid having to special case RAM,
1986 and avoid full address decoding in every device.
1987 We can't use the high bits of pd for this because
1988 IO_MEM_ROMD uses these as a ram address. */
1989 iotlb = (pd & ~TARGET_PAGE_MASK);
1990 if (p) {
1991 iotlb += p->region_offset;
1992 } else {
1993 iotlb += paddr;
1997 code_address = address;
1998 /* Make accesses to pages with watchpoints go via the
1999 watchpoint trap routines. */
2000 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2001 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2002 iotlb = io_mem_watch + paddr;
2003 /* TODO: The memory case can be optimized by not trapping
2004 reads of pages with a write breakpoint. */
2005 address |= TLB_MMIO;
2009 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2010 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2011 te = &env->tlb_table[mmu_idx][index];
2012 te->addend = addend - vaddr;
2013 if (prot & PAGE_READ) {
2014 te->addr_read = address;
2015 } else {
2016 te->addr_read = -1;
2019 if (prot & PAGE_EXEC) {
2020 te->addr_code = code_address;
2021 } else {
2022 te->addr_code = -1;
2024 if (prot & PAGE_WRITE) {
2025 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2026 (pd & IO_MEM_ROMD)) {
2027 /* Write access calls the I/O callback. */
2028 te->addr_write = address | TLB_MMIO;
2029 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2030 !cpu_physical_memory_is_dirty(pd)) {
2031 te->addr_write = address | TLB_NOTDIRTY;
2032 } else {
2033 te->addr_write = address;
2035 } else {
2036 te->addr_write = -1;
2038 return ret;
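/* Illustrative call sketch (hypothetical caller, not from this file): a
   target MMU fault handler that has resolved vaddr -> paddr and the page
   protection typically fills the TLB with something like
       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, 1);
   where vaddr, paddr, the protection bits and mmu_idx all come from the
   target page table walk. */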
2041 #else
2043 void tlb_flush(CPUState *env, int flush_global)
2047 void tlb_flush_page(CPUState *env, target_ulong addr)
2051 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2052 target_phys_addr_t paddr, int prot,
2053 int mmu_idx, int is_softmmu)
2055 return 0;
2058 /* dump memory mappings */
2059 void page_dump(FILE *f)
2061 unsigned long start, end;
2062 int i, j, prot, prot1;
2063 PageDesc *p;
2065 fprintf(f, "%-8s %-8s %-8s %s\n",
2066 "start", "end", "size", "prot");
2067 start = -1;
2068 end = -1;
2069 prot = 0;
2070 for(i = 0; i <= L1_SIZE; i++) {
2071 if (i < L1_SIZE)
2072 p = l1_map[i];
2073 else
2074 p = NULL;
2075 for(j = 0;j < L2_SIZE; j++) {
2076 if (!p)
2077 prot1 = 0;
2078 else
2079 prot1 = p[j].flags;
2080 if (prot1 != prot) {
2081 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2082 if (start != -1) {
2083 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2084 start, end, end - start,
2085 prot & PAGE_READ ? 'r' : '-',
2086 prot & PAGE_WRITE ? 'w' : '-',
2087 prot & PAGE_EXEC ? 'x' : '-');
2089 if (prot1 != 0)
2090 start = end;
2091 else
2092 start = -1;
2093 prot = prot1;
2095 if (!p)
2096 break;
2101 int page_get_flags(target_ulong address)
2103 PageDesc *p;
2105 p = page_find(address >> TARGET_PAGE_BITS);
2106 if (!p)
2107 return 0;
2108 return p->flags;
2111 /* modify the flags of a page and invalidate the code if
2112    necessary. The flag PAGE_WRITE_ORG is set automatically
2113 depending on PAGE_WRITE */
2114 void page_set_flags(target_ulong start, target_ulong end, int flags)
2116 PageDesc *p;
2117 target_ulong addr;
2119 /* mmap_lock should already be held. */
2120 start = start & TARGET_PAGE_MASK;
2121 end = TARGET_PAGE_ALIGN(end);
2122 if (flags & PAGE_WRITE)
2123 flags |= PAGE_WRITE_ORG;
2124 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2125 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2126 /* We may be called for host regions that are outside guest
2127 address space. */
2128 if (!p)
2129 return;
2130 /* if the write protection is set, then we invalidate the code
2131 inside */
2132 if (!(p->flags & PAGE_WRITE) &&
2133 (flags & PAGE_WRITE) &&
2134 p->first_tb) {
2135 tb_invalidate_phys_page(addr, 0, NULL);
2137 p->flags = flags;
2141 int page_check_range(target_ulong start, target_ulong len, int flags)
2143 PageDesc *p;
2144 target_ulong end;
2145 target_ulong addr;
2147 if (start + len < start)
2148 /* we've wrapped around */
2149 return -1;
2151     end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2152 start = start & TARGET_PAGE_MASK;
2154 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2155 p = page_find(addr >> TARGET_PAGE_BITS);
2156 if( !p )
2157 return -1;
2158 if( !(p->flags & PAGE_VALID) )
2159 return -1;
2161 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2162 return -1;
2163 if (flags & PAGE_WRITE) {
2164 if (!(p->flags & PAGE_WRITE_ORG))
2165 return -1;
2166 /* unprotect the page if it was put read-only because it
2167 contains translated code */
2168 if (!(p->flags & PAGE_WRITE)) {
2169 if (!page_unprotect(addr, 0, NULL))
2170 return -1;
2172 return 0;
2175 return 0;
2178 /* called from signal handler: invalidate the code and unprotect the
2179    page. Return TRUE if the fault was successfully handled. */
2180 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2182 unsigned int page_index, prot, pindex;
2183 PageDesc *p, *p1;
2184 target_ulong host_start, host_end, addr;
2186 /* Technically this isn't safe inside a signal handler. However we
2187 know this only ever happens in a synchronous SEGV handler, so in
2188 practice it seems to be ok. */
2189 mmap_lock();
2191 host_start = address & qemu_host_page_mask;
2192 page_index = host_start >> TARGET_PAGE_BITS;
2193 p1 = page_find(page_index);
2194 if (!p1) {
2195 mmap_unlock();
2196 return 0;
2198 host_end = host_start + qemu_host_page_size;
2199 p = p1;
2200 prot = 0;
2201 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2202 prot |= p->flags;
2203 p++;
2205 /* if the page was really writable, then we change its
2206 protection back to writable */
2207 if (prot & PAGE_WRITE_ORG) {
2208 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2209 if (!(p1[pindex].flags & PAGE_WRITE)) {
2210 mprotect((void *)g2h(host_start), qemu_host_page_size,
2211 (prot & PAGE_BITS) | PAGE_WRITE);
2212 p1[pindex].flags |= PAGE_WRITE;
2213 /* and since the content will be modified, we must invalidate
2214 the corresponding translated code. */
2215 tb_invalidate_phys_page(address, pc, puc);
2216 #ifdef DEBUG_TB_CHECK
2217 tb_invalidate_check(address);
2218 #endif
2219 mmap_unlock();
2220 return 1;
2223 mmap_unlock();
2224 return 0;
2227 static inline void tlb_set_dirty(CPUState *env,
2228 unsigned long addr, target_ulong vaddr)
2231 #endif /* defined(CONFIG_USER_ONLY) */
2233 #if !defined(CONFIG_USER_ONLY)
2235 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2236 ram_addr_t memory, ram_addr_t region_offset);
2237 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2238 ram_addr_t orig_memory, ram_addr_t region_offset);
2239 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2240 need_subpage) \
2241 do { \
2242 if (addr > start_addr) \
2243 start_addr2 = 0; \
2244 else { \
2245 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2246 if (start_addr2 > 0) \
2247 need_subpage = 1; \
2250 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2251 end_addr2 = TARGET_PAGE_SIZE - 1; \
2252 else { \
2253 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2254 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2255 need_subpage = 1; \
2257 } while (0)
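/* Worked example (illustrative): with 4KiB target pages, registering a
   region with start_addr = ...0c00 and orig_size = 0x100 makes the macro
   compute start_addr2 = 0xc00 and end_addr2 = 0xcff for that page and set
   need_subpage, because the region covers only part of the page; the
   caller then routes the [0xc00, 0xcff] slice through a subpage. */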
2259 /* register physical memory. 'size' must be a multiple of the target
2260 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2261 io memory page. The address used when calling the IO function is
2262 the offset from the start of the region, plus region_offset. Both
2263    start_addr and region_offset are rounded down to a page boundary
2264 before calculating this offset. This should not be a problem unless
2265 the low bits of start_addr and region_offset differ. */
2266 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2267 ram_addr_t size,
2268 ram_addr_t phys_offset,
2269 ram_addr_t region_offset)
2271 target_phys_addr_t addr, end_addr;
2272 PhysPageDesc *p;
2273 CPUState *env;
2274 ram_addr_t orig_size = size;
2275 void *subpage;
2277 #ifdef USE_KQEMU
2278 /* XXX: should not depend on cpu context */
2279 env = first_cpu;
2280 if (env->kqemu_enabled) {
2281 kqemu_set_phys_mem(start_addr, size, phys_offset);
2283 #endif
2284 if (kvm_enabled())
2285 kvm_set_phys_mem(start_addr, size, phys_offset);
2287 if (phys_offset == IO_MEM_UNASSIGNED) {
2288 region_offset = start_addr;
2290 region_offset &= TARGET_PAGE_MASK;
2291 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2292 end_addr = start_addr + (target_phys_addr_t)size;
2293 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2294 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2295 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2296 ram_addr_t orig_memory = p->phys_offset;
2297 target_phys_addr_t start_addr2, end_addr2;
2298 int need_subpage = 0;
2300 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2301 need_subpage);
2302 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2303 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2304 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2305 &p->phys_offset, orig_memory,
2306 p->region_offset);
2307 } else {
2308 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2309 >> IO_MEM_SHIFT];
2311 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2312 region_offset);
2313 p->region_offset = 0;
2314 } else {
2315 p->phys_offset = phys_offset;
2316 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2317 (phys_offset & IO_MEM_ROMD))
2318 phys_offset += TARGET_PAGE_SIZE;
2320 } else {
2321 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2322 p->phys_offset = phys_offset;
2323 p->region_offset = region_offset;
2324 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2325 (phys_offset & IO_MEM_ROMD)) {
2326 phys_offset += TARGET_PAGE_SIZE;
2327 } else {
2328 target_phys_addr_t start_addr2, end_addr2;
2329 int need_subpage = 0;
2331 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2332 end_addr2, need_subpage);
2334 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2335 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2336 &p->phys_offset, IO_MEM_UNASSIGNED,
2337 addr & TARGET_PAGE_MASK);
2338 subpage_register(subpage, start_addr2, end_addr2,
2339 phys_offset, region_offset);
2340 p->region_offset = 0;
2344 region_offset += TARGET_PAGE_SIZE;
2347 /* since each CPU stores ram addresses in its TLB cache, we must
2348 reset the modified entries */
2349     /* XXX: slow! */
2350 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2351 tlb_flush(env, 1);
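/* Illustrative usage sketch (hypothetical caller, address and names are
   placeholders): an MMIO region whose handle came from
   cpu_register_io_memory() is typically mapped with
       cpu_register_physical_memory_offset(0xfe000000, 0x1000, mmio_index, 0);
   RAM registration is sketched after qemu_ram_alloc() below. */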
2355 /* XXX: temporary until new memory mapping API */
2356 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2358 PhysPageDesc *p;
2360 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2361 if (!p)
2362 return IO_MEM_UNASSIGNED;
2363 return p->phys_offset;
2366 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2368 if (kvm_enabled())
2369 kvm_coalesce_mmio_region(addr, size);
2372 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2374 if (kvm_enabled())
2375 kvm_uncoalesce_mmio_region(addr, size);
2378 /* XXX: better than nothing */
2379 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2381 ram_addr_t addr;
2382 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2383 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2384 (uint64_t)size, (uint64_t)phys_ram_size);
2385 abort();
2387 addr = phys_ram_alloc_offset;
2388 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2390 if (kvm_enabled())
2391 kvm_setup_guest_memory(phys_ram_base + addr, size);
2393 return addr;
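/* Illustrative board-level sketch (hypothetical caller): RAM is carved out
   of the preallocated phys_ram_base area and then mapped into the guest
   physical address space, e.g.
       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM); */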
2396 void qemu_ram_free(ram_addr_t addr)
2400 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2402 #ifdef DEBUG_UNASSIGNED
2403 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2404 #endif
2405 #if defined(TARGET_SPARC)
2406 do_unassigned_access(addr, 0, 0, 0, 1);
2407 #endif
2408 return 0;
2411 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2413 #ifdef DEBUG_UNASSIGNED
2414 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2415 #endif
2416 #if defined(TARGET_SPARC)
2417 do_unassigned_access(addr, 0, 0, 0, 2);
2418 #endif
2419 return 0;
2422 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2424 #ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2426 #endif
2427 #if defined(TARGET_SPARC)
2428 do_unassigned_access(addr, 0, 0, 0, 4);
2429 #endif
2430 return 0;
2433 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2435 #ifdef DEBUG_UNASSIGNED
2436 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2437 #endif
2438 #if defined(TARGET_SPARC)
2439 do_unassigned_access(addr, 1, 0, 0, 1);
2440 #endif
2443 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2445 #ifdef DEBUG_UNASSIGNED
2446 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2447 #endif
2448 #if defined(TARGET_SPARC)
2449 do_unassigned_access(addr, 1, 0, 0, 2);
2450 #endif
2453 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2455 #ifdef DEBUG_UNASSIGNED
2456 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2457 #endif
2458 #if defined(TARGET_SPARC)
2459 do_unassigned_access(addr, 1, 0, 0, 4);
2460 #endif
2463 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2464 unassigned_mem_readb,
2465 unassigned_mem_readw,
2466 unassigned_mem_readl,
2469 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2470 unassigned_mem_writeb,
2471 unassigned_mem_writew,
2472 unassigned_mem_writel,
2475 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2476 uint32_t val)
2478 int dirty_flags;
2479 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2480 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2481 #if !defined(CONFIG_USER_ONLY)
2482 tb_invalidate_phys_page_fast(ram_addr, 1);
2483 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2484 #endif
2486 stb_p(phys_ram_base + ram_addr, val);
2487 #ifdef USE_KQEMU
2488 if (cpu_single_env->kqemu_enabled &&
2489 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2490 kqemu_modify_page(cpu_single_env, ram_addr);
2491 #endif
2492 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2493 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2494 /* we remove the notdirty callback only if the code has been
2495 flushed */
2496 if (dirty_flags == 0xff)
2497 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2500 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2501 uint32_t val)
2503 int dirty_flags;
2504 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2505 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2506 #if !defined(CONFIG_USER_ONLY)
2507 tb_invalidate_phys_page_fast(ram_addr, 2);
2508 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2509 #endif
2511 stw_p(phys_ram_base + ram_addr, val);
2512 #ifdef USE_KQEMU
2513 if (cpu_single_env->kqemu_enabled &&
2514 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2515 kqemu_modify_page(cpu_single_env, ram_addr);
2516 #endif
2517 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2518 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2519 /* we remove the notdirty callback only if the code has been
2520 flushed */
2521 if (dirty_flags == 0xff)
2522 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2525 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2526 uint32_t val)
2528 int dirty_flags;
2529 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2530 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2531 #if !defined(CONFIG_USER_ONLY)
2532 tb_invalidate_phys_page_fast(ram_addr, 4);
2533 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2534 #endif
2536 stl_p(phys_ram_base + ram_addr, val);
2537 #ifdef USE_KQEMU
2538 if (cpu_single_env->kqemu_enabled &&
2539 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2540 kqemu_modify_page(cpu_single_env, ram_addr);
2541 #endif
2542 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2543 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2544 /* we remove the notdirty callback only if the code has been
2545 flushed */
2546 if (dirty_flags == 0xff)
2547 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2550 static CPUReadMemoryFunc *error_mem_read[3] = {
2551 NULL, /* never used */
2552 NULL, /* never used */
2553 NULL, /* never used */
2556 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2557 notdirty_mem_writeb,
2558 notdirty_mem_writew,
2559 notdirty_mem_writel,
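/* The NOTDIRTY slow path above operates on the per-page dirty byte in
   phys_ram_dirty: a write to a page that may still contain translated code
   (CODE_DIRTY_FLAG clear) first invalidates the TBs on that page, performs
   the store and marks the page dirty; once the byte reaches 0xff the TLB
   entry is switched back to the fast RAM path via tlb_set_dirty(). */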
2562 /* Generate a debug exception if a watchpoint has been hit. */
2563 static void check_watchpoint(int offset, int len_mask, int flags)
2565 CPUState *env = cpu_single_env;
2566 target_ulong pc, cs_base;
2567 TranslationBlock *tb;
2568 target_ulong vaddr;
2569 CPUWatchpoint *wp;
2570 int cpu_flags;
2572 if (env->watchpoint_hit) {
2573 /* We re-entered the check after replacing the TB. Now raise
2574          * the debug interrupt so that it will trigger after the
2575 * current instruction. */
2576 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2577 return;
2579 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2580 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2581 if ((vaddr == (wp->vaddr & len_mask) ||
2582 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2583 wp->flags |= BP_WATCHPOINT_HIT;
2584 if (!env->watchpoint_hit) {
2585 env->watchpoint_hit = wp;
2586 tb = tb_find_pc(env->mem_io_pc);
2587 if (!tb) {
2588 cpu_abort(env, "check_watchpoint: could not find TB for "
2589 "pc=%p", (void *)env->mem_io_pc);
2591 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2592 tb_phys_invalidate(tb, -1);
2593 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2594 env->exception_index = EXCP_DEBUG;
2595 } else {
2596 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2597 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2599 cpu_resume_from_signal(env, NULL);
2601 } else {
2602 wp->flags &= ~BP_WATCHPOINT_HIT;
2607 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2608 so these check for a hit then pass through to the normal out-of-line
2609 phys routines. */
2610 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2612 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2613 return ldub_phys(addr);
2616 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2618 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2619 return lduw_phys(addr);
2622 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2624 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2625 return ldl_phys(addr);
2628 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2629 uint32_t val)
2631 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2632 stb_phys(addr, val);
2635 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2636 uint32_t val)
2638 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2639 stw_phys(addr, val);
2642 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2643 uint32_t val)
2645 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2646 stl_phys(addr, val);
2649 static CPUReadMemoryFunc *watch_mem_read[3] = {
2650 watch_mem_readb,
2651 watch_mem_readw,
2652 watch_mem_readl,
2655 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2656 watch_mem_writeb,
2657 watch_mem_writew,
2658 watch_mem_writel,
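/* Routing sketch: tlb_set_page_exec() installs io_mem_watch (with TLB_MMIO)
   for pages containing watchpoints, so guest loads and stores first land in
   watch_mem_read/watch_mem_write above, which call check_watchpoint() and
   then fall through to the ordinary ld*_phys/st*_phys accessors. */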
2661 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2662 unsigned int len)
2664 uint32_t ret;
2665 unsigned int idx;
2667 idx = SUBPAGE_IDX(addr);
2668 #if defined(DEBUG_SUBPAGE)
2669 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2670 mmio, len, addr, idx);
2671 #endif
2672 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2673 addr + mmio->region_offset[idx][0][len]);
2675 return ret;
2678 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2679 uint32_t value, unsigned int len)
2681 unsigned int idx;
2683 idx = SUBPAGE_IDX(addr);
2684 #if defined(DEBUG_SUBPAGE)
2685 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2686 mmio, len, addr, idx, value);
2687 #endif
2688 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2689 addr + mmio->region_offset[idx][1][len],
2690 value);
2693 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2695 #if defined(DEBUG_SUBPAGE)
2696 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2697 #endif
2699 return subpage_readlen(opaque, addr, 0);
2702 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2703 uint32_t value)
2705 #if defined(DEBUG_SUBPAGE)
2706 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2707 #endif
2708 subpage_writelen(opaque, addr, value, 0);
2711 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2713 #if defined(DEBUG_SUBPAGE)
2714 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2715 #endif
2717 return subpage_readlen(opaque, addr, 1);
2720 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2721 uint32_t value)
2723 #if defined(DEBUG_SUBPAGE)
2724 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2725 #endif
2726 subpage_writelen(opaque, addr, value, 1);
2729 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2731 #if defined(DEBUG_SUBPAGE)
2732 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2733 #endif
2735 return subpage_readlen(opaque, addr, 2);
2738 static void subpage_writel (void *opaque,
2739 target_phys_addr_t addr, uint32_t value)
2741 #if defined(DEBUG_SUBPAGE)
2742 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2743 #endif
2744 subpage_writelen(opaque, addr, value, 2);
2747 static CPUReadMemoryFunc *subpage_read[] = {
2748 &subpage_readb,
2749 &subpage_readw,
2750 &subpage_readl,
2753 static CPUWriteMemoryFunc *subpage_write[] = {
2754 &subpage_writeb,
2755 &subpage_writew,
2756 &subpage_writel,
2759 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2760 ram_addr_t memory, ram_addr_t region_offset)
2762 int idx, eidx;
2763 unsigned int i;
2765 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2766 return -1;
2767 idx = SUBPAGE_IDX(start);
2768 eidx = SUBPAGE_IDX(end);
2769 #if defined(DEBUG_SUBPAGE)
2770 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2771 mmio, start, end, idx, eidx, memory);
2772 #endif
2773 memory >>= IO_MEM_SHIFT;
2774 for (; idx <= eidx; idx++) {
2775 for (i = 0; i < 4; i++) {
2776 if (io_mem_read[memory][i]) {
2777 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2778 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2779 mmio->region_offset[idx][0][i] = region_offset;
2781 if (io_mem_write[memory][i]) {
2782 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2783 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2784 mmio->region_offset[idx][1][i] = region_offset;
2789 return 0;
2792 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2793 ram_addr_t orig_memory, ram_addr_t region_offset)
2795 subpage_t *mmio;
2796 int subpage_memory;
2798 mmio = qemu_mallocz(sizeof(subpage_t));
2800 mmio->base = base;
2801 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2802 #if defined(DEBUG_SUBPAGE)
2803 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2804 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2805 #endif
2806 *phys = subpage_memory | IO_MEM_SUBPAGE;
2807 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2808 region_offset);
2810 return mmio;
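/* Illustrative example: if two devices are registered within the same
   target page (say at offsets 0x000-0x0ff and 0x100-0x1ff), the first
   registration converts the page into a subpage via subpage_init(), and
   each subpage_register() call records, per byte offset and access size,
   which io_mem handler and opaque pointer services that slice. */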
2813 static int get_free_io_mem_idx(void)
2815 int i;
2817 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2818 if (!io_mem_used[i]) {
2819 io_mem_used[i] = 1;
2820 return i;
2823 return -1;
2826 static void io_mem_init(void)
2828 int i;
2830 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2831 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2832 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2833 for (i=0; i<5; i++)
2834 io_mem_used[i] = 1;
2836 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2837 watch_mem_write, NULL);
2838 /* alloc dirty bits array */
2839 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2840 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2843 /* mem_read and mem_write are arrays of functions containing the
2844 function to access byte (index 0), word (index 1) and dword (index
2845 2). Functions can be omitted with a NULL function pointer. The
2846 registered functions may be modified dynamically later.
2847    If io_index is non-zero, the corresponding io zone is
2848 modified. If it is zero, a new io zone is allocated. The return
2849 value can be used with cpu_register_physical_memory(). (-1) is
2850    returned on error. */
2851 int cpu_register_io_memory(int io_index,
2852 CPUReadMemoryFunc **mem_read,
2853 CPUWriteMemoryFunc **mem_write,
2854 void *opaque)
2856 int i, subwidth = 0;
2858 if (io_index <= 0) {
2859 io_index = get_free_io_mem_idx();
2860 if (io_index == -1)
2861 return io_index;
2862 } else {
2863 if (io_index >= IO_MEM_NB_ENTRIES)
2864 return -1;
2867 for(i = 0;i < 3; i++) {
2868 if (!mem_read[i] || !mem_write[i])
2869 subwidth = IO_MEM_SUBWIDTH;
2870 io_mem_read[io_index][i] = mem_read[i];
2871 io_mem_write[io_index][i] = mem_write[i];
2873 io_mem_opaque[io_index] = opaque;
2874 return (io_index << IO_MEM_SHIFT) | subwidth;
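/* Illustrative usage sketch (hypothetical device, all names are
   placeholders):
       static CPUReadMemoryFunc *foo_read[3] = {
           foo_readb, foo_readw, foo_readl,
       };
       static CPUWriteMemoryFunc *foo_write[3] = {
           foo_writeb, foo_writew, foo_writel,
       };
       int io = cpu_register_io_memory(0, foo_read, foo_write, foo_state);
       cpu_register_physical_memory(0xfe001000, 0x1000, io); */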
2877 void cpu_unregister_io_memory(int io_table_address)
2879 int i;
2880 int io_index = io_table_address >> IO_MEM_SHIFT;
2882 for (i=0;i < 3; i++) {
2883 io_mem_read[io_index][i] = unassigned_mem_read[i];
2884 io_mem_write[io_index][i] = unassigned_mem_write[i];
2886 io_mem_opaque[io_index] = NULL;
2887 io_mem_used[io_index] = 0;
2890 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2892 return io_mem_write[io_index >> IO_MEM_SHIFT];
2895 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2897 return io_mem_read[io_index >> IO_MEM_SHIFT];
2900 #endif /* !defined(CONFIG_USER_ONLY) */
2902 /* physical memory access (slow version, mainly for debug) */
2903 #if defined(CONFIG_USER_ONLY)
2904 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2905 int len, int is_write)
2907 int l, flags;
2908 target_ulong page;
2909 void * p;
2911 while (len > 0) {
2912 page = addr & TARGET_PAGE_MASK;
2913 l = (page + TARGET_PAGE_SIZE) - addr;
2914 if (l > len)
2915 l = len;
2916 flags = page_get_flags(page);
2917 if (!(flags & PAGE_VALID))
2918 return;
2919 if (is_write) {
2920 if (!(flags & PAGE_WRITE))
2921 return;
2922 /* XXX: this code should not depend on lock_user */
2923 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2924 /* FIXME - should this return an error rather than just fail? */
2925 return;
2926 memcpy(p, buf, l);
2927 unlock_user(p, addr, l);
2928 } else {
2929 if (!(flags & PAGE_READ))
2930 return;
2931 /* XXX: this code should not depend on lock_user */
2932 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2933 /* FIXME - should this return an error rather than just fail? */
2934 return;
2935 memcpy(buf, p, l);
2936 unlock_user(p, addr, 0);
2938 len -= l;
2939 buf += l;
2940 addr += l;
2944 #else
2945 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2946 int len, int is_write)
2948 int l, io_index;
2949 uint8_t *ptr;
2950 uint32_t val;
2951 target_phys_addr_t page;
2952 unsigned long pd;
2953 PhysPageDesc *p;
2955 while (len > 0) {
2956 page = addr & TARGET_PAGE_MASK;
2957 l = (page + TARGET_PAGE_SIZE) - addr;
2958 if (l > len)
2959 l = len;
2960 p = phys_page_find(page >> TARGET_PAGE_BITS);
2961 if (!p) {
2962 pd = IO_MEM_UNASSIGNED;
2963 } else {
2964 pd = p->phys_offset;
2967 if (is_write) {
2968 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2969 target_phys_addr_t addr1 = addr;
2970 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2971 if (p)
2972 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2973 /* XXX: could force cpu_single_env to NULL to avoid
2974 potential bugs */
2975 if (l >= 4 && ((addr1 & 3) == 0)) {
2976 /* 32 bit write access */
2977 val = ldl_p(buf);
2978 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
2979 l = 4;
2980 } else if (l >= 2 && ((addr1 & 1) == 0)) {
2981 /* 16 bit write access */
2982 val = lduw_p(buf);
2983 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
2984 l = 2;
2985 } else {
2986 /* 8 bit write access */
2987 val = ldub_p(buf);
2988 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
2989 l = 1;
2991 } else {
2992 unsigned long addr1;
2993 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2994 /* RAM case */
2995 ptr = phys_ram_base + addr1;
2996 memcpy(ptr, buf, l);
2997 if (!cpu_physical_memory_is_dirty(addr1)) {
2998 /* invalidate code */
2999 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3000 /* set dirty bit */
3001 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3002 (0xff & ~CODE_DIRTY_FLAG);
3005 } else {
3006 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3007 !(pd & IO_MEM_ROMD)) {
3008 target_phys_addr_t addr1 = addr;
3009 /* I/O case */
3010 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3011 if (p)
3012 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3013 if (l >= 4 && ((addr1 & 3) == 0)) {
3014 /* 32 bit read access */
3015 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3016 stl_p(buf, val);
3017 l = 4;
3018 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3019 /* 16 bit read access */
3020 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3021 stw_p(buf, val);
3022 l = 2;
3023 } else {
3024 /* 8 bit read access */
3025 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3026 stb_p(buf, val);
3027 l = 1;
3029 } else {
3030 /* RAM case */
3031 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3032 (addr & ~TARGET_PAGE_MASK);
3033 memcpy(buf, ptr, l);
3036 len -= l;
3037 buf += l;
3038 addr += l;
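/* Callers normally use the cpu_physical_memory_read()/
   cpu_physical_memory_write() wrappers (defined elsewhere), which are
   expected to invoke cpu_physical_memory_rw() with is_write 0 or 1; the
   ROM variant below additionally allows writing into ROM pages so that
   firmware images can be loaded. */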
3042 /* used for ROM loading: can write in RAM and ROM */
3043 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3044 const uint8_t *buf, int len)
3046 int l;
3047 uint8_t *ptr;
3048 target_phys_addr_t page;
3049 unsigned long pd;
3050 PhysPageDesc *p;
3052 while (len > 0) {
3053 page = addr & TARGET_PAGE_MASK;
3054 l = (page + TARGET_PAGE_SIZE) - addr;
3055 if (l > len)
3056 l = len;
3057 p = phys_page_find(page >> TARGET_PAGE_BITS);
3058 if (!p) {
3059 pd = IO_MEM_UNASSIGNED;
3060 } else {
3061 pd = p->phys_offset;
3064 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3065 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3066 !(pd & IO_MEM_ROMD)) {
3067 /* do nothing */
3068 } else {
3069 unsigned long addr1;
3070 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3071 /* ROM/RAM case */
3072 ptr = phys_ram_base + addr1;
3073 memcpy(ptr, buf, l);
3075 len -= l;
3076 buf += l;
3077 addr += l;
3081 typedef struct {
3082 void *buffer;
3083 target_phys_addr_t addr;
3084 target_phys_addr_t len;
3085 } BounceBuffer;
3087 static BounceBuffer bounce;
3089 typedef struct MapClient {
3090 void *opaque;
3091 void (*callback)(void *opaque);
3092 LIST_ENTRY(MapClient) link;
3093 } MapClient;
3095 static LIST_HEAD(map_client_list, MapClient) map_client_list
3096 = LIST_HEAD_INITIALIZER(map_client_list);
3098 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3100 MapClient *client = qemu_malloc(sizeof(*client));
3102 client->opaque = opaque;
3103 client->callback = callback;
3104 LIST_INSERT_HEAD(&map_client_list, client, link);
3105 return client;
3108 void cpu_unregister_map_client(void *_client)
3110 MapClient *client = (MapClient *)_client;
3112 LIST_REMOVE(client, link);
3115 static void cpu_notify_map_clients(void)
3117 MapClient *client;
3119 while (!LIST_EMPTY(&map_client_list)) {
3120 client = LIST_FIRST(&map_client_list);
3121 client->callback(client->opaque);
3122 LIST_REMOVE(client, link);
3126 /* Map a physical memory region into a host virtual address.
3127 * May map a subset of the requested range, given by and returned in *plen.
3128 * May return NULL if resources needed to perform the mapping are exhausted.
3129 * Use only for reads OR writes - not for read-modify-write operations.
3130 * Use cpu_register_map_client() to know when retrying the map operation is
3131 * likely to succeed.
3133 void *cpu_physical_memory_map(target_phys_addr_t addr,
3134 target_phys_addr_t *plen,
3135 int is_write)
3137 target_phys_addr_t len = *plen;
3138 target_phys_addr_t done = 0;
3139 int l;
3140 uint8_t *ret = NULL;
3141 uint8_t *ptr;
3142 target_phys_addr_t page;
3143 unsigned long pd;
3144 PhysPageDesc *p;
3145 unsigned long addr1;
3147 while (len > 0) {
3148 page = addr & TARGET_PAGE_MASK;
3149 l = (page + TARGET_PAGE_SIZE) - addr;
3150 if (l > len)
3151 l = len;
3152 p = phys_page_find(page >> TARGET_PAGE_BITS);
3153 if (!p) {
3154 pd = IO_MEM_UNASSIGNED;
3155 } else {
3156 pd = p->phys_offset;
3159 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3160 if (done || bounce.buffer) {
3161 break;
3163 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3164 bounce.addr = addr;
3165 bounce.len = l;
3166 if (!is_write) {
3167 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3169 ptr = bounce.buffer;
3170 } else {
3171 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3172 ptr = phys_ram_base + addr1;
3174 if (!done) {
3175 ret = ptr;
3176 } else if (ret + done != ptr) {
3177 break;
3180 len -= l;
3181 addr += l;
3182 done += l;
3184 *plen = done;
3185 return ret;
3188 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3189 * Will also mark the memory as dirty if is_write == 1. access_len gives
3190 * the amount of memory that was actually read or written by the caller.
3192 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3193 int is_write, target_phys_addr_t access_len)
3195 if (buffer != bounce.buffer) {
3196 if (is_write) {
3197 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3198 while (access_len) {
3199 unsigned l;
3200 l = TARGET_PAGE_SIZE;
3201 if (l > access_len)
3202 l = access_len;
3203 if (!cpu_physical_memory_is_dirty(addr1)) {
3204 /* invalidate code */
3205 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3206 /* set dirty bit */
3207 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3208 (0xff & ~CODE_DIRTY_FLAG);
3210 addr1 += l;
3211 access_len -= l;
3214 return;
3216 if (is_write) {
3217 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3219 qemu_free(bounce.buffer);
3220 bounce.buffer = NULL;
3221 cpu_notify_map_clients();
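/* Illustrative DMA usage sketch (hypothetical caller):
       target_phys_addr_t plen = len;
       void *buf = cpu_physical_memory_map(addr, &plen, is_write);
       if (buf) {
           // device touches up to plen bytes of guest memory directly
           cpu_physical_memory_unmap(buf, plen, is_write, plen);
       } else {
           // bounce buffer busy: register a callback and retry later
           cpu_register_map_client(opaque, retry_cb);
       } */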
3224 /* warning: addr must be aligned */
3225 uint32_t ldl_phys(target_phys_addr_t addr)
3227 int io_index;
3228 uint8_t *ptr;
3229 uint32_t val;
3230 unsigned long pd;
3231 PhysPageDesc *p;
3233 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3234 if (!p) {
3235 pd = IO_MEM_UNASSIGNED;
3236 } else {
3237 pd = p->phys_offset;
3240 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3241 !(pd & IO_MEM_ROMD)) {
3242 /* I/O case */
3243 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3244 if (p)
3245 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3246 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3247 } else {
3248 /* RAM case */
3249 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3250 (addr & ~TARGET_PAGE_MASK);
3251 val = ldl_p(ptr);
3253 return val;
3256 /* warning: addr must be aligned */
3257 uint64_t ldq_phys(target_phys_addr_t addr)
3259 int io_index;
3260 uint8_t *ptr;
3261 uint64_t val;
3262 unsigned long pd;
3263 PhysPageDesc *p;
3265 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3266 if (!p) {
3267 pd = IO_MEM_UNASSIGNED;
3268 } else {
3269 pd = p->phys_offset;
3272 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3273 !(pd & IO_MEM_ROMD)) {
3274 /* I/O case */
3275 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3276 if (p)
3277 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3278 #ifdef TARGET_WORDS_BIGENDIAN
3279 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3280 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3281 #else
3282 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3283 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3284 #endif
3285 } else {
3286 /* RAM case */
3287 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3288 (addr & ~TARGET_PAGE_MASK);
3289 val = ldq_p(ptr);
3291 return val;
3294 /* XXX: optimize */
3295 uint32_t ldub_phys(target_phys_addr_t addr)
3297 uint8_t val;
3298 cpu_physical_memory_read(addr, &val, 1);
3299 return val;
3302 /* XXX: optimize */
3303 uint32_t lduw_phys(target_phys_addr_t addr)
3305 uint16_t val;
3306 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3307 return tswap16(val);
3310 /* warning: addr must be aligned. The ram page is not marked as dirty
3311 and the code inside is not invalidated. It is useful if the dirty
3312 bits are used to track modified PTEs */
3313 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3315 int io_index;
3316 uint8_t *ptr;
3317 unsigned long pd;
3318 PhysPageDesc *p;
3320 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3321 if (!p) {
3322 pd = IO_MEM_UNASSIGNED;
3323 } else {
3324 pd = p->phys_offset;
3327 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3328 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3329 if (p)
3330 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3331 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3332 } else {
3333 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3334 ptr = phys_ram_base + addr1;
3335 stl_p(ptr, val);
3337 if (unlikely(in_migration)) {
3338 if (!cpu_physical_memory_is_dirty(addr1)) {
3339 /* invalidate code */
3340 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3341 /* set dirty bit */
3342 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3343 (0xff & ~CODE_DIRTY_FLAG);
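/* Typical use (illustrative): a target page table walker that sets
   accessed/dirty bits in a guest PTE can update it with
   stl_phys_notdirty()/stq_phys_notdirty(), so that the store neither marks
   the page dirty nor invalidates any translated code on it. */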
3349 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3351 int io_index;
3352 uint8_t *ptr;
3353 unsigned long pd;
3354 PhysPageDesc *p;
3356 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3357 if (!p) {
3358 pd = IO_MEM_UNASSIGNED;
3359 } else {
3360 pd = p->phys_offset;
3363 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3364 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3365 if (p)
3366 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3367 #ifdef TARGET_WORDS_BIGENDIAN
3368 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3369 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3370 #else
3371 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3372 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3373 #endif
3374 } else {
3375 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3376 (addr & ~TARGET_PAGE_MASK);
3377 stq_p(ptr, val);
3381 /* warning: addr must be aligned */
3382 void stl_phys(target_phys_addr_t addr, uint32_t val)
3384 int io_index;
3385 uint8_t *ptr;
3386 unsigned long pd;
3387 PhysPageDesc *p;
3389 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3390 if (!p) {
3391 pd = IO_MEM_UNASSIGNED;
3392 } else {
3393 pd = p->phys_offset;
3396 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3397 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3398 if (p)
3399 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3400 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3401 } else {
3402 unsigned long addr1;
3403 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3404 /* RAM case */
3405 ptr = phys_ram_base + addr1;
3406 stl_p(ptr, val);
3407 if (!cpu_physical_memory_is_dirty(addr1)) {
3408 /* invalidate code */
3409 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3410 /* set dirty bit */
3411 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3412 (0xff & ~CODE_DIRTY_FLAG);
3417 /* XXX: optimize */
3418 void stb_phys(target_phys_addr_t addr, uint32_t val)
3420 uint8_t v = val;
3421 cpu_physical_memory_write(addr, &v, 1);
3424 /* XXX: optimize */
3425 void stw_phys(target_phys_addr_t addr, uint32_t val)
3427 uint16_t v = tswap16(val);
3428 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3431 /* XXX: optimize */
3432 void stq_phys(target_phys_addr_t addr, uint64_t val)
3434 val = tswap64(val);
3435 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3438 #endif
3440 /* virtual memory access for debug */
3441 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3442 uint8_t *buf, int len, int is_write)
3444 int l;
3445 target_phys_addr_t phys_addr;
3446 target_ulong page;
3448 while (len > 0) {
3449 page = addr & TARGET_PAGE_MASK;
3450 phys_addr = cpu_get_phys_page_debug(env, page);
3451         /* if no physical page is mapped, return an error */
3452 if (phys_addr == -1)
3453 return -1;
3454 l = (page + TARGET_PAGE_SIZE) - addr;
3455 if (l > len)
3456 l = len;
3457 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3458 buf, l, is_write);
3459 len -= l;
3460 buf += l;
3461 addr += l;
3463 return 0;
3466 /* in deterministic execution mode, instructions doing device I/Os
3467 must be at the end of the TB */
3468 void cpu_io_recompile(CPUState *env, void *retaddr)
3470 TranslationBlock *tb;
3471 uint32_t n, cflags;
3472 target_ulong pc, cs_base;
3473 uint64_t flags;
3475 tb = tb_find_pc((unsigned long)retaddr);
3476 if (!tb) {
3477 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3478 retaddr);
3480 n = env->icount_decr.u16.low + tb->icount;
3481 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3482 /* Calculate how many instructions had been executed before the fault
3483 occurred. */
3484 n = n - env->icount_decr.u16.low;
3485 /* Generate a new TB ending on the I/O insn. */
3486 n++;
3487 /* On MIPS and SH, delay slot instructions can only be restarted if
3488 they were already the first instruction in the TB. If this is not
3489 the first instruction in a TB then re-execute the preceding
3490 branch. */
3491 #if defined(TARGET_MIPS)
3492 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3493 env->active_tc.PC -= 4;
3494 env->icount_decr.u16.low++;
3495 env->hflags &= ~MIPS_HFLAG_BMASK;
3497 #elif defined(TARGET_SH4)
3498 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3499 && n > 1) {
3500 env->pc -= 2;
3501 env->icount_decr.u16.low++;
3502 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3504 #endif
3505 /* This should never happen. */
3506 if (n > CF_COUNT_MASK)
3507 cpu_abort(env, "TB too big during recompile");
3509 cflags = n | CF_LAST_IO;
3510 pc = tb->pc;
3511 cs_base = tb->cs_base;
3512 flags = tb->flags;
3513 tb_phys_invalidate(tb, -1);
3514 /* FIXME: In theory this could raise an exception. In practice
3515 we have already translated the block once so it's probably ok. */
3516 tb_gen_code(env, pc, cs_base, flags, cflags);
3517 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3518 the first in the TB) then we end up generating a whole new TB and
3519 repeating the fault, which is horribly inefficient.
3520 Better would be to execute just this insn uncached, or generate a
3521 second new TB. */
3522 cpu_resume_from_signal(env, NULL);
3525 void dump_exec_info(FILE *f,
3526 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3528 int i, target_code_size, max_target_code_size;
3529 int direct_jmp_count, direct_jmp2_count, cross_page;
3530 TranslationBlock *tb;
3532 target_code_size = 0;
3533 max_target_code_size = 0;
3534 cross_page = 0;
3535 direct_jmp_count = 0;
3536 direct_jmp2_count = 0;
3537 for(i = 0; i < nb_tbs; i++) {
3538 tb = &tbs[i];
3539 target_code_size += tb->size;
3540 if (tb->size > max_target_code_size)
3541 max_target_code_size = tb->size;
3542 if (tb->page_addr[1] != -1)
3543 cross_page++;
3544 if (tb->tb_next_offset[0] != 0xffff) {
3545 direct_jmp_count++;
3546 if (tb->tb_next_offset[1] != 0xffff) {
3547 direct_jmp2_count++;
3551 /* XXX: avoid using doubles ? */
3552 cpu_fprintf(f, "Translation buffer state:\n");
3553 cpu_fprintf(f, "gen code size %ld/%ld\n",
3554 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3555 cpu_fprintf(f, "TB count %d/%d\n",
3556 nb_tbs, code_gen_max_blocks);
3557 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3558 nb_tbs ? target_code_size / nb_tbs : 0,
3559 max_target_code_size);
3560 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3561 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3562 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3563 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3564 cross_page,
3565 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3566 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3567 direct_jmp_count,
3568 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3569 direct_jmp2_count,
3570 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3571 cpu_fprintf(f, "\nStatistics:\n");
3572 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3573 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3574 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3575 tcg_dump_info(f, cpu_fprintf);
3578 #if !defined(CONFIG_USER_ONLY)
3580 #define MMUSUFFIX _cmmu
3581 #define GETPC() NULL
3582 #define env cpu_single_env
3583 #define SOFTMMU_CODE_ACCESS
3585 #define SHIFT 0
3586 #include "softmmu_template.h"
3588 #define SHIFT 1
3589 #include "softmmu_template.h"
3591 #define SHIFT 2
3592 #include "softmmu_template.h"
3594 #define SHIFT 3
3595 #include "softmmu_template.h"
3597 #undef env
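/* The four inclusions above instantiate the _cmmu (code access) softmmu
   load helpers for 1, 2, 4 and 8 byte accesses; SHIFT is the log2 of the
   access size, and GETPC()/env are redefined for this standalone use. */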
3599 #endif