Fix race condition on access to env->interrupt_request
[qemu/mini2440/sniper_sniper_test.git] / exec.c
blob 902031c48dfeab4ddfbd09b607a97b6be8c64779
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97    section close to the code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
115 int phys_ram_fd;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
120 #endif
122 CPUState *first_cpu;
123 /* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129 int use_icount = 0;
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132 int64_t qemu_icount;
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149 ram_addr_t region_offset;
150 } PhysPageDesc;
152 #define L2_BITS 10
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156  * to actually be able to handle the complete 64-bit address space. */
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #else
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161 #endif
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
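/* The page tables below are two-level: a (virtual or physical) page index
   is split into an L1 part and an L2 part, e.g. for a 32-bit guest with
   4 KB pages (TARGET_PAGE_BITS == 12, L2_BITS == 10):

       index = addr >> TARGET_PAGE_BITS;
       l1    = index >> L2_BITS;          // selects one of L1_SIZE slots
       l2    = index & (L2_SIZE - 1);     // selects an entry in that slot

   The concrete bit counts depend on the target configuration above. */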
166 unsigned long qemu_real_host_page_size;
167 unsigned long qemu_host_page_bits;
168 unsigned long qemu_host_page_size;
169 unsigned long qemu_host_page_mask;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc *l1_map[L1_SIZE];
173 static PhysPageDesc **l1_phys_map;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182 char io_mem_used[IO_MEM_NB_ENTRIES];
183 static int io_mem_watch;
184 #endif
186 /* log support */
187 static const char *logfilename = "/tmp/qemu.log";
188 FILE *logfile;
189 int loglevel;
190 static int log_append = 0;
192 /* statistics */
193 static int tlb_flush_count;
194 static int tb_flush_count;
195 static int tb_phys_invalidate_count;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t {
199 target_phys_addr_t base;
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204 } subpage_t;
206 #ifdef _WIN32
207 static void map_exec(void *addr, long size)
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
214 #else
215 static void map_exec(void *addr, long size)
217 unsigned long start, end, page_size;
219 page_size = getpagesize();
220 start = (unsigned long)addr;
221 start &= ~(page_size - 1);
223 end = (unsigned long)addr + size;
224 end += page_size - 1;
225 end &= ~(page_size - 1);
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
230 #endif
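/* map_exec() marks an arbitrary buffer executable; the non-Windows version
   rounds the range outward to host page boundaries before calling
   mprotect(), so e.g. map_exec(buf, 1) still flips the whole page that
   contains buf to PROT_READ | PROT_WRITE | PROT_EXEC. */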
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
235 TARGET_PAGE_SIZE */
236 #ifdef _WIN32
238 SYSTEM_INFO system_info;
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
243 #else
244 qemu_real_host_page_size = getpagesize();
245 #endif
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
263 mmap_lock();
264 last_brk = (unsigned long)sbrk(0);
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274 page_set_flags(startaddr & TARGET_PAGE_MASK,
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
278 } while (!feof(f));
279 fclose(f);
281 mmap_unlock();
283 #endif
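/* For user-mode emulation, page_init() above parses /proc/self/maps and
   marks every region already mapped in the host process as PAGE_RESERVED,
   clamped to the guest physical address space, so that guest mappings
   created later can avoid the addresses QEMU itself already occupies. */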
286 static inline PageDesc **page_l1_map(target_ulong index)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292 return NULL;
293 #endif
294 return &l1_map[index >> L2_BITS];
297 static inline PageDesc *page_find_alloc(target_ulong index)
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 size_t len = sizeof(PageDesc) * L2_SIZE;
309 /* Don't use qemu_malloc because it may recurse. */
310 p = mmap(0, len, PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
312 *lp = p;
313 if (h2g_valid(p)) {
314 unsigned long addr = h2g(p);
315 page_set_flags(addr & TARGET_PAGE_MASK,
316 TARGET_PAGE_ALIGN(addr + len),
317 PAGE_RESERVED);
319 #else
320 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 *lp = p;
322 #endif
324 return p + (index & (L2_SIZE - 1));
327 static inline PageDesc *page_find(target_ulong index)
329 PageDesc **lp, *p;
330 lp = page_l1_map(index);
331 if (!lp)
332 return NULL;
334 p = *lp;
335 if (!p)
336     return NULL;
337 return p + (index & (L2_SIZE - 1));
340 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
342 void **lp, **p;
343 PhysPageDesc *pd;
345 p = (void **)l1_phys_map;
346 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
348 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350 #endif
351 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352 p = *lp;
353 if (!p) {
354 /* allocate if not found */
355 if (!alloc)
356 return NULL;
357 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358 memset(p, 0, sizeof(void *) * L1_SIZE);
359 *lp = p;
361 #endif
362 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363 pd = *lp;
364 if (!pd) {
365 int i;
366 /* allocate if not found */
367 if (!alloc)
368 return NULL;
369 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 *lp = pd;
371 for (i = 0; i < L2_SIZE; i++) {
372 pd[i].phys_offset = IO_MEM_UNASSIGNED;
373 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
376 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
379 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
381 return phys_page_find_alloc(index, 0);
384 #if !defined(CONFIG_USER_ONLY)
385 static void tlb_protect_code(ram_addr_t ram_addr);
386 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
387 target_ulong vaddr);
388 #define mmap_lock() do { } while(0)
389 #define mmap_unlock() do { } while(0)
390 #endif
392 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
394 #if defined(CONFIG_USER_ONLY)
395 /* Currently it is not recommended to allocate big chunks of data in
396    user mode. This will change when a dedicated libc is used. */
397 #define USE_STATIC_CODE_GEN_BUFFER
398 #endif
400 #ifdef USE_STATIC_CODE_GEN_BUFFER
401 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
402 #endif
404 static void code_gen_alloc(unsigned long tb_size)
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer = static_code_gen_buffer;
408 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
409 map_exec(code_gen_buffer, code_gen_buffer_size);
410 #else
411 code_gen_buffer_size = tb_size;
412 if (code_gen_buffer_size == 0) {
413 #if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416 #else
417         /* XXX: needs adjustments */
418 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
419 #endif
421 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
422 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425 #if defined(__linux__)
427 int flags;
428 void *start = NULL;
430 flags = MAP_PRIVATE | MAP_ANONYMOUS;
431 #if defined(__x86_64__)
432 flags |= MAP_32BIT;
433 /* Cannot map more than that */
434 if (code_gen_buffer_size > (800 * 1024 * 1024))
435 code_gen_buffer_size = (800 * 1024 * 1024);
436 #elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
438 flags |= MAP_FIXED;
439 start = (void *) 0x60000000UL;
440 if (code_gen_buffer_size > (512 * 1024 * 1024))
441 code_gen_buffer_size = (512 * 1024 * 1024);
442 #elif defined(__arm__)
443 /* Map the buffer below 32M, so we can use direct calls and branches */
444 flags |= MAP_FIXED;
445 start = (void *) 0x01000000UL;
446 if (code_gen_buffer_size > 16 * 1024 * 1024)
447 code_gen_buffer_size = 16 * 1024 * 1024;
448 #endif
449 code_gen_buffer = mmap(start, code_gen_buffer_size,
450 PROT_WRITE | PROT_READ | PROT_EXEC,
451 flags, -1, 0);
452 if (code_gen_buffer == MAP_FAILED) {
453 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 exit(1);
457 #elif defined(__FreeBSD__)
459 int flags;
460 void *addr = NULL;
461 flags = MAP_PRIVATE | MAP_ANONYMOUS;
462 #if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
465 flags |= MAP_FIXED;
466 addr = (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size > (800 * 1024 * 1024))
469 code_gen_buffer_size = (800 * 1024 * 1024);
470 #endif
471 code_gen_buffer = mmap(addr, code_gen_buffer_size,
472 PROT_WRITE | PROT_READ | PROT_EXEC,
473 flags, -1, 0);
474 if (code_gen_buffer == MAP_FAILED) {
475 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 exit(1);
479 #else
480 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482 #endif
483 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
484 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
485 code_gen_buffer_max_size = code_gen_buffer_size -
486 code_gen_max_block_size();
487 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
488 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
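/* Sizing in code_gen_alloc(): the buffer is either the static array, a
   caller-supplied tb_size, or a default derived from phys_ram_size / 4 for
   system emulation.  code_gen_buffer_max_size keeps one maximum-sized block
   of headroom so a translation started near the end of the buffer cannot
   overflow it, and code_gen_max_blocks caps the tbs[] array at one TB per
   average-sized block of buffer space. */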
491 /* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
493 size. */
494 void cpu_exec_init_all(unsigned long tb_size)
496 cpu_gen_init();
497 code_gen_alloc(tb_size);
498 code_gen_ptr = code_gen_buffer;
499 page_init();
500 #if !defined(CONFIG_USER_ONLY)
501 io_mem_init();
502 #endif
505 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
507 #define CPU_COMMON_SAVE_VERSION 1
509 static void cpu_common_save(QEMUFile *f, void *opaque)
511 CPUState *env = opaque;
513 qemu_put_be32s(f, &env->halted);
514 qemu_put_be32s(f, &env->interrupt_request);
517 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
519 CPUState *env = opaque;
521 if (version_id != CPU_COMMON_SAVE_VERSION)
522 return -EINVAL;
524 qemu_get_be32s(f, &env->halted);
525 qemu_get_be32s(f, &env->interrupt_request);
526 tlb_flush(env, 1);
528 return 0;
530 #endif
532 void cpu_exec_init(CPUState *env)
534 CPUState **penv;
535 int cpu_index;
537 env->next_cpu = NULL;
538 penv = &first_cpu;
539 cpu_index = 0;
540 while (*penv != NULL) {
541 penv = (CPUState **)&(*penv)->next_cpu;
542 cpu_index++;
544 env->cpu_index = cpu_index;
545 TAILQ_INIT(&env->breakpoints);
546 TAILQ_INIT(&env->watchpoints);
547 *penv = env;
548 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
549 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
550 cpu_common_save, cpu_common_load, env);
551 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
552 cpu_save, cpu_load, env);
553 #endif
556 static inline void invalidate_page_bitmap(PageDesc *p)
558 if (p->code_bitmap) {
559 qemu_free(p->code_bitmap);
560 p->code_bitmap = NULL;
562 p->code_write_count = 0;
565 /* set to NULL all the 'first_tb' fields in all PageDescs */
566 static void page_flush_tb(void)
568 int i, j;
569 PageDesc *p;
571 for(i = 0; i < L1_SIZE; i++) {
572 p = l1_map[i];
573 if (p) {
574 for(j = 0; j < L2_SIZE; j++) {
575 p->first_tb = NULL;
576 invalidate_page_bitmap(p);
577 p++;
583 /* flush all the translation blocks */
584 /* XXX: tb_flush is currently not thread safe */
585 void tb_flush(CPUState *env1)
587 CPUState *env;
588 #if defined(DEBUG_FLUSH)
589 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
590 (unsigned long)(code_gen_ptr - code_gen_buffer),
591 nb_tbs, nb_tbs > 0 ?
592 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
593 #endif
594 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
595 cpu_abort(env1, "Internal error: code buffer overflow\n");
597 nb_tbs = 0;
599 for(env = first_cpu; env != NULL; env = env->next_cpu) {
600 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
603 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
604 page_flush_tb();
606 code_gen_ptr = code_gen_buffer;
607 /* XXX: flush processor icache at this point if cache flush is
608 expensive */
609 tb_flush_count++;
612 #ifdef DEBUG_TB_CHECK
614 static void tb_invalidate_check(target_ulong address)
616 TranslationBlock *tb;
617 int i;
618 address &= TARGET_PAGE_MASK;
619 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
620 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
621 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
622 address >= tb->pc + tb->size)) {
623 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
624 address, (long)tb->pc, tb->size);
630 /* verify that all the pages have correct rights for code */
631 static void tb_page_check(void)
633 TranslationBlock *tb;
634 int i, flags1, flags2;
636 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
637 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
638 flags1 = page_get_flags(tb->pc);
639 flags2 = page_get_flags(tb->pc + tb->size - 1);
640 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
641 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
642 (long)tb->pc, tb->size, flags1, flags2);
648 static void tb_jmp_check(TranslationBlock *tb)
650 TranslationBlock *tb1;
651 unsigned int n1;
653 /* suppress any remaining jumps to this TB */
654 tb1 = tb->jmp_first;
655 for(;;) {
656 n1 = (long)tb1 & 3;
657 tb1 = (TranslationBlock *)((long)tb1 & ~3);
658 if (n1 == 2)
659 break;
660 tb1 = tb1->jmp_next[n1];
662 /* check end of list */
663 if (tb1 != tb) {
664 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
668 #endif
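/* The TB list helpers below rely on pointer tagging: the two low bits of a
   TranslationBlock pointer stored in page_next[]/jmp_next[]/jmp_first encode
   which slot the link belongs to.  A tag of 0 or 1 selects page (or jump)
   slot n of that TB, and a tag of 2 marks the head/end of a circular jump
   list, e.g. tb->jmp_first = (TranslationBlock *)((long)tb | 2).  Masking
   with ~3 recovers the real pointer. */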
670 /* invalidate one TB */
671 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
672 int next_offset)
674 TranslationBlock *tb1;
675 for(;;) {
676 tb1 = *ptb;
677 if (tb1 == tb) {
678 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
679 break;
681 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
685 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
687 TranslationBlock *tb1;
688 unsigned int n1;
690 for(;;) {
691 tb1 = *ptb;
692 n1 = (long)tb1 & 3;
693 tb1 = (TranslationBlock *)((long)tb1 & ~3);
694 if (tb1 == tb) {
695 *ptb = tb1->page_next[n1];
696 break;
698 ptb = &tb1->page_next[n1];
702 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
704 TranslationBlock *tb1, **ptb;
705 unsigned int n1;
707 ptb = &tb->jmp_next[n];
708 tb1 = *ptb;
709 if (tb1) {
710 /* find tb(n) in circular list */
711 for(;;) {
712 tb1 = *ptb;
713 n1 = (long)tb1 & 3;
714 tb1 = (TranslationBlock *)((long)tb1 & ~3);
715 if (n1 == n && tb1 == tb)
716 break;
717 if (n1 == 2) {
718 ptb = &tb1->jmp_first;
719 } else {
720 ptb = &tb1->jmp_next[n1];
723 /* now we can suppress tb(n) from the list */
724 *ptb = tb->jmp_next[n];
726 tb->jmp_next[n] = NULL;
730 /* reset the jump entry 'n' of a TB so that it is not chained to
731 another TB */
732 static inline void tb_reset_jump(TranslationBlock *tb, int n)
734 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
737 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
739 CPUState *env;
740 PageDesc *p;
741 unsigned int h, n1;
742 target_phys_addr_t phys_pc;
743 TranslationBlock *tb1, *tb2;
745 /* remove the TB from the hash list */
746 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
747 h = tb_phys_hash_func(phys_pc);
748 tb_remove(&tb_phys_hash[h], tb,
749 offsetof(TranslationBlock, phys_hash_next));
751 /* remove the TB from the page list */
752 if (tb->page_addr[0] != page_addr) {
753 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
754 tb_page_remove(&p->first_tb, tb);
755 invalidate_page_bitmap(p);
757 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
758 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
759 tb_page_remove(&p->first_tb, tb);
760 invalidate_page_bitmap(p);
763 tb_invalidated_flag = 1;
765 /* remove the TB from the hash list */
766 h = tb_jmp_cache_hash_func(tb->pc);
767 for(env = first_cpu; env != NULL; env = env->next_cpu) {
768 if (env->tb_jmp_cache[h] == tb)
769 env->tb_jmp_cache[h] = NULL;
772 /* suppress this TB from the two jump lists */
773 tb_jmp_remove(tb, 0);
774 tb_jmp_remove(tb, 1);
776 /* suppress any remaining jumps to this TB */
777 tb1 = tb->jmp_first;
778 for(;;) {
779 n1 = (long)tb1 & 3;
780 if (n1 == 2)
781 break;
782 tb1 = (TranslationBlock *)((long)tb1 & ~3);
783 tb2 = tb1->jmp_next[n1];
784 tb_reset_jump(tb1, n1);
785 tb1->jmp_next[n1] = NULL;
786 tb1 = tb2;
788 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
790 tb_phys_invalidate_count++;
793 static inline void set_bits(uint8_t *tab, int start, int len)
795 int end, mask, end1;
797 end = start + len;
798 tab += start >> 3;
799 mask = 0xff << (start & 7);
800 if ((start & ~7) == (end & ~7)) {
801 if (start < end) {
802 mask &= ~(0xff << (end & 7));
803 *tab |= mask;
805 } else {
806 *tab++ |= mask;
807 start = (start + 8) & ~7;
808 end1 = end & ~7;
809 while (start < end1) {
810 *tab++ = 0xff;
811 start += 8;
813 if (start < end) {
814 mask = ~(0xff << (end & 7));
815 *tab |= mask;
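/* set_bits() sets bits [start, start + len) in a byte-addressed bitmap,
   handling the partial leading byte, any whole bytes, and the partial
   trailing byte separately.  For example, set_bits(tab, 3, 7) sets bits
   3..9, i.e. the top five bits of tab[0] and the low two bits of tab[1]. */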
820 static void build_page_bitmap(PageDesc *p)
822 int n, tb_start, tb_end;
823 TranslationBlock *tb;
825 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
827 tb = p->first_tb;
828 while (tb != NULL) {
829 n = (long)tb & 3;
830 tb = (TranslationBlock *)((long)tb & ~3);
831 /* NOTE: this is subtle as a TB may span two physical pages */
832 if (n == 0) {
833 /* NOTE: tb_end may be after the end of the page, but
834 it is not a problem */
835 tb_start = tb->pc & ~TARGET_PAGE_MASK;
836 tb_end = tb_start + tb->size;
837 if (tb_end > TARGET_PAGE_SIZE)
838 tb_end = TARGET_PAGE_SIZE;
839 } else {
840 tb_start = 0;
841 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
843 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
844 tb = tb->page_next[n];
848 TranslationBlock *tb_gen_code(CPUState *env,
849 target_ulong pc, target_ulong cs_base,
850 int flags, int cflags)
852 TranslationBlock *tb;
853 uint8_t *tc_ptr;
854 target_ulong phys_pc, phys_page2, virt_page2;
855 int code_gen_size;
857 phys_pc = get_phys_addr_code(env, pc);
858 tb = tb_alloc(pc);
859 if (!tb) {
860 /* flush must be done */
861 tb_flush(env);
862 /* cannot fail at this point */
863 tb = tb_alloc(pc);
864 /* Don't forget to invalidate previous TB info. */
865 tb_invalidated_flag = 1;
867 tc_ptr = code_gen_ptr;
868 tb->tc_ptr = tc_ptr;
869 tb->cs_base = cs_base;
870 tb->flags = flags;
871 tb->cflags = cflags;
872 cpu_gen_code(env, tb, &code_gen_size);
873 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
875 /* check next page if needed */
876 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
877 phys_page2 = -1;
878 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
879 phys_page2 = get_phys_addr_code(env, virt_page2);
881 tb_link_phys(tb, phys_pc, phys_page2);
882 return tb;
885 /* invalidate all TBs which intersect with the target physical page
886 starting in range [start;end[. NOTE: start and end must refer to
887 the same physical page. 'is_cpu_write_access' should be true if called
888 from a real cpu write access: the virtual CPU will exit the current
889 TB if code is modified inside this TB. */
890 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
891 int is_cpu_write_access)
893 TranslationBlock *tb, *tb_next, *saved_tb;
894 CPUState *env = cpu_single_env;
895 target_ulong tb_start, tb_end;
896 PageDesc *p;
897 int n;
898 #ifdef TARGET_HAS_PRECISE_SMC
899 int current_tb_not_found = is_cpu_write_access;
900 TranslationBlock *current_tb = NULL;
901 int current_tb_modified = 0;
902 target_ulong current_pc = 0;
903 target_ulong current_cs_base = 0;
904 int current_flags = 0;
905 #endif /* TARGET_HAS_PRECISE_SMC */
907 p = page_find(start >> TARGET_PAGE_BITS);
908 if (!p)
909 return;
910 if (!p->code_bitmap &&
911 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
912 is_cpu_write_access) {
913 /* build code bitmap */
914 build_page_bitmap(p);
917 /* we remove all the TBs in the range [start, end[ */
918 /* XXX: see if in some cases it could be faster to invalidate all the code */
919 tb = p->first_tb;
920 while (tb != NULL) {
921 n = (long)tb & 3;
922 tb = (TranslationBlock *)((long)tb & ~3);
923 tb_next = tb->page_next[n];
924 /* NOTE: this is subtle as a TB may span two physical pages */
925 if (n == 0) {
926 /* NOTE: tb_end may be after the end of the page, but
927 it is not a problem */
928 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
929 tb_end = tb_start + tb->size;
930 } else {
931 tb_start = tb->page_addr[1];
932 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
934 if (!(tb_end <= start || tb_start >= end)) {
935 #ifdef TARGET_HAS_PRECISE_SMC
936 if (current_tb_not_found) {
937 current_tb_not_found = 0;
938 current_tb = NULL;
939 if (env->mem_io_pc) {
940 /* now we have a real cpu fault */
941 current_tb = tb_find_pc(env->mem_io_pc);
944 if (current_tb == tb &&
945 (current_tb->cflags & CF_COUNT_MASK) != 1) {
946 /* If we are modifying the current TB, we must stop
947 its execution. We could be more precise by checking
948 that the modification is after the current PC, but it
949 would require a specialized function to partially
950 restore the CPU state */
952 current_tb_modified = 1;
953 cpu_restore_state(current_tb, env,
954 env->mem_io_pc, NULL);
955 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
956 &current_flags);
958 #endif /* TARGET_HAS_PRECISE_SMC */
959 /* we need to do that to handle the case where a signal
960 occurs while doing tb_phys_invalidate() */
961 saved_tb = NULL;
962 if (env) {
963 saved_tb = env->current_tb;
964 env->current_tb = NULL;
966 tb_phys_invalidate(tb, -1);
967 if (env) {
968 env->current_tb = saved_tb;
969 if (env->interrupt_request && env->current_tb)
970 cpu_interrupt(env, env->interrupt_request);
973 tb = tb_next;
975 #if !defined(CONFIG_USER_ONLY)
976 /* if no code remaining, no need to continue to use slow writes */
977 if (!p->first_tb) {
978 invalidate_page_bitmap(p);
979 if (is_cpu_write_access) {
980 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
983 #endif
984 #ifdef TARGET_HAS_PRECISE_SMC
985 if (current_tb_modified) {
986 /* we generate a block containing just the instruction
987 modifying the memory. It will ensure that it cannot modify
988 itself */
989 env->current_tb = NULL;
990 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
991 cpu_resume_from_signal(env, NULL);
993 #endif
996 /* len must be <= 8 and start must be a multiple of len */
997 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
999 PageDesc *p;
1000 int offset, b;
1001 #if 0
1002 if (1) {
1003 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1004 cpu_single_env->mem_io_vaddr, len,
1005 cpu_single_env->eip,
1006 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1008 #endif
1009 p = page_find(start >> TARGET_PAGE_BITS);
1010 if (!p)
1011 return;
1012 if (p->code_bitmap) {
1013 offset = start & ~TARGET_PAGE_MASK;
1014 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1015 if (b & ((1 << len) - 1))
1016 goto do_invalidate;
1017 } else {
1018 do_invalidate:
1019 tb_invalidate_phys_page_range(start, start + len, 1);
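/* tb_invalidate_phys_page_fast() is the fast path for small (<= 8 byte,
   len-aligned) writes coming from the softmmu slow path: once a page has
   seen SMC_BITMAP_USE_THRESHOLD code-invalidating writes, p->code_bitmap
   marks which bytes of the page are covered by translated code, so a write
   that misses the bitmap can skip the full
   tb_invalidate_phys_page_range() scan. */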
1023 #if !defined(CONFIG_SOFTMMU)
1024 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1025 unsigned long pc, void *puc)
1027 TranslationBlock *tb;
1028 PageDesc *p;
1029 int n;
1030 #ifdef TARGET_HAS_PRECISE_SMC
1031 TranslationBlock *current_tb = NULL;
1032 CPUState *env = cpu_single_env;
1033 int current_tb_modified = 0;
1034 target_ulong current_pc = 0;
1035 target_ulong current_cs_base = 0;
1036 int current_flags = 0;
1037 #endif
1039 addr &= TARGET_PAGE_MASK;
1040 p = page_find(addr >> TARGET_PAGE_BITS);
1041 if (!p)
1042 return;
1043 tb = p->first_tb;
1044 #ifdef TARGET_HAS_PRECISE_SMC
1045 if (tb && pc != 0) {
1046 current_tb = tb_find_pc(pc);
1048 #endif
1049 while (tb != NULL) {
1050 n = (long)tb & 3;
1051 tb = (TranslationBlock *)((long)tb & ~3);
1052 #ifdef TARGET_HAS_PRECISE_SMC
1053 if (current_tb == tb &&
1054 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1055 /* If we are modifying the current TB, we must stop
1056 its execution. We could be more precise by checking
1057 that the modification is after the current PC, but it
1058 would require a specialized function to partially
1059 restore the CPU state */
1061 current_tb_modified = 1;
1062 cpu_restore_state(current_tb, env, pc, puc);
1063 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1064 &current_flags);
1066 #endif /* TARGET_HAS_PRECISE_SMC */
1067 tb_phys_invalidate(tb, addr);
1068 tb = tb->page_next[n];
1070 p->first_tb = NULL;
1071 #ifdef TARGET_HAS_PRECISE_SMC
1072 if (current_tb_modified) {
1073 /* we generate a block containing just the instruction
1074 modifying the memory. It will ensure that it cannot modify
1075 itself */
1076 env->current_tb = NULL;
1077 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1078 cpu_resume_from_signal(env, puc);
1080 #endif
1082 #endif
1084 /* add the tb in the target page and protect it if necessary */
1085 static inline void tb_alloc_page(TranslationBlock *tb,
1086 unsigned int n, target_ulong page_addr)
1088 PageDesc *p;
1089 TranslationBlock *last_first_tb;
1091 tb->page_addr[n] = page_addr;
1092 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1093 tb->page_next[n] = p->first_tb;
1094 last_first_tb = p->first_tb;
1095 p->first_tb = (TranslationBlock *)((long)tb | n);
1096 invalidate_page_bitmap(p);
1098 #if defined(TARGET_HAS_SMC) || 1
1100 #if defined(CONFIG_USER_ONLY)
1101 if (p->flags & PAGE_WRITE) {
1102 target_ulong addr;
1103 PageDesc *p2;
1104 int prot;
1106 /* force the host page as non writable (writes will have a
1107 page fault + mprotect overhead) */
1108 page_addr &= qemu_host_page_mask;
1109 prot = 0;
1110 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1111 addr += TARGET_PAGE_SIZE) {
1113 p2 = page_find (addr >> TARGET_PAGE_BITS);
1114 if (!p2)
1115 continue;
1116 prot |= p2->flags;
1117 p2->flags &= ~PAGE_WRITE;
1118 page_get_flags(addr);
1120 mprotect(g2h(page_addr), qemu_host_page_size,
1121 (prot & PAGE_BITS) & ~PAGE_WRITE);
1122 #ifdef DEBUG_TB_INVALIDATE
1123 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1124 page_addr);
1125 #endif
1127 #else
1128 /* if some code is already present, then the pages are already
1129 protected. So we handle the case where only the first TB is
1130 allocated in a physical page */
1131 if (!last_first_tb) {
1132 tlb_protect_code(page_addr);
1134 #endif
1136 #endif /* TARGET_HAS_SMC */
1139 /* Allocate a new translation block. Flush the translation buffer if
1140 too many translation blocks or too much generated code. */
1141 TranslationBlock *tb_alloc(target_ulong pc)
1143 TranslationBlock *tb;
1145 if (nb_tbs >= code_gen_max_blocks ||
1146 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1147 return NULL;
1148 tb = &tbs[nb_tbs++];
1149 tb->pc = pc;
1150 tb->cflags = 0;
1151 return tb;
1154 void tb_free(TranslationBlock *tb)
1156 /* In practice this is mostly used for single-use temporary TBs.
1157 Ignore the hard cases and just back up if this TB happens to
1158 be the last one generated. */
1159 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1160 code_gen_ptr = tb->tc_ptr;
1161 nb_tbs--;
1165 /* add a new TB and link it to the physical page tables. phys_page2 is
1166 (-1) to indicate that only one page contains the TB. */
1167 void tb_link_phys(TranslationBlock *tb,
1168 target_ulong phys_pc, target_ulong phys_page2)
1170 unsigned int h;
1171 TranslationBlock **ptb;
1173 /* Grab the mmap lock to stop another thread invalidating this TB
1174 before we are done. */
1175 mmap_lock();
1176 /* add in the physical hash table */
1177 h = tb_phys_hash_func(phys_pc);
1178 ptb = &tb_phys_hash[h];
1179 tb->phys_hash_next = *ptb;
1180 *ptb = tb;
1182 /* add in the page list */
1183 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1184 if (phys_page2 != -1)
1185 tb_alloc_page(tb, 1, phys_page2);
1186 else
1187 tb->page_addr[1] = -1;
1189 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1190 tb->jmp_next[0] = NULL;
1191 tb->jmp_next[1] = NULL;
1193 /* init original jump addresses */
1194 if (tb->tb_next_offset[0] != 0xffff)
1195 tb_reset_jump(tb, 0);
1196 if (tb->tb_next_offset[1] != 0xffff)
1197 tb_reset_jump(tb, 1);
1199 #ifdef DEBUG_TB_CHECK
1200 tb_page_check();
1201 #endif
1202 mmap_unlock();
1205 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1206 tb[1].tc_ptr. Return NULL if not found */
1207 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1209 int m_min, m_max, m;
1210 unsigned long v;
1211 TranslationBlock *tb;
1213 if (nb_tbs <= 0)
1214 return NULL;
1215 if (tc_ptr < (unsigned long)code_gen_buffer ||
1216 tc_ptr >= (unsigned long)code_gen_ptr)
1217 return NULL;
1218 /* binary search (cf Knuth) */
1219 m_min = 0;
1220 m_max = nb_tbs - 1;
1221 while (m_min <= m_max) {
1222 m = (m_min + m_max) >> 1;
1223 tb = &tbs[m];
1224 v = (unsigned long)tb->tc_ptr;
1225 if (v == tc_ptr)
1226 return tb;
1227 else if (tc_ptr < v) {
1228 m_max = m - 1;
1229 } else {
1230 m_min = m + 1;
1233 return &tbs[m_max];
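/* tb_find_pc() binary-searches tbs[] by tc_ptr (host code address).  Because
   TBs are allocated sequentially from code_gen_buffer, the array is already
   sorted by tc_ptr; when no exact match exists the loop exits with
   tbs[m_max] being the last TB whose tc_ptr lies below the searched address,
   i.e. the block that contains it. */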
1236 static void tb_reset_jump_recursive(TranslationBlock *tb);
1238 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1240 TranslationBlock *tb1, *tb_next, **ptb;
1241 unsigned int n1;
1243 tb1 = tb->jmp_next[n];
1244 if (tb1 != NULL) {
1245 /* find head of list */
1246 for(;;) {
1247 n1 = (long)tb1 & 3;
1248 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1249 if (n1 == 2)
1250 break;
1251 tb1 = tb1->jmp_next[n1];
1253         /* we are now sure that tb jumps to tb1 */
1254 tb_next = tb1;
1256 /* remove tb from the jmp_first list */
1257 ptb = &tb_next->jmp_first;
1258 for(;;) {
1259 tb1 = *ptb;
1260 n1 = (long)tb1 & 3;
1261 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1262 if (n1 == n && tb1 == tb)
1263 break;
1264 ptb = &tb1->jmp_next[n1];
1266 *ptb = tb->jmp_next[n];
1267 tb->jmp_next[n] = NULL;
1269 /* suppress the jump to next tb in generated code */
1270 tb_reset_jump(tb, n);
1272 /* suppress jumps in the tb on which we could have jumped */
1273 tb_reset_jump_recursive(tb_next);
1277 static void tb_reset_jump_recursive(TranslationBlock *tb)
1279 tb_reset_jump_recursive2(tb, 0);
1280 tb_reset_jump_recursive2(tb, 1);
1283 #if defined(TARGET_HAS_ICE)
1284 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1286 target_phys_addr_t addr;
1287 target_ulong pd;
1288 ram_addr_t ram_addr;
1289 PhysPageDesc *p;
1291 addr = cpu_get_phys_page_debug(env, pc);
1292 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1293 if (!p) {
1294 pd = IO_MEM_UNASSIGNED;
1295 } else {
1296 pd = p->phys_offset;
1298 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1299 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1301 #endif
1303 /* Add a watchpoint. */
1304 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1305 int flags, CPUWatchpoint **watchpoint)
1307 target_ulong len_mask = ~(len - 1);
1308 CPUWatchpoint *wp;
1310 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1311 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1312 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1313 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1314 return -EINVAL;
1316 wp = qemu_malloc(sizeof(*wp));
1318 wp->vaddr = addr;
1319 wp->len_mask = len_mask;
1320 wp->flags = flags;
1322 /* keep all GDB-injected watchpoints in front */
1323 if (flags & BP_GDB)
1324 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1325 else
1326 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1328 tlb_flush_page(env, addr);
1330 if (watchpoint)
1331 *watchpoint = wp;
1332 return 0;
1335 /* Remove a specific watchpoint. */
1336 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1337 int flags)
1339 target_ulong len_mask = ~(len - 1);
1340 CPUWatchpoint *wp;
1342 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1343 if (addr == wp->vaddr && len_mask == wp->len_mask
1344 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1345 cpu_watchpoint_remove_by_ref(env, wp);
1346 return 0;
1349 return -ENOENT;
1352 /* Remove a specific watchpoint by reference. */
1353 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1355 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1357 tlb_flush_page(env, watchpoint->vaddr);
1359 qemu_free(watchpoint);
1362 /* Remove all matching watchpoints. */
1363 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1365 CPUWatchpoint *wp, *next;
1367 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1368 if (wp->flags & mask)
1369 cpu_watchpoint_remove_by_ref(env, wp);
1373 /* Add a breakpoint. */
1374 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1375 CPUBreakpoint **breakpoint)
1377 #if defined(TARGET_HAS_ICE)
1378 CPUBreakpoint *bp;
1380 bp = qemu_malloc(sizeof(*bp));
1382 bp->pc = pc;
1383 bp->flags = flags;
1385 /* keep all GDB-injected breakpoints in front */
1386 if (flags & BP_GDB)
1387 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1388 else
1389 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1391 breakpoint_invalidate(env, pc);
1393 if (breakpoint)
1394 *breakpoint = bp;
1395 return 0;
1396 #else
1397 return -ENOSYS;
1398 #endif
1401 /* Remove a specific breakpoint. */
1402 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1404 #if defined(TARGET_HAS_ICE)
1405 CPUBreakpoint *bp;
1407 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1408 if (bp->pc == pc && bp->flags == flags) {
1409 cpu_breakpoint_remove_by_ref(env, bp);
1410 return 0;
1413 return -ENOENT;
1414 #else
1415 return -ENOSYS;
1416 #endif
1419 /* Remove a specific breakpoint by reference. */
1420 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1422 #if defined(TARGET_HAS_ICE)
1423 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1425 breakpoint_invalidate(env, breakpoint->pc);
1427 qemu_free(breakpoint);
1428 #endif
1431 /* Remove all matching breakpoints. */
1432 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1434 #if defined(TARGET_HAS_ICE)
1435 CPUBreakpoint *bp, *next;
1437 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1438 if (bp->flags & mask)
1439 cpu_breakpoint_remove_by_ref(env, bp);
1441 #endif
1444 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1445 CPU loop after each instruction */
1446 void cpu_single_step(CPUState *env, int enabled)
1448 #if defined(TARGET_HAS_ICE)
1449 if (env->singlestep_enabled != enabled) {
1450 env->singlestep_enabled = enabled;
1451         /* must flush all the translated code to avoid inconsistencies */
1452 /* XXX: only flush what is necessary */
1453 tb_flush(env);
1455 #endif
1458 /* enable or disable low-level logging */
1459 void cpu_set_log(int log_flags)
1461 loglevel = log_flags;
1462 if (loglevel && !logfile) {
1463 logfile = fopen(logfilename, log_append ? "a" : "w");
1464 if (!logfile) {
1465 perror(logfilename);
1466 _exit(1);
1468 #if !defined(CONFIG_SOFTMMU)
1469 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1471 static char logfile_buf[4096];
1472 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1474 #else
1475 setvbuf(logfile, NULL, _IOLBF, 0);
1476 #endif
1477 log_append = 1;
1479 if (!loglevel && logfile) {
1480 fclose(logfile);
1481 logfile = NULL;
1485 void cpu_set_log_filename(const char *filename)
1487 logfilename = strdup(filename);
1488 if (logfile) {
1489 fclose(logfile);
1490 logfile = NULL;
1492 cpu_set_log(loglevel);
1495 /* mask must never be zero, except for A20 change call */
1496 void cpu_interrupt(CPUState *env, int mask)
1498 #if !defined(USE_NPTL)
1499 TranslationBlock *tb;
1500 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1501 #endif
1502 int old_mask;
1504 if (mask & CPU_INTERRUPT_EXIT) {
1505 env->exit_request = 1;
1506 mask &= ~CPU_INTERRUPT_EXIT;
1509 old_mask = env->interrupt_request;
1510 env->interrupt_request |= mask;
1511 #if defined(USE_NPTL)
1512 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1513 problem and hope the cpu will stop of its own accord. For userspace
1514 emulation this often isn't actually as bad as it sounds. Often
1515 signals are used primarily to interrupt blocking syscalls. */
1516 #else
1517 if (use_icount) {
1518 env->icount_decr.u16.high = 0xffff;
1519 #ifndef CONFIG_USER_ONLY
1520 if (!can_do_io(env)
1521 && (mask & ~old_mask) != 0) {
1522 cpu_abort(env, "Raised interrupt while not in I/O function");
1524 #endif
1525 } else {
1526 tb = env->current_tb;
1527 /* if the cpu is currently executing code, we must unlink it and
1528 all the potentially executing TB */
1529 if (tb && !testandset(&interrupt_lock)) {
1530 env->current_tb = NULL;
1531 tb_reset_jump_recursive(tb);
1532 resetlock(&interrupt_lock);
1535 #endif
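/* cpu_interrupt() may be called asynchronously with respect to the CPU
   thread (e.g. from a signal handler or an I/O callback), so it only ever
   OR-s new bits into env->interrupt_request, routes CPU_INTERRUPT_EXIT to
   the separate env->exit_request flag, and takes interrupt_lock via
   testandset() before unlinking env->current_tb so the running CPU breaks
   out of chained TBs; on USE_NPTL builds the unchaining is skipped entirely
   (see the FIXME above). */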
1538 void cpu_reset_interrupt(CPUState *env, int mask)
1540 env->interrupt_request &= ~mask;
1543 const CPULogItem cpu_log_items[] = {
1544 { CPU_LOG_TB_OUT_ASM, "out_asm",
1545 "show generated host assembly code for each compiled TB" },
1546 { CPU_LOG_TB_IN_ASM, "in_asm",
1547 "show target assembly code for each compiled TB" },
1548 { CPU_LOG_TB_OP, "op",
1549 "show micro ops for each compiled TB" },
1550 { CPU_LOG_TB_OP_OPT, "op_opt",
1551 "show micro ops "
1552 #ifdef TARGET_I386
1553 "before eflags optimization and "
1554 #endif
1555 "after liveness analysis" },
1556 { CPU_LOG_INT, "int",
1557 "show interrupts/exceptions in short format" },
1558 { CPU_LOG_EXEC, "exec",
1559 "show trace before each executed TB (lots of logs)" },
1560 { CPU_LOG_TB_CPU, "cpu",
1561 "show CPU state before block translation" },
1562 #ifdef TARGET_I386
1563 { CPU_LOG_PCALL, "pcall",
1564 "show protected mode far calls/returns/exceptions" },
1565 { CPU_LOG_RESET, "cpu_reset",
1566 "show CPU state before CPU resets" },
1567 #endif
1568 #ifdef DEBUG_IOPORT
1569 { CPU_LOG_IOPORT, "ioport",
1570 "show all i/o ports accesses" },
1571 #endif
1572 { 0, NULL, NULL },
1575 static int cmp1(const char *s1, int n, const char *s2)
1577 if (strlen(s2) != n)
1578 return 0;
1579 return memcmp(s1, s2, n) == 0;
1582 /* takes a comma-separated list of log masks. Returns 0 on error. */
1583 int cpu_str_to_log_mask(const char *str)
1585 const CPULogItem *item;
1586 int mask;
1587 const char *p, *p1;
1589 p = str;
1590 mask = 0;
1591 for(;;) {
1592 p1 = strchr(p, ',');
1593 if (!p1)
1594 p1 = p + strlen(p);
1595 if(cmp1(p,p1-p,"all")) {
1596 for(item = cpu_log_items; item->mask != 0; item++) {
1597 mask |= item->mask;
1599 } else {
1600 for(item = cpu_log_items; item->mask != 0; item++) {
1601 if (cmp1(p, p1 - p, item->name))
1602 goto found;
1604 return 0;
1606 found:
1607 mask |= item->mask;
1608 if (*p1 != ',')
1609 break;
1610 p = p1 + 1;
1612 return mask;
1615 void cpu_abort(CPUState *env, const char *fmt, ...)
1617 va_list ap;
1618 va_list ap2;
1620 va_start(ap, fmt);
1621 va_copy(ap2, ap);
1622 fprintf(stderr, "qemu: fatal: ");
1623 vfprintf(stderr, fmt, ap);
1624 fprintf(stderr, "\n");
1625 #ifdef TARGET_I386
1626 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1627 #else
1628 cpu_dump_state(env, stderr, fprintf, 0);
1629 #endif
1630 if (qemu_log_enabled()) {
1631 qemu_log("qemu: fatal: ");
1632 qemu_log_vprintf(fmt, ap2);
1633 qemu_log("\n");
1634 #ifdef TARGET_I386
1635 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1636 #else
1637 log_cpu_state(env, 0);
1638 #endif
1639 qemu_log_flush();
1640 qemu_log_close();
1642 va_end(ap2);
1643 va_end(ap);
1644 abort();
1647 CPUState *cpu_copy(CPUState *env)
1649 CPUState *new_env = cpu_init(env->cpu_model_str);
1650 CPUState *next_cpu = new_env->next_cpu;
1651 int cpu_index = new_env->cpu_index;
1652 #if defined(TARGET_HAS_ICE)
1653 CPUBreakpoint *bp;
1654 CPUWatchpoint *wp;
1655 #endif
1657 memcpy(new_env, env, sizeof(CPUState));
1659 /* Preserve chaining and index. */
1660 new_env->next_cpu = next_cpu;
1661 new_env->cpu_index = cpu_index;
1663 /* Clone all break/watchpoints.
1664 Note: Once we support ptrace with hw-debug register access, make sure
1665 BP_CPU break/watchpoints are handled correctly on clone. */
1666 TAILQ_INIT(&env->breakpoints);
1667 TAILQ_INIT(&env->watchpoints);
1668 #if defined(TARGET_HAS_ICE)
1669 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1670 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1672 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1673 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1674 wp->flags, NULL);
1676 #endif
1678 return new_env;
1681 #if !defined(CONFIG_USER_ONLY)
1683 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1685 unsigned int i;
1687 /* Discard jump cache entries for any tb which might potentially
1688 overlap the flushed page. */
1689 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1690 memset (&env->tb_jmp_cache[i], 0,
1691 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1693 i = tb_jmp_cache_hash_page(addr);
1694 memset (&env->tb_jmp_cache[i], 0,
1695 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1698 /* NOTE: if flush_global is true, also flush global entries (not
1699 implemented yet) */
1700 void tlb_flush(CPUState *env, int flush_global)
1702 int i;
1704 #if defined(DEBUG_TLB)
1705 printf("tlb_flush:\n");
1706 #endif
1707 /* must reset current TB so that interrupts cannot modify the
1708 links while we are modifying them */
1709 env->current_tb = NULL;
1711 for(i = 0; i < CPU_TLB_SIZE; i++) {
1712 env->tlb_table[0][i].addr_read = -1;
1713 env->tlb_table[0][i].addr_write = -1;
1714 env->tlb_table[0][i].addr_code = -1;
1715 env->tlb_table[1][i].addr_read = -1;
1716 env->tlb_table[1][i].addr_write = -1;
1717 env->tlb_table[1][i].addr_code = -1;
1718 #if (NB_MMU_MODES >= 3)
1719 env->tlb_table[2][i].addr_read = -1;
1720 env->tlb_table[2][i].addr_write = -1;
1721 env->tlb_table[2][i].addr_code = -1;
1722 #if (NB_MMU_MODES == 4)
1723 env->tlb_table[3][i].addr_read = -1;
1724 env->tlb_table[3][i].addr_write = -1;
1725 env->tlb_table[3][i].addr_code = -1;
1726 #endif
1727 #endif
1730 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1732 #ifdef USE_KQEMU
1733 if (env->kqemu_enabled) {
1734 kqemu_flush(env, flush_global);
1736 #endif
1737 tlb_flush_count++;
1740 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1742 if (addr == (tlb_entry->addr_read &
1743 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1744 addr == (tlb_entry->addr_write &
1745 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1746 addr == (tlb_entry->addr_code &
1747 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1748 tlb_entry->addr_read = -1;
1749 tlb_entry->addr_write = -1;
1750 tlb_entry->addr_code = -1;
1754 void tlb_flush_page(CPUState *env, target_ulong addr)
1756 int i;
1758 #if defined(DEBUG_TLB)
1759 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1760 #endif
1761 /* must reset current TB so that interrupts cannot modify the
1762 links while we are modifying them */
1763 env->current_tb = NULL;
1765 addr &= TARGET_PAGE_MASK;
1766 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1767 tlb_flush_entry(&env->tlb_table[0][i], addr);
1768 tlb_flush_entry(&env->tlb_table[1][i], addr);
1769 #if (NB_MMU_MODES >= 3)
1770 tlb_flush_entry(&env->tlb_table[2][i], addr);
1771 #if (NB_MMU_MODES == 4)
1772 tlb_flush_entry(&env->tlb_table[3][i], addr);
1773 #endif
1774 #endif
1776 tlb_flush_jmp_cache(env, addr);
1778 #ifdef USE_KQEMU
1779 if (env->kqemu_enabled) {
1780 kqemu_flush_page(env, addr);
1782 #endif
1785 /* update the TLBs so that writes to code in the virtual page 'addr'
1786 can be detected */
1787 static void tlb_protect_code(ram_addr_t ram_addr)
1789 cpu_physical_memory_reset_dirty(ram_addr,
1790 ram_addr + TARGET_PAGE_SIZE,
1791 CODE_DIRTY_FLAG);
1794 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1795 tested for self modifying code */
1796 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1797 target_ulong vaddr)
1799 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1802 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1803 unsigned long start, unsigned long length)
1805 unsigned long addr;
1806 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1807 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1808 if ((addr - start) < length) {
1809 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1814 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1815 int dirty_flags)
1817 CPUState *env;
1818 unsigned long length, start1;
1819 int i, mask, len;
1820 uint8_t *p;
1822 start &= TARGET_PAGE_MASK;
1823 end = TARGET_PAGE_ALIGN(end);
1825 length = end - start;
1826 if (length == 0)
1827 return;
1828 len = length >> TARGET_PAGE_BITS;
1829 #ifdef USE_KQEMU
1830 /* XXX: should not depend on cpu context */
1831 env = first_cpu;
1832 if (env->kqemu_enabled) {
1833 ram_addr_t addr;
1834 addr = start;
1835 for(i = 0; i < len; i++) {
1836 kqemu_set_notdirty(env, addr);
1837 addr += TARGET_PAGE_SIZE;
1840 #endif
1841 mask = ~dirty_flags;
1842 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1843 for(i = 0; i < len; i++)
1844 p[i] &= mask;
1846 /* we modify the TLB cache so that the dirty bit will be set again
1847 when accessing the range */
1848 start1 = start + (unsigned long)phys_ram_base;
1849 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1850 for(i = 0; i < CPU_TLB_SIZE; i++)
1851 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1852 for(i = 0; i < CPU_TLB_SIZE; i++)
1853 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1854 #if (NB_MMU_MODES >= 3)
1855 for(i = 0; i < CPU_TLB_SIZE; i++)
1856 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1857 #if (NB_MMU_MODES == 4)
1858 for(i = 0; i < CPU_TLB_SIZE; i++)
1859 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1860 #endif
1861 #endif
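/* cpu_physical_memory_reset_dirty() clears the requested dirty_flags bits
   in phys_ram_dirty[] for every page in [start, end) and then rewrites the
   write entries of each CPU's TLB (tlb_reset_dirty_range) so that the next
   store to one of those pages takes the TLB_NOTDIRTY slow path and marks
   the page dirty again. */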
1865 int cpu_physical_memory_set_dirty_tracking(int enable)
1867 in_migration = enable;
1868 return 0;
1871 int cpu_physical_memory_get_dirty_tracking(void)
1873 return in_migration;
1876 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1878 if (kvm_enabled())
1879 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1882 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1884 ram_addr_t ram_addr;
1886 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1887 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1888 tlb_entry->addend - (unsigned long)phys_ram_base;
1889 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1890 tlb_entry->addr_write |= TLB_NOTDIRTY;
1895 /* update the TLB according to the current state of the dirty bits */
1896 void cpu_tlb_update_dirty(CPUState *env)
1898 int i;
1899 for(i = 0; i < CPU_TLB_SIZE; i++)
1900 tlb_update_dirty(&env->tlb_table[0][i]);
1901 for(i = 0; i < CPU_TLB_SIZE; i++)
1902 tlb_update_dirty(&env->tlb_table[1][i]);
1903 #if (NB_MMU_MODES >= 3)
1904 for(i = 0; i < CPU_TLB_SIZE; i++)
1905 tlb_update_dirty(&env->tlb_table[2][i]);
1906 #if (NB_MMU_MODES == 4)
1907 for(i = 0; i < CPU_TLB_SIZE; i++)
1908 tlb_update_dirty(&env->tlb_table[3][i]);
1909 #endif
1910 #endif
1913 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1915 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1916 tlb_entry->addr_write = vaddr;
1919 /* update the TLB corresponding to virtual page vaddr
1920 so that it is no longer dirty */
1921 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1923 int i;
1925 vaddr &= TARGET_PAGE_MASK;
1926 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1927 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1928 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1929 #if (NB_MMU_MODES >= 3)
1930 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1931 #if (NB_MMU_MODES == 4)
1932 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1933 #endif
1934 #endif
1937 /* add a new TLB entry. At most one entry for a given virtual address
1938 is permitted. Return 0 if OK or 2 if the page could not be mapped
1939 (can only happen in non SOFTMMU mode for I/O pages or pages
1940 conflicting with the host address space). */
1941 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1942 target_phys_addr_t paddr, int prot,
1943 int mmu_idx, int is_softmmu)
1945 PhysPageDesc *p;
1946 unsigned long pd;
1947 unsigned int index;
1948 target_ulong address;
1949 target_ulong code_address;
1950 target_phys_addr_t addend;
1951 int ret;
1952 CPUTLBEntry *te;
1953 CPUWatchpoint *wp;
1954 target_phys_addr_t iotlb;
1956 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1957 if (!p) {
1958 pd = IO_MEM_UNASSIGNED;
1959 } else {
1960 pd = p->phys_offset;
1962 #if defined(DEBUG_TLB)
1963 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1964 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1965 #endif
1967 ret = 0;
1968 address = vaddr;
1969 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1970 /* IO memory case (romd handled later) */
1971 address |= TLB_MMIO;
1973 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1974 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1975 /* Normal RAM. */
1976 iotlb = pd & TARGET_PAGE_MASK;
1977 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1978 iotlb |= IO_MEM_NOTDIRTY;
1979 else
1980 iotlb |= IO_MEM_ROM;
1981 } else {
1982         /* IO handlers are currently passed a physical address.
1983 It would be nice to pass an offset from the base address
1984 of that region. This would avoid having to special case RAM,
1985 and avoid full address decoding in every device.
1986 We can't use the high bits of pd for this because
1987 IO_MEM_ROMD uses these as a ram address. */
1988 iotlb = (pd & ~TARGET_PAGE_MASK);
1989 if (p) {
1990 iotlb += p->region_offset;
1991 } else {
1992 iotlb += paddr;
1996 code_address = address;
1997 /* Make accesses to pages with watchpoints go via the
1998 watchpoint trap routines. */
1999 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2000 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2001 iotlb = io_mem_watch + paddr;
2002 /* TODO: The memory case can be optimized by not trapping
2003 reads of pages with a write breakpoint. */
2004 address |= TLB_MMIO;
2008 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2009 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2010 te = &env->tlb_table[mmu_idx][index];
2011 te->addend = addend - vaddr;
2012 if (prot & PAGE_READ) {
2013 te->addr_read = address;
2014 } else {
2015 te->addr_read = -1;
2018 if (prot & PAGE_EXEC) {
2019 te->addr_code = code_address;
2020 } else {
2021 te->addr_code = -1;
2023 if (prot & PAGE_WRITE) {
2024 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2025 (pd & IO_MEM_ROMD)) {
2026 /* Write access calls the I/O callback. */
2027 te->addr_write = address | TLB_MMIO;
2028 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2029 !cpu_physical_memory_is_dirty(pd)) {
2030 te->addr_write = address | TLB_NOTDIRTY;
2031 } else {
2032 te->addr_write = address;
2034 } else {
2035 te->addr_write = -1;
2037 return ret;
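/* Illustrative sketch (not part of the original file): on a softmmu TLB
   miss, target-specific MMU code walks the guest page tables and then
   installs the mapping with a call of this shape (guest_vaddr, guest_paddr,
   prot and mmu_idx are hypothetical values produced by that target code):

       ret = tlb_set_page_exec(env, guest_vaddr & TARGET_PAGE_MASK,
                               guest_paddr & TARGET_PAGE_MASK,
                               prot, mmu_idx, 1);

   A return of 0 means the entry is now in env->tlb_table[mmu_idx] and the
   faulting access can be retried. */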
2040 #else
2042 void tlb_flush(CPUState *env, int flush_global)
2046 void tlb_flush_page(CPUState *env, target_ulong addr)
2050 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2051 target_phys_addr_t paddr, int prot,
2052 int mmu_idx, int is_softmmu)
2054 return 0;
2057 /* dump memory mappings */
2058 void page_dump(FILE *f)
2060 unsigned long start, end;
2061 int i, j, prot, prot1;
2062 PageDesc *p;
2064 fprintf(f, "%-8s %-8s %-8s %s\n",
2065 "start", "end", "size", "prot");
2066 start = -1;
2067 end = -1;
2068 prot = 0;
2069 for(i = 0; i <= L1_SIZE; i++) {
2070 if (i < L1_SIZE)
2071 p = l1_map[i];
2072 else
2073 p = NULL;
2074 for(j = 0;j < L2_SIZE; j++) {
2075 if (!p)
2076 prot1 = 0;
2077 else
2078 prot1 = p[j].flags;
2079 if (prot1 != prot) {
2080 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2081 if (start != -1) {
2082 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2083 start, end, end - start,
2084 prot & PAGE_READ ? 'r' : '-',
2085 prot & PAGE_WRITE ? 'w' : '-',
2086 prot & PAGE_EXEC ? 'x' : '-');
2088 if (prot1 != 0)
2089 start = end;
2090 else
2091 start = -1;
2092 prot = prot1;
2094 if (!p)
2095 break;
2100 int page_get_flags(target_ulong address)
2102 PageDesc *p;
2104 p = page_find(address >> TARGET_PAGE_BITS);
2105 if (!p)
2106 return 0;
2107 return p->flags;
2110 /* modify the flags of a page and invalidate the code if
2111 necessary. The flag PAGE_WRITE_ORG is set automatically
2112 depending on PAGE_WRITE */
2113 void page_set_flags(target_ulong start, target_ulong end, int flags)
2115 PageDesc *p;
2116 target_ulong addr;
2118 /* mmap_lock should already be held. */
2119 start = start & TARGET_PAGE_MASK;
2120 end = TARGET_PAGE_ALIGN(end);
2121 if (flags & PAGE_WRITE)
2122 flags |= PAGE_WRITE_ORG;
2123 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2124 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2125 /* We may be called for host regions that are outside guest
2126 address space. */
2127 if (!p)
2128 return;
2129 /* if the write protection is set, then we invalidate the code
2130 inside */
2131 if (!(p->flags & PAGE_WRITE) &&
2132 (flags & PAGE_WRITE) &&
2133 p->first_tb) {
2134 tb_invalidate_phys_page(addr, 0, NULL);
2136 p->flags = flags;
2140 int page_check_range(target_ulong start, target_ulong len, int flags)
2142 PageDesc *p;
2143 target_ulong end;
2144 target_ulong addr;
2146 if (start + len < start)
2147 /* we've wrapped around */
2148 return -1;
2150 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2151 start = start & TARGET_PAGE_MASK;
2153 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2154 p = page_find(addr >> TARGET_PAGE_BITS);
2155 if (!p)
2156 return -1;
2157 if (!(p->flags & PAGE_VALID))
2158 return -1;
2160 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2161 return -1;
2162 if (flags & PAGE_WRITE) {
2163 if (!(p->flags & PAGE_WRITE_ORG))
2164 return -1;
2165 /* unprotect the page if it was put read-only because it
2166 contains translated code */
2167 if (!(p->flags & PAGE_WRITE)) {
2168 if (!page_unprotect(addr, 0, NULL))
2169 return -1;
2171 return 0;
2174 return 0;
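/* Usage sketch (illustration only): user-mode emulation helpers can
   validate a guest buffer before touching it, e.g.

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -1;   // some page is unmapped or lacks the protection

   guest_addr and size are hypothetical placeholders. */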
2177 /* called from signal handler: invalidate the code and unprotect the
2178 page. Return TRUE if the fault was successfully handled. */
2179 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2181 unsigned int page_index, prot, pindex;
2182 PageDesc *p, *p1;
2183 target_ulong host_start, host_end, addr;
2185 /* Technically this isn't safe inside a signal handler. However we
2186 know this only ever happens in a synchronous SEGV handler, so in
2187 practice it seems to be ok. */
2188 mmap_lock();
2190 host_start = address & qemu_host_page_mask;
2191 page_index = host_start >> TARGET_PAGE_BITS;
2192 p1 = page_find(page_index);
2193 if (!p1) {
2194 mmap_unlock();
2195 return 0;
2197 host_end = host_start + qemu_host_page_size;
2198 p = p1;
2199 prot = 0;
2200 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2201 prot |= p->flags;
2202 p++;
2204 /* if the page was really writable, then we change its
2205 protection back to writable */
2206 if (prot & PAGE_WRITE_ORG) {
2207 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2208 if (!(p1[pindex].flags & PAGE_WRITE)) {
2209 mprotect((void *)g2h(host_start), qemu_host_page_size,
2210 (prot & PAGE_BITS) | PAGE_WRITE);
2211 p1[pindex].flags |= PAGE_WRITE;
2212 /* and since the content will be modified, we must invalidate
2213 the corresponding translated code. */
2214 tb_invalidate_phys_page(address, pc, puc);
2215 #ifdef DEBUG_TB_CHECK
2216 tb_invalidate_check(address);
2217 #endif
2218 mmap_unlock();
2219 return 1;
2222 mmap_unlock();
2223 return 0;
2226 static inline void tlb_set_dirty(CPUState *env,
2227 unsigned long addr, target_ulong vaddr)
2230 #endif /* defined(CONFIG_USER_ONLY) */
2232 #if !defined(CONFIG_USER_ONLY)
2234 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2235 ram_addr_t memory, ram_addr_t region_offset);
2236 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2237 ram_addr_t orig_memory, ram_addr_t region_offset);
2238 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2239 need_subpage) \
2240 do { \
2241 if (addr > start_addr) \
2242 start_addr2 = 0; \
2243 else { \
2244 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2245 if (start_addr2 > 0) \
2246 need_subpage = 1; \
2249 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2250 end_addr2 = TARGET_PAGE_SIZE - 1; \
2251 else { \
2252 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2253 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2254 need_subpage = 1; \
2256 } while (0)
2258 /* register physical memory. 'size' must be a multiple of the target
2259 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2260 io memory page. The address used when calling the IO function is
2261 the offset from the start of the region, plus region_offset. Both
2262 start_addr and region_offset are rounded down to a page boundary
2263 before calculating this offset. This should not be a problem unless
2264 the low bits of start_addr and region_offset differ. */
2265 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2266 ram_addr_t size,
2267 ram_addr_t phys_offset,
2268 ram_addr_t region_offset)
2270 target_phys_addr_t addr, end_addr;
2271 PhysPageDesc *p;
2272 CPUState *env;
2273 ram_addr_t orig_size = size;
2274 void *subpage;
2276 #ifdef USE_KQEMU
2277 /* XXX: should not depend on cpu context */
2278 env = first_cpu;
2279 if (env->kqemu_enabled) {
2280 kqemu_set_phys_mem(start_addr, size, phys_offset);
2282 #endif
2283 if (kvm_enabled())
2284 kvm_set_phys_mem(start_addr, size, phys_offset);
2286 if (phys_offset == IO_MEM_UNASSIGNED) {
2287 region_offset = start_addr;
2289 region_offset &= TARGET_PAGE_MASK;
2290 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2291 end_addr = start_addr + (target_phys_addr_t)size;
2292 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2293 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2294 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2295 ram_addr_t orig_memory = p->phys_offset;
2296 target_phys_addr_t start_addr2, end_addr2;
2297 int need_subpage = 0;
2299 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2300 need_subpage);
2301 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2302 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2303 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2304 &p->phys_offset, orig_memory,
2305 p->region_offset);
2306 } else {
2307 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2308 >> IO_MEM_SHIFT];
2310 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2311 region_offset);
2312 p->region_offset = 0;
2313 } else {
2314 p->phys_offset = phys_offset;
2315 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2316 (phys_offset & IO_MEM_ROMD))
2317 phys_offset += TARGET_PAGE_SIZE;
2319 } else {
2320 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2321 p->phys_offset = phys_offset;
2322 p->region_offset = region_offset;
2323 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2324 (phys_offset & IO_MEM_ROMD)) {
2325 phys_offset += TARGET_PAGE_SIZE;
2326 } else {
2327 target_phys_addr_t start_addr2, end_addr2;
2328 int need_subpage = 0;
2330 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2331 end_addr2, need_subpage);
2333 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2334 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2335 &p->phys_offset, IO_MEM_UNASSIGNED,
2336 addr & TARGET_PAGE_MASK);
2337 subpage_register(subpage, start_addr2, end_addr2,
2338 phys_offset, region_offset);
2339 p->region_offset = 0;
2343 region_offset += TARGET_PAGE_SIZE;
2346 /* since each CPU stores ram addresses in its TLB cache, we must
2347 reset the modified entries */
2348 /* XXX: slow ! */
2349 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2350 tlb_flush(env, 1);
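/* Illustrative sketch (assumptions noted): board setup code typically
   allocates guest RAM with qemu_ram_alloc() below and registers it through
   the cpu_register_physical_memory() wrapper mentioned above, e.g.

       ram_addr_t ram_off = qemu_ram_alloc(ram_size);   // ram_size: placeholder
       cpu_register_physical_memory(base_addr, ram_size, ram_off | IO_MEM_RAM);

   MMIO regions are registered the same way, passing the value returned by
   cpu_register_io_memory() as phys_offset instead. */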
2354 /* XXX: temporary until new memory mapping API */
2355 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2357 PhysPageDesc *p;
2359 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2360 if (!p)
2361 return IO_MEM_UNASSIGNED;
2362 return p->phys_offset;
2365 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2367 if (kvm_enabled())
2368 kvm_coalesce_mmio_region(addr, size);
2371 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2373 if (kvm_enabled())
2374 kvm_uncoalesce_mmio_region(addr, size);
2377 /* XXX: better than nothing */
2378 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2380 ram_addr_t addr;
2381 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2382 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2383 (uint64_t)size, (uint64_t)phys_ram_size);
2384 abort();
2386 addr = phys_ram_alloc_offset;
2387 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2388 return addr;
2391 void qemu_ram_free(ram_addr_t addr)
2395 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2397 #ifdef DEBUG_UNASSIGNED
2398 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2399 #endif
2400 #if defined(TARGET_SPARC)
2401 do_unassigned_access(addr, 0, 0, 0, 1);
2402 #endif
2403 return 0;
2406 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2408 #ifdef DEBUG_UNASSIGNED
2409 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2410 #endif
2411 #if defined(TARGET_SPARC)
2412 do_unassigned_access(addr, 0, 0, 0, 2);
2413 #endif
2414 return 0;
2417 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2419 #ifdef DEBUG_UNASSIGNED
2420 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2421 #endif
2422 #if defined(TARGET_SPARC)
2423 do_unassigned_access(addr, 0, 0, 0, 4);
2424 #endif
2425 return 0;
2428 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2430 #ifdef DEBUG_UNASSIGNED
2431 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2432 #endif
2433 #if defined(TARGET_SPARC)
2434 do_unassigned_access(addr, 1, 0, 0, 1);
2435 #endif
2438 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2440 #ifdef DEBUG_UNASSIGNED
2441 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2442 #endif
2443 #if defined(TARGET_SPARC)
2444 do_unassigned_access(addr, 1, 0, 0, 2);
2445 #endif
2448 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2450 #ifdef DEBUG_UNASSIGNED
2451 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2452 #endif
2453 #if defined(TARGET_SPARC)
2454 do_unassigned_access(addr, 1, 0, 0, 4);
2455 #endif
2458 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2459 unassigned_mem_readb,
2460 unassigned_mem_readw,
2461 unassigned_mem_readl,
2464 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2465 unassigned_mem_writeb,
2466 unassigned_mem_writew,
2467 unassigned_mem_writel,
2470 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2471 uint32_t val)
2473 int dirty_flags;
2474 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2475 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2476 #if !defined(CONFIG_USER_ONLY)
2477 tb_invalidate_phys_page_fast(ram_addr, 1);
2478 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2479 #endif
2481 stb_p(phys_ram_base + ram_addr, val);
2482 #ifdef USE_KQEMU
2483 if (cpu_single_env->kqemu_enabled &&
2484 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2485 kqemu_modify_page(cpu_single_env, ram_addr);
2486 #endif
2487 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2488 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2489 /* we remove the notdirty callback only if the code has been
2490 flushed */
2491 if (dirty_flags == 0xff)
2492 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2495 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2496 uint32_t val)
2498 int dirty_flags;
2499 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2500 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2501 #if !defined(CONFIG_USER_ONLY)
2502 tb_invalidate_phys_page_fast(ram_addr, 2);
2503 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2504 #endif
2506 stw_p(phys_ram_base + ram_addr, val);
2507 #ifdef USE_KQEMU
2508 if (cpu_single_env->kqemu_enabled &&
2509 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2510 kqemu_modify_page(cpu_single_env, ram_addr);
2511 #endif
2512 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2513 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2514 /* we remove the notdirty callback only if the code has been
2515 flushed */
2516 if (dirty_flags == 0xff)
2517 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2520 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2521 uint32_t val)
2523 int dirty_flags;
2524 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2525 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2526 #if !defined(CONFIG_USER_ONLY)
2527 tb_invalidate_phys_page_fast(ram_addr, 4);
2528 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2529 #endif
2531 stl_p(phys_ram_base + ram_addr, val);
2532 #ifdef USE_KQEMU
2533 if (cpu_single_env->kqemu_enabled &&
2534 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2535 kqemu_modify_page(cpu_single_env, ram_addr);
2536 #endif
2537 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2538 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2539 /* we remove the notdirty callback only if the code has been
2540 flushed */
2541 if (dirty_flags == 0xff)
2542 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
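/* Note on the notdirty handlers above: a RAM page whose CODE_DIRTY_FLAG is
   clear may still contain translated code, so tlb_set_page_exec() tags its
   write entry with TLB_NOTDIRTY and stores are routed through this slow
   path.  A store first invalidates the TBs on the page if needed, performs
   the write, and sets every dirty bit except CODE_DIRTY_FLAG; only once the
   flags reach 0xff (i.e. no translated code remains on the page) is the TLB
   entry switched back to the fast RAM path via tlb_set_dirty(). */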
2545 static CPUReadMemoryFunc *error_mem_read[3] = {
2546 NULL, /* never used */
2547 NULL, /* never used */
2548 NULL, /* never used */
2551 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2552 notdirty_mem_writeb,
2553 notdirty_mem_writew,
2554 notdirty_mem_writel,
2557 /* Generate a debug exception if a watchpoint has been hit. */
2558 static void check_watchpoint(int offset, int len_mask, int flags)
2560 CPUState *env = cpu_single_env;
2561 target_ulong pc, cs_base;
2562 TranslationBlock *tb;
2563 target_ulong vaddr;
2564 CPUWatchpoint *wp;
2565 int cpu_flags;
2567 if (env->watchpoint_hit) {
2568 /* We re-entered the check after replacing the TB. Now raise
2569 * the debug interrupt so that it will trigger after the
2570 * current instruction. */
2571 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2572 return;
2574 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2575 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2576 if ((vaddr == (wp->vaddr & len_mask) ||
2577 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2578 wp->flags |= BP_WATCHPOINT_HIT;
2579 if (!env->watchpoint_hit) {
2580 env->watchpoint_hit = wp;
2581 tb = tb_find_pc(env->mem_io_pc);
2582 if (!tb) {
2583 cpu_abort(env, "check_watchpoint: could not find TB for "
2584 "pc=%p", (void *)env->mem_io_pc);
2586 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2587 tb_phys_invalidate(tb, -1);
2588 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2589 env->exception_index = EXCP_DEBUG;
2590 } else {
2591 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2592 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2594 cpu_resume_from_signal(env, NULL);
2596 } else {
2597 wp->flags &= ~BP_WATCHPOINT_HIT;
2602 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2603 so these check for a hit then pass through to the normal out-of-line
2604 phys routines. */
2605 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2607 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2608 return ldub_phys(addr);
2611 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2613 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2614 return lduw_phys(addr);
2617 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2619 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2620 return ldl_phys(addr);
2623 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2624 uint32_t val)
2626 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2627 stb_phys(addr, val);
2630 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2631 uint32_t val)
2633 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2634 stw_phys(addr, val);
2637 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2638 uint32_t val)
2640 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2641 stl_phys(addr, val);
2644 static CPUReadMemoryFunc *watch_mem_read[3] = {
2645 watch_mem_readb,
2646 watch_mem_readw,
2647 watch_mem_readl,
2650 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2651 watch_mem_writeb,
2652 watch_mem_writew,
2653 watch_mem_writel,
2656 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2657 unsigned int len)
2659 uint32_t ret;
2660 unsigned int idx;
2662 idx = SUBPAGE_IDX(addr);
2663 #if defined(DEBUG_SUBPAGE)
2664 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2665 mmio, len, addr, idx);
2666 #endif
2667 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2668 addr + mmio->region_offset[idx][0][len]);
2670 return ret;
2673 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2674 uint32_t value, unsigned int len)
2676 unsigned int idx;
2678 idx = SUBPAGE_IDX(addr);
2679 #if defined(DEBUG_SUBPAGE)
2680 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2681 mmio, len, addr, idx, value);
2682 #endif
2683 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2684 addr + mmio->region_offset[idx][1][len],
2685 value);
2688 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2690 #if defined(DEBUG_SUBPAGE)
2691 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2692 #endif
2694 return subpage_readlen(opaque, addr, 0);
2697 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2698 uint32_t value)
2700 #if defined(DEBUG_SUBPAGE)
2701 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2702 #endif
2703 subpage_writelen(opaque, addr, value, 0);
2706 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2708 #if defined(DEBUG_SUBPAGE)
2709 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2710 #endif
2712 return subpage_readlen(opaque, addr, 1);
2715 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2716 uint32_t value)
2718 #if defined(DEBUG_SUBPAGE)
2719 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2720 #endif
2721 subpage_writelen(opaque, addr, value, 1);
2724 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2726 #if defined(DEBUG_SUBPAGE)
2727 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2728 #endif
2730 return subpage_readlen(opaque, addr, 2);
2733 static void subpage_writel (void *opaque,
2734 target_phys_addr_t addr, uint32_t value)
2736 #if defined(DEBUG_SUBPAGE)
2737 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2738 #endif
2739 subpage_writelen(opaque, addr, value, 2);
2742 static CPUReadMemoryFunc *subpage_read[] = {
2743 &subpage_readb,
2744 &subpage_readw,
2745 &subpage_readl,
2748 static CPUWriteMemoryFunc *subpage_write[] = {
2749 &subpage_writeb,
2750 &subpage_writew,
2751 &subpage_writel,
2754 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2755 ram_addr_t memory, ram_addr_t region_offset)
2757 int idx, eidx;
2758 unsigned int i;
2760 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2761 return -1;
2762 idx = SUBPAGE_IDX(start);
2763 eidx = SUBPAGE_IDX(end);
2764 #if defined(DEBUG_SUBPAGE)
2765 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2766 mmio, start, end, idx, eidx, memory);
2767 #endif
2768 memory >>= IO_MEM_SHIFT;
2769 for (; idx <= eidx; idx++) {
2770 for (i = 0; i < 4; i++) {
2771 if (io_mem_read[memory][i]) {
2772 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2773 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2774 mmio->region_offset[idx][0][i] = region_offset;
2776 if (io_mem_write[memory][i]) {
2777 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2778 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2779 mmio->region_offset[idx][1][i] = region_offset;
2784 return 0;
2787 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2788 ram_addr_t orig_memory, ram_addr_t region_offset)
2790 subpage_t *mmio;
2791 int subpage_memory;
2793 mmio = qemu_mallocz(sizeof(subpage_t));
2795 mmio->base = base;
2796 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2797 #if defined(DEBUG_SUBPAGE)
2798 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2799 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2800 #endif
2801 *phys = subpage_memory | IO_MEM_SUBPAGE;
2802 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2803 region_offset);
2805 return mmio;
2808 static int get_free_io_mem_idx(void)
2810 int i;
2812 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2813 if (!io_mem_used[i]) {
2814 io_mem_used[i] = 1;
2815 return i;
2818 return -1;
2821 static void io_mem_init(void)
2823 int i;
2825 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2826 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2827 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2828 for (i=0; i<5; i++)
2829 io_mem_used[i] = 1;
2831 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2832 watch_mem_write, NULL);
2833 /* alloc dirty bits array */
2834 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2835 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2838 /* mem_read and mem_write are arrays of functions containing the
2839 function to access byte (index 0), word (index 1) and dword (index
2840 2). Functions can be omitted with a NULL function pointer. The
2841 registered functions may be modified dynamically later.
2842 If io_index is non-zero, the corresponding io zone is
2843 modified. If it is zero, a new io zone is allocated. The return
2844 value can be used with cpu_register_physical_memory(). (-1) is
2845 returned on error. */
2846 int cpu_register_io_memory(int io_index,
2847 CPUReadMemoryFunc **mem_read,
2848 CPUWriteMemoryFunc **mem_write,
2849 void *opaque)
2851 int i, subwidth = 0;
2853 if (io_index <= 0) {
2854 io_index = get_free_io_mem_idx();
2855 if (io_index == -1)
2856 return io_index;
2857 } else {
2858 if (io_index >= IO_MEM_NB_ENTRIES)
2859 return -1;
2862 for(i = 0;i < 3; i++) {
2863 if (!mem_read[i] || !mem_write[i])
2864 subwidth = IO_MEM_SUBWIDTH;
2865 io_mem_read[io_index][i] = mem_read[i];
2866 io_mem_write[io_index][i] = mem_write[i];
2868 io_mem_opaque[io_index] = opaque;
2869 return (io_index << IO_MEM_SHIFT) | subwidth;
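/* Usage sketch (hypothetical device, for illustration only): a device model
   registers byte/word/long handlers and then maps them into the guest
   physical address space:

       static CPUReadMemoryFunc *foo_read[3]   = { foo_readb, foo_readw, foo_readl };
       static CPUWriteMemoryFunc *foo_write[3] = { foo_writeb, foo_writew, foo_writel };

       int io = cpu_register_io_memory(0, foo_read, foo_write, foo_state);
       cpu_register_physical_memory(FOO_BASE, 0x1000, io);

   All foo_* names and FOO_BASE are placeholders; cpu_register_physical_memory()
   is the wrapper referred to in the comment above. */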
2872 void cpu_unregister_io_memory(int io_table_address)
2874 int i;
2875 int io_index = io_table_address >> IO_MEM_SHIFT;
2877 for (i=0;i < 3; i++) {
2878 io_mem_read[io_index][i] = unassigned_mem_read[i];
2879 io_mem_write[io_index][i] = unassigned_mem_write[i];
2881 io_mem_opaque[io_index] = NULL;
2882 io_mem_used[io_index] = 0;
2885 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2887 return io_mem_write[io_index >> IO_MEM_SHIFT];
2890 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2892 return io_mem_read[io_index >> IO_MEM_SHIFT];
2895 #endif /* !defined(CONFIG_USER_ONLY) */
2897 /* physical memory access (slow version, mainly for debug) */
2898 #if defined(CONFIG_USER_ONLY)
2899 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2900 int len, int is_write)
2902 int l, flags;
2903 target_ulong page;
2904 void * p;
2906 while (len > 0) {
2907 page = addr & TARGET_PAGE_MASK;
2908 l = (page + TARGET_PAGE_SIZE) - addr;
2909 if (l > len)
2910 l = len;
2911 flags = page_get_flags(page);
2912 if (!(flags & PAGE_VALID))
2913 return;
2914 if (is_write) {
2915 if (!(flags & PAGE_WRITE))
2916 return;
2917 /* XXX: this code should not depend on lock_user */
2918 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2919 /* FIXME - should this return an error rather than just fail? */
2920 return;
2921 memcpy(p, buf, l);
2922 unlock_user(p, addr, l);
2923 } else {
2924 if (!(flags & PAGE_READ))
2925 return;
2926 /* XXX: this code should not depend on lock_user */
2927 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2928 /* FIXME - should this return an error rather than just fail? */
2929 return;
2930 memcpy(buf, p, l);
2931 unlock_user(p, addr, 0);
2933 len -= l;
2934 buf += l;
2935 addr += l;
2939 #else
2940 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2941 int len, int is_write)
2943 int l, io_index;
2944 uint8_t *ptr;
2945 uint32_t val;
2946 target_phys_addr_t page;
2947 unsigned long pd;
2948 PhysPageDesc *p;
2950 while (len > 0) {
2951 page = addr & TARGET_PAGE_MASK;
2952 l = (page + TARGET_PAGE_SIZE) - addr;
2953 if (l > len)
2954 l = len;
2955 p = phys_page_find(page >> TARGET_PAGE_BITS);
2956 if (!p) {
2957 pd = IO_MEM_UNASSIGNED;
2958 } else {
2959 pd = p->phys_offset;
2962 if (is_write) {
2963 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2964 target_phys_addr_t addr1 = addr;
2965 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2966 if (p)
2967 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2968 /* XXX: could force cpu_single_env to NULL to avoid
2969 potential bugs */
2970 if (l >= 4 && ((addr1 & 3) == 0)) {
2971 /* 32 bit write access */
2972 val = ldl_p(buf);
2973 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
2974 l = 4;
2975 } else if (l >= 2 && ((addr1 & 1) == 0)) {
2976 /* 16 bit write access */
2977 val = lduw_p(buf);
2978 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
2979 l = 2;
2980 } else {
2981 /* 8 bit write access */
2982 val = ldub_p(buf);
2983 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
2984 l = 1;
2986 } else {
2987 unsigned long addr1;
2988 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2989 /* RAM case */
2990 ptr = phys_ram_base + addr1;
2991 memcpy(ptr, buf, l);
2992 if (!cpu_physical_memory_is_dirty(addr1)) {
2993 /* invalidate code */
2994 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2995 /* set dirty bit */
2996 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2997 (0xff & ~CODE_DIRTY_FLAG);
3000 } else {
3001 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3002 !(pd & IO_MEM_ROMD)) {
3003 target_phys_addr_t addr1 = addr;
3004 /* I/O case */
3005 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3006 if (p)
3007 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3008 if (l >= 4 && ((addr1 & 3) == 0)) {
3009 /* 32 bit read access */
3010 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3011 stl_p(buf, val);
3012 l = 4;
3013 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3014 /* 16 bit read access */
3015 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3016 stw_p(buf, val);
3017 l = 2;
3018 } else {
3019 /* 8 bit read access */
3020 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3021 stb_p(buf, val);
3022 l = 1;
3024 } else {
3025 /* RAM case */
3026 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3027 (addr & ~TARGET_PAGE_MASK);
3028 memcpy(buf, ptr, l);
3031 len -= l;
3032 buf += l;
3033 addr += l;
3037 /* used for ROM loading: can write in RAM and ROM */
3038 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3039 const uint8_t *buf, int len)
3041 int l;
3042 uint8_t *ptr;
3043 target_phys_addr_t page;
3044 unsigned long pd;
3045 PhysPageDesc *p;
3047 while (len > 0) {
3048 page = addr & TARGET_PAGE_MASK;
3049 l = (page + TARGET_PAGE_SIZE) - addr;
3050 if (l > len)
3051 l = len;
3052 p = phys_page_find(page >> TARGET_PAGE_BITS);
3053 if (!p) {
3054 pd = IO_MEM_UNASSIGNED;
3055 } else {
3056 pd = p->phys_offset;
3059 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3060 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3061 !(pd & IO_MEM_ROMD)) {
3062 /* do nothing */
3063 } else {
3064 unsigned long addr1;
3065 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3066 /* ROM/RAM case */
3067 ptr = phys_ram_base + addr1;
3068 memcpy(ptr, buf, l);
3070 len -= l;
3071 buf += l;
3072 addr += l;
3076 typedef struct {
3077 void *buffer;
3078 target_phys_addr_t addr;
3079 target_phys_addr_t len;
3080 } BounceBuffer;
3082 static BounceBuffer bounce;
3084 typedef struct MapClient {
3085 void *opaque;
3086 void (*callback)(void *opaque);
3087 LIST_ENTRY(MapClient) link;
3088 } MapClient;
3090 static LIST_HEAD(map_client_list, MapClient) map_client_list
3091 = LIST_HEAD_INITIALIZER(map_client_list);
3093 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3095 MapClient *client = qemu_malloc(sizeof(*client));
3097 client->opaque = opaque;
3098 client->callback = callback;
3099 LIST_INSERT_HEAD(&map_client_list, client, link);
3100 return client;
3103 void cpu_unregister_map_client(void *_client)
3105 MapClient *client = (MapClient *)_client;
3107 LIST_REMOVE(client, link);
3110 static void cpu_notify_map_clients(void)
3112 MapClient *client;
3114 while (!LIST_EMPTY(&map_client_list)) {
3115 client = LIST_FIRST(&map_client_list);
3116 client->callback(client->opaque);
3117 LIST_REMOVE(client, link);
3121 /* Map a physical memory region into a host virtual address.
3122 * May map a subset of the requested range, given by and returned in *plen.
3123 * May return NULL if resources needed to perform the mapping are exhausted.
3124 * Use only for reads OR writes - not for read-modify-write operations.
3125 * Use cpu_register_map_client() to know when retrying the map operation is
3126 * likely to succeed.
3128 void *cpu_physical_memory_map(target_phys_addr_t addr,
3129 target_phys_addr_t *plen,
3130 int is_write)
3132 target_phys_addr_t len = *plen;
3133 target_phys_addr_t done = 0;
3134 int l;
3135 uint8_t *ret = NULL;
3136 uint8_t *ptr;
3137 target_phys_addr_t page;
3138 unsigned long pd;
3139 PhysPageDesc *p;
3140 unsigned long addr1;
3142 while (len > 0) {
3143 page = addr & TARGET_PAGE_MASK;
3144 l = (page + TARGET_PAGE_SIZE) - addr;
3145 if (l > len)
3146 l = len;
3147 p = phys_page_find(page >> TARGET_PAGE_BITS);
3148 if (!p) {
3149 pd = IO_MEM_UNASSIGNED;
3150 } else {
3151 pd = p->phys_offset;
3154 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3155 if (done || bounce.buffer) {
3156 break;
3158 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3159 bounce.addr = addr;
3160 bounce.len = l;
3161 if (!is_write) {
3162 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3164 ptr = bounce.buffer;
3165 } else {
3166 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3167 ptr = phys_ram_base + addr1;
3169 if (!done) {
3170 ret = ptr;
3171 } else if (ret + done != ptr) {
3172 break;
3175 len -= l;
3176 addr += l;
3177 done += l;
3179 *plen = done;
3180 return ret;
3183 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3184 * Will also mark the memory as dirty if is_write == 1. access_len gives
3185 * the amount of memory that was actually read or written by the caller.
3187 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3188 int is_write, target_phys_addr_t access_len)
3190 if (buffer != bounce.buffer) {
3191 if (is_write) {
3192 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3193 while (access_len) {
3194 unsigned l;
3195 l = TARGET_PAGE_SIZE;
3196 if (l > access_len)
3197 l = access_len;
3198 if (!cpu_physical_memory_is_dirty(addr1)) {
3199 /* invalidate code */
3200 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3201 /* set dirty bit */
3202 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3203 (0xff & ~CODE_DIRTY_FLAG);
3205 addr1 += l;
3206 access_len -= l;
3209 return;
3211 if (is_write) {
3212 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3214 qemu_free(bounce.buffer);
3215 bounce.buffer = NULL;
3216 cpu_notify_map_clients();
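/* Usage sketch (illustration only, names are placeholders): zero-copy
   DMA-style access pairs map and unmap, falling back to a map client when
   bounce-buffer resources are exhausted:

       target_phys_addr_t len = size;
       uint8_t *p = cpu_physical_memory_map(dma_addr, &len, is_write);
       if (!p) {
           cpu_register_map_client(opaque, retry_dma);   // retry later
       } else {
           // ... access up to 'len' bytes at p ...
           cpu_physical_memory_unmap(p, len, is_write, len);
       }
*/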
3219 /* warning: addr must be aligned */
3220 uint32_t ldl_phys(target_phys_addr_t addr)
3222 int io_index;
3223 uint8_t *ptr;
3224 uint32_t val;
3225 unsigned long pd;
3226 PhysPageDesc *p;
3228 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3229 if (!p) {
3230 pd = IO_MEM_UNASSIGNED;
3231 } else {
3232 pd = p->phys_offset;
3235 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3236 !(pd & IO_MEM_ROMD)) {
3237 /* I/O case */
3238 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3239 if (p)
3240 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3241 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3242 } else {
3243 /* RAM case */
3244 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3245 (addr & ~TARGET_PAGE_MASK);
3246 val = ldl_p(ptr);
3248 return val;
3251 /* warning: addr must be aligned */
3252 uint64_t ldq_phys(target_phys_addr_t addr)
3254 int io_index;
3255 uint8_t *ptr;
3256 uint64_t val;
3257 unsigned long pd;
3258 PhysPageDesc *p;
3260 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3261 if (!p) {
3262 pd = IO_MEM_UNASSIGNED;
3263 } else {
3264 pd = p->phys_offset;
3267 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3268 !(pd & IO_MEM_ROMD)) {
3269 /* I/O case */
3270 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3271 if (p)
3272 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3273 #ifdef TARGET_WORDS_BIGENDIAN
3274 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3275 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3276 #else
3277 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3278 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3279 #endif
3280 } else {
3281 /* RAM case */
3282 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3283 (addr & ~TARGET_PAGE_MASK);
3284 val = ldq_p(ptr);
3286 return val;
3289 /* XXX: optimize */
3290 uint32_t ldub_phys(target_phys_addr_t addr)
3292 uint8_t val;
3293 cpu_physical_memory_read(addr, &val, 1);
3294 return val;
3297 /* XXX: optimize */
3298 uint32_t lduw_phys(target_phys_addr_t addr)
3300 uint16_t val;
3301 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3302 return tswap16(val);
3305 /* warning: addr must be aligned. The ram page is not marked as dirty
3306 and the code inside is not invalidated. It is useful if the dirty
3307 bits are used to track modified PTEs */
3308 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3310 int io_index;
3311 uint8_t *ptr;
3312 unsigned long pd;
3313 PhysPageDesc *p;
3315 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3316 if (!p) {
3317 pd = IO_MEM_UNASSIGNED;
3318 } else {
3319 pd = p->phys_offset;
3322 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3323 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3324 if (p)
3325 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3326 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3327 } else {
3328 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3329 ptr = phys_ram_base + addr1;
3330 stl_p(ptr, val);
3332 if (unlikely(in_migration)) {
3333 if (!cpu_physical_memory_is_dirty(addr1)) {
3334 /* invalidate code */
3335 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3336 /* set dirty bit */
3337 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3338 (0xff & ~CODE_DIRTY_FLAG);
3344 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3346 int io_index;
3347 uint8_t *ptr;
3348 unsigned long pd;
3349 PhysPageDesc *p;
3351 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3352 if (!p) {
3353 pd = IO_MEM_UNASSIGNED;
3354 } else {
3355 pd = p->phys_offset;
3358 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3359 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3360 if (p)
3361 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3362 #ifdef TARGET_WORDS_BIGENDIAN
3363 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3364 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3365 #else
3366 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3367 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3368 #endif
3369 } else {
3370 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3371 (addr & ~TARGET_PAGE_MASK);
3372 stq_p(ptr, val);
3376 /* warning: addr must be aligned */
3377 void stl_phys(target_phys_addr_t addr, uint32_t val)
3379 int io_index;
3380 uint8_t *ptr;
3381 unsigned long pd;
3382 PhysPageDesc *p;
3384 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3385 if (!p) {
3386 pd = IO_MEM_UNASSIGNED;
3387 } else {
3388 pd = p->phys_offset;
3391 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3392 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3393 if (p)
3394 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3395 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3396 } else {
3397 unsigned long addr1;
3398 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3399 /* RAM case */
3400 ptr = phys_ram_base + addr1;
3401 stl_p(ptr, val);
3402 if (!cpu_physical_memory_is_dirty(addr1)) {
3403 /* invalidate code */
3404 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3405 /* set dirty bit */
3406 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3407 (0xff & ~CODE_DIRTY_FLAG);
3412 /* XXX: optimize */
3413 void stb_phys(target_phys_addr_t addr, uint32_t val)
3415 uint8_t v = val;
3416 cpu_physical_memory_write(addr, &v, 1);
3419 /* XXX: optimize */
3420 void stw_phys(target_phys_addr_t addr, uint32_t val)
3422 uint16_t v = tswap16(val);
3423 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3426 /* XXX: optimize */
3427 void stq_phys(target_phys_addr_t addr, uint64_t val)
3429 val = tswap64(val);
3430 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
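/* The ld*_phys/st*_phys helpers above give device and table-walk code
   word-sized access to guest physical memory without an explicit buffer,
   e.g. (addresses and values are placeholders):

       uint32_t pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | updated_bits);   // PTE update that
                                                          // leaves dirty bits alone
*/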
3433 #endif
3435 /* virtual memory access for debug */
3436 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3437 uint8_t *buf, int len, int is_write)
3439 int l;
3440 target_phys_addr_t phys_addr;
3441 target_ulong page;
3443 while (len > 0) {
3444 page = addr & TARGET_PAGE_MASK;
3445 phys_addr = cpu_get_phys_page_debug(env, page);
3446 /* if no physical page mapped, return an error */
3447 if (phys_addr == -1)
3448 return -1;
3449 l = (page + TARGET_PAGE_SIZE) - addr;
3450 if (l > len)
3451 l = len;
3452 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3453 buf, l, is_write);
3454 len -= l;
3455 buf += l;
3456 addr += l;
3458 return 0;
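/* Usage sketch (illustration only): a debugger stub can read guest virtual
   memory through the current CPU's address translation:

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, guest_va, buf, sizeof(buf), 0) < 0)
           return -1;   // no physical page mapped at guest_va

   guest_va is a hypothetical virtual address of interest. */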
3461 /* in deterministic execution mode, instructions doing device I/Os
3462 must be at the end of the TB */
3463 void cpu_io_recompile(CPUState *env, void *retaddr)
3465 TranslationBlock *tb;
3466 uint32_t n, cflags;
3467 target_ulong pc, cs_base;
3468 uint64_t flags;
3470 tb = tb_find_pc((unsigned long)retaddr);
3471 if (!tb) {
3472 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3473 retaddr);
3475 n = env->icount_decr.u16.low + tb->icount;
3476 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3477 /* Calculate how many instructions had been executed before the fault
3478 occurred. */
3479 n = n - env->icount_decr.u16.low;
3480 /* Generate a new TB ending on the I/O insn. */
3481 n++;
3482 /* On MIPS and SH, delay slot instructions can only be restarted if
3483 they were already the first instruction in the TB. If this is not
3484 the first instruction in a TB then re-execute the preceding
3485 branch. */
3486 #if defined(TARGET_MIPS)
3487 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3488 env->active_tc.PC -= 4;
3489 env->icount_decr.u16.low++;
3490 env->hflags &= ~MIPS_HFLAG_BMASK;
3492 #elif defined(TARGET_SH4)
3493 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3494 && n > 1) {
3495 env->pc -= 2;
3496 env->icount_decr.u16.low++;
3497 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3499 #endif
3500 /* This should never happen. */
3501 if (n > CF_COUNT_MASK)
3502 cpu_abort(env, "TB too big during recompile");
3504 cflags = n | CF_LAST_IO;
3505 pc = tb->pc;
3506 cs_base = tb->cs_base;
3507 flags = tb->flags;
3508 tb_phys_invalidate(tb, -1);
3509 /* FIXME: In theory this could raise an exception. In practice
3510 we have already translated the block once so it's probably ok. */
3511 tb_gen_code(env, pc, cs_base, flags, cflags);
3512 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3513 the first in the TB) then we end up generating a whole new TB and
3514 repeating the fault, which is horribly inefficient.
3515 Better would be to execute just this insn uncached, or generate a
3516 second new TB. */
3517 cpu_resume_from_signal(env, NULL);
3520 void dump_exec_info(FILE *f,
3521 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3523 int i, target_code_size, max_target_code_size;
3524 int direct_jmp_count, direct_jmp2_count, cross_page;
3525 TranslationBlock *tb;
3527 target_code_size = 0;
3528 max_target_code_size = 0;
3529 cross_page = 0;
3530 direct_jmp_count = 0;
3531 direct_jmp2_count = 0;
3532 for(i = 0; i < nb_tbs; i++) {
3533 tb = &tbs[i];
3534 target_code_size += tb->size;
3535 if (tb->size > max_target_code_size)
3536 max_target_code_size = tb->size;
3537 if (tb->page_addr[1] != -1)
3538 cross_page++;
3539 if (tb->tb_next_offset[0] != 0xffff) {
3540 direct_jmp_count++;
3541 if (tb->tb_next_offset[1] != 0xffff) {
3542 direct_jmp2_count++;
3546 /* XXX: avoid using doubles ? */
3547 cpu_fprintf(f, "Translation buffer state:\n");
3548 cpu_fprintf(f, "gen code size %ld/%ld\n",
3549 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3550 cpu_fprintf(f, "TB count %d/%d\n",
3551 nb_tbs, code_gen_max_blocks);
3552 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3553 nb_tbs ? target_code_size / nb_tbs : 0,
3554 max_target_code_size);
3555 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3556 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3557 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3558 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3559 cross_page,
3560 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3561 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3562 direct_jmp_count,
3563 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3564 direct_jmp2_count,
3565 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3566 cpu_fprintf(f, "\nStatistics:\n");
3567 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3568 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3569 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3570 tcg_dump_info(f, cpu_fprintf);
3573 #if !defined(CONFIG_USER_ONLY)
3575 #define MMUSUFFIX _cmmu
3576 #define GETPC() NULL
3577 #define env cpu_single_env
3578 #define SOFTMMU_CODE_ACCESS
3580 #define SHIFT 0
3581 #include "softmmu_template.h"
3583 #define SHIFT 1
3584 #include "softmmu_template.h"
3586 #define SHIFT 2
3587 #include "softmmu_template.h"
3589 #define SHIFT 3
3590 #include "softmmu_template.h"
3592 #undef env
3594 #endif