[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
115 int phys_ram_fd;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
120 #endif
122 CPUState *first_cpu;
123 /* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129 int use_icount = 0;
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132 int64_t qemu_icount;
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149 ram_addr_t region_offset;
150 } PhysPageDesc;
152 #define L2_BITS 10
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64-bit address space.
157 */
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #else
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161 #endif
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
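/* Illustrative sketch (not from the original source): how the two-level page
   table above splits a page index. With the 32-bit layout (L2_BITS = 10 and,
   typically, TARGET_PAGE_BITS = 12, hence L1_BITS = 10), the walk done by
   page_find()/page_find_alloc() below is essentially:

       target_ulong index = addr >> TARGET_PAGE_BITS;
       PageDesc *l2 = l1_map[index >> L2_BITS];           (L1 lookup)
       PageDesc *pd = l2 ? &l2[index & (L2_SIZE - 1)]     (L2 lookup)
                         : NULL;

   page_find_alloc() additionally allocates the L2 array on demand. */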
166 unsigned long qemu_real_host_page_size;
167 unsigned long qemu_host_page_bits;
168 unsigned long qemu_host_page_size;
169 unsigned long qemu_host_page_mask;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc *l1_map[L1_SIZE];
173 static PhysPageDesc **l1_phys_map;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182 char io_mem_used[IO_MEM_NB_ENTRIES];
183 static int io_mem_watch;
184 #endif
186 /* log support */
187 static const char *logfilename = "/tmp/qemu.log";
188 FILE *logfile;
189 int loglevel;
190 static int log_append = 0;
192 /* statistics */
193 static int tlb_flush_count;
194 static int tb_flush_count;
195 static int tb_phys_invalidate_count;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t {
199 target_phys_addr_t base;
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204 } subpage_t;
206 #ifdef _WIN32
207 static void map_exec(void *addr, long size)
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
214 #else
215 static void map_exec(void *addr, long size)
217 unsigned long start, end, page_size;
219 page_size = getpagesize();
220 start = (unsigned long)addr;
221 start &= ~(page_size - 1);
223 end = (unsigned long)addr + size;
224 end += page_size - 1;
225 end &= ~(page_size - 1);
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
230 #endif
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
235 TARGET_PAGE_SIZE */
236 #ifdef _WIN32
238 SYSTEM_INFO system_info;
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
243 #else
244 qemu_real_host_page_size = getpagesize();
245 #endif
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
263 mmap_lock();
264 last_brk = (unsigned long)sbrk(0);
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274 page_set_flags(startaddr & TARGET_PAGE_MASK,
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
278 } while (!feof(f));
279 fclose(f);
281 mmap_unlock();
283 #endif
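/* Explanatory addition: after page_init(), qemu_host_page_size is at least
   TARGET_PAGE_SIZE (defaulting to the real host page size) and
   qemu_host_page_mask is the matching mask, so code that must work at host
   page granularity can round a guest page address down to a host page
   boundary, as tb_alloc_page() does before calling mprotect():

       page_addr &= qemu_host_page_mask;

   On a host and target that both use 4 KB pages the two sizes coincide. */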
286 static inline PageDesc **page_l1_map(target_ulong index)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292 return NULL;
293 #endif
294 return &l1_map[index >> L2_BITS];
297 static inline PageDesc *page_find_alloc(target_ulong index)
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 size_t len = sizeof(PageDesc) * L2_SIZE;
309 /* Don't use qemu_malloc because it may recurse. */
310 p = mmap(0, len, PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
312 *lp = p;
313 if (h2g_valid(p)) {
314 unsigned long addr = h2g(p);
315 page_set_flags(addr & TARGET_PAGE_MASK,
316 TARGET_PAGE_ALIGN(addr + len),
317 PAGE_RESERVED);
319 #else
320 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 *lp = p;
322 #endif
324 return p + (index & (L2_SIZE - 1));
327 static inline PageDesc *page_find(target_ulong index)
329 PageDesc **lp, *p;
330 lp = page_l1_map(index);
331 if (!lp)
332 return NULL;
334 p = *lp;
335 if (!p)
336 return 0;
337 return p + (index & (L2_SIZE - 1));
340 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
342 void **lp, **p;
343 PhysPageDesc *pd;
345 p = (void **)l1_phys_map;
346 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
348 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350 #endif
351 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352 p = *lp;
353 if (!p) {
354 /* allocate if not found */
355 if (!alloc)
356 return NULL;
357 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358 memset(p, 0, sizeof(void *) * L1_SIZE);
359 *lp = p;
361 #endif
362 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363 pd = *lp;
364 if (!pd) {
365 int i;
366 /* allocate if not found */
367 if (!alloc)
368 return NULL;
369 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 *lp = pd;
371 for (i = 0; i < L2_SIZE; i++) {
372 pd[i].phys_offset = IO_MEM_UNASSIGNED;
373 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
376 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
379 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
381 return phys_page_find_alloc(index, 0);
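/* Usage sketch (not from the original source; it mirrors callers later in
   this file such as breakpoint_invalidate() and tlb_set_page_exec()). A
   physical page descriptor is looked up by page index, and a missing entry is
   treated as unassigned I/O memory:

       PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
       ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

   phys_page_find() never allocates (alloc == 0); the allocating variant is
   meant for code that registers memory regions. */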
384 #if !defined(CONFIG_USER_ONLY)
385 static void tlb_protect_code(ram_addr_t ram_addr);
386 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
387 target_ulong vaddr);
388 #define mmap_lock() do { } while(0)
389 #define mmap_unlock() do { } while(0)
390 #endif
392 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
394 #if defined(CONFIG_USER_ONLY)
395 /* Currently it is not recommended to allocate big chunks of data in
396 user mode. It will change when a dedicated libc is used */
397 #define USE_STATIC_CODE_GEN_BUFFER
398 #endif
400 #ifdef USE_STATIC_CODE_GEN_BUFFER
401 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
402 #endif
404 static void code_gen_alloc(unsigned long tb_size)
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer = static_code_gen_buffer;
408 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
409 map_exec(code_gen_buffer, code_gen_buffer_size);
410 #else
411 code_gen_buffer_size = tb_size;
412 if (code_gen_buffer_size == 0) {
413 #if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416 #else
417 /* XXX: needs adjustments */
418 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
419 #endif
421 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
422 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425 #if defined(__linux__)
427 int flags;
428 void *start = NULL;
430 flags = MAP_PRIVATE | MAP_ANONYMOUS;
431 #if defined(__x86_64__)
432 flags |= MAP_32BIT;
433 /* Cannot map more than that */
434 if (code_gen_buffer_size > (800 * 1024 * 1024))
435 code_gen_buffer_size = (800 * 1024 * 1024);
436 #elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
438 flags |= MAP_FIXED;
439 start = (void *) 0x60000000UL;
440 if (code_gen_buffer_size > (512 * 1024 * 1024))
441 code_gen_buffer_size = (512 * 1024 * 1024);
442 #elif defined(__arm__)
443 /* Map the buffer below 32M, so we can use direct calls and branches */
444 flags |= MAP_FIXED;
445 start = (void *) 0x01000000UL;
446 if (code_gen_buffer_size > 16 * 1024 * 1024)
447 code_gen_buffer_size = 16 * 1024 * 1024;
448 #endif
449 code_gen_buffer = mmap(start, code_gen_buffer_size,
450 PROT_WRITE | PROT_READ | PROT_EXEC,
451 flags, -1, 0);
452 if (code_gen_buffer == MAP_FAILED) {
453 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 exit(1);
457 #elif defined(__FreeBSD__)
459 int flags;
460 void *addr = NULL;
461 flags = MAP_PRIVATE | MAP_ANONYMOUS;
462 #if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
465 flags |= MAP_FIXED;
466 addr = (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size > (800 * 1024 * 1024))
469 code_gen_buffer_size = (800 * 1024 * 1024);
470 #endif
471 code_gen_buffer = mmap(addr, code_gen_buffer_size,
472 PROT_WRITE | PROT_READ | PROT_EXEC,
473 flags, -1, 0);
474 if (code_gen_buffer == MAP_FAILED) {
475 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 exit(1);
479 #else
480 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482 #endif
483 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
484 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
485 code_gen_buffer_max_size = code_gen_buffer_size -
486 code_gen_max_block_size();
487 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
488 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
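/* Explanatory addition: code_gen_buffer_max_size is a flush threshold rather
   than the true buffer size; it leaves room for one maximally sized block so
   that a TB started just below the threshold still fits:

       code_gen_buffer_max_size = code_gen_buffer_size -
                                  code_gen_max_block_size();

   tb_alloc() below returns NULL once code_gen_ptr passes this point (or once
   code_gen_max_blocks TBs exist), which forces the caller to tb_flush(). */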
491 /* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
493 size. */
494 void cpu_exec_init_all(unsigned long tb_size)
496 cpu_gen_init();
497 code_gen_alloc(tb_size);
498 code_gen_ptr = code_gen_buffer;
499 page_init();
500 #if !defined(CONFIG_USER_ONLY)
501 io_mem_init();
502 #endif
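/* Minimal usage sketch (not from the original file): the start-up code is
   expected to call this once, before any CPU is created, e.g.

       cpu_exec_init_all(0);     pass 0 to pick the default code buffer size
       ...
       cpu_exec_init(env);       then register each CPUState

   tb_size is in bytes; code_gen_alloc() above shows how a zero size is
   resolved. */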
505 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
507 #define CPU_COMMON_SAVE_VERSION 1
509 static void cpu_common_save(QEMUFile *f, void *opaque)
511 CPUState *env = opaque;
513 qemu_put_be32s(f, &env->halted);
514 qemu_put_be32s(f, &env->interrupt_request);
517 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
519 CPUState *env = opaque;
521 if (version_id != CPU_COMMON_SAVE_VERSION)
522 return -EINVAL;
524 qemu_get_be32s(f, &env->halted);
525 qemu_get_be32s(f, &env->interrupt_request);
526 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
527 tlb_flush(env, 1);
529 return 0;
531 #endif
533 void cpu_exec_init(CPUState *env)
535 CPUState **penv;
536 int cpu_index;
538 env->next_cpu = NULL;
539 penv = &first_cpu;
540 cpu_index = 0;
541 while (*penv != NULL) {
542 penv = (CPUState **)&(*penv)->next_cpu;
543 cpu_index++;
545 env->cpu_index = cpu_index;
546 TAILQ_INIT(&env->breakpoints);
547 TAILQ_INIT(&env->watchpoints);
548 *penv = env;
549 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
550 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
551 cpu_common_save, cpu_common_load, env);
552 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
553 cpu_save, cpu_load, env);
554 #endif
557 static inline void invalidate_page_bitmap(PageDesc *p)
559 if (p->code_bitmap) {
560 qemu_free(p->code_bitmap);
561 p->code_bitmap = NULL;
563 p->code_write_count = 0;
566 /* set to NULL all the 'first_tb' fields in all PageDescs */
567 static void page_flush_tb(void)
569 int i, j;
570 PageDesc *p;
572 for(i = 0; i < L1_SIZE; i++) {
573 p = l1_map[i];
574 if (p) {
575 for(j = 0; j < L2_SIZE; j++) {
576 p->first_tb = NULL;
577 invalidate_page_bitmap(p);
578 p++;
584 /* flush all the translation blocks */
585 /* XXX: tb_flush is currently not thread safe */
586 void tb_flush(CPUState *env1)
588 CPUState *env;
589 #if defined(DEBUG_FLUSH)
590 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
591 (unsigned long)(code_gen_ptr - code_gen_buffer),
592 nb_tbs, nb_tbs > 0 ?
593 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
594 #endif
595 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
596 cpu_abort(env1, "Internal error: code buffer overflow\n");
598 nb_tbs = 0;
600 for(env = first_cpu; env != NULL; env = env->next_cpu) {
601 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
604 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
605 page_flush_tb();
607 code_gen_ptr = code_gen_buffer;
608 /* XXX: flush processor icache at this point if cache flush is
609 expensive */
610 tb_flush_count++;
613 #ifdef DEBUG_TB_CHECK
615 static void tb_invalidate_check(target_ulong address)
617 TranslationBlock *tb;
618 int i;
619 address &= TARGET_PAGE_MASK;
620 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
621 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
622 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
623 address >= tb->pc + tb->size)) {
624 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
625 address, (long)tb->pc, tb->size);
631 /* verify that all the pages have correct rights for code */
632 static void tb_page_check(void)
634 TranslationBlock *tb;
635 int i, flags1, flags2;
637 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
638 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
639 flags1 = page_get_flags(tb->pc);
640 flags2 = page_get_flags(tb->pc + tb->size - 1);
641 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
642 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
643 (long)tb->pc, tb->size, flags1, flags2);
649 static void tb_jmp_check(TranslationBlock *tb)
651 TranslationBlock *tb1;
652 unsigned int n1;
654 /* suppress any remaining jumps to this TB */
655 tb1 = tb->jmp_first;
656 for(;;) {
657 n1 = (long)tb1 & 3;
658 tb1 = (TranslationBlock *)((long)tb1 & ~3);
659 if (n1 == 2)
660 break;
661 tb1 = tb1->jmp_next[n1];
663 /* check end of list */
664 if (tb1 != tb) {
665 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
669 #endif
671 /* invalidate one TB */
672 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
673 int next_offset)
675 TranslationBlock *tb1;
676 for(;;) {
677 tb1 = *ptb;
678 if (tb1 == tb) {
679 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
680 break;
682 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
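/* Explanatory addition: tb_remove() is a generic unlink for a singly linked
   list whose "next" pointer may sit at any offset inside TranslationBlock;
   the caller names the field via offsetof(). Its one use in this file is in
   tb_phys_invalidate():

       tb_remove(&tb_phys_hash[h], tb,
                 offsetof(TranslationBlock, phys_hash_next));

   which unlinks the TB from the physical-PC hash chain. */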
686 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
688 TranslationBlock *tb1;
689 unsigned int n1;
691 for(;;) {
692 tb1 = *ptb;
693 n1 = (long)tb1 & 3;
694 tb1 = (TranslationBlock *)((long)tb1 & ~3);
695 if (tb1 == tb) {
696 *ptb = tb1->page_next[n1];
697 break;
699 ptb = &tb1->page_next[n1];
703 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
705 TranslationBlock *tb1, **ptb;
706 unsigned int n1;
708 ptb = &tb->jmp_next[n];
709 tb1 = *ptb;
710 if (tb1) {
711 /* find tb(n) in circular list */
712 for(;;) {
713 tb1 = *ptb;
714 n1 = (long)tb1 & 3;
715 tb1 = (TranslationBlock *)((long)tb1 & ~3);
716 if (n1 == n && tb1 == tb)
717 break;
718 if (n1 == 2) {
719 ptb = &tb1->jmp_first;
720 } else {
721 ptb = &tb1->jmp_next[n1];
724 /* now we can suppress tb(n) from the list */
725 *ptb = tb->jmp_next[n];
727 tb->jmp_next[n] = NULL;
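/* Explanatory addition: the page and jump lists above keep a small tag in the
   two low bits of each TranslationBlock pointer. For page lists the tag is
   the TB's page slot (0 or 1); in the circular jump list the value 2 marks
   the owning TB (tb->jmp_first = tb | 2). Hence the recurring idiom in the
   walkers:

       n1  = (long)tb1 & 3;                          extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   strip it back off

   which also means TranslationBlock pointers must be at least 4-byte
   aligned. */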
731 /* reset the jump entry 'n' of a TB so that it is not chained to
732 another TB */
733 static inline void tb_reset_jump(TranslationBlock *tb, int n)
735 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
738 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
740 CPUState *env;
741 PageDesc *p;
742 unsigned int h, n1;
743 target_phys_addr_t phys_pc;
744 TranslationBlock *tb1, *tb2;
746 /* remove the TB from the hash list */
747 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
748 h = tb_phys_hash_func(phys_pc);
749 tb_remove(&tb_phys_hash[h], tb,
750 offsetof(TranslationBlock, phys_hash_next));
752 /* remove the TB from the page list */
753 if (tb->page_addr[0] != page_addr) {
754 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
755 tb_page_remove(&p->first_tb, tb);
756 invalidate_page_bitmap(p);
758 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
759 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
764 tb_invalidated_flag = 1;
766 /* remove the TB from the hash list */
767 h = tb_jmp_cache_hash_func(tb->pc);
768 for(env = first_cpu; env != NULL; env = env->next_cpu) {
769 if (env->tb_jmp_cache[h] == tb)
770 env->tb_jmp_cache[h] = NULL;
773 /* suppress this TB from the two jump lists */
774 tb_jmp_remove(tb, 0);
775 tb_jmp_remove(tb, 1);
777 /* suppress any remaining jumps to this TB */
778 tb1 = tb->jmp_first;
779 for(;;) {
780 n1 = (long)tb1 & 3;
781 if (n1 == 2)
782 break;
783 tb1 = (TranslationBlock *)((long)tb1 & ~3);
784 tb2 = tb1->jmp_next[n1];
785 tb_reset_jump(tb1, n1);
786 tb1->jmp_next[n1] = NULL;
787 tb1 = tb2;
789 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
791 tb_phys_invalidate_count++;
794 static inline void set_bits(uint8_t *tab, int start, int len)
796 int end, mask, end1;
798 end = start + len;
799 tab += start >> 3;
800 mask = 0xff << (start & 7);
801 if ((start & ~7) == (end & ~7)) {
802 if (start < end) {
803 mask &= ~(0xff << (end & 7));
804 *tab |= mask;
806 } else {
807 *tab++ |= mask;
808 start = (start + 8) & ~7;
809 end1 = end & ~7;
810 while (start < end1) {
811 *tab++ = 0xff;
812 start += 8;
814 if (start < end) {
815 mask = ~(0xff << (end & 7));
816 *tab |= mask;
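/* Worked example for set_bits() (illustration only). Bits are numbered
   LSB-first within each byte, so

       set_bits(bitmap, 10, 4);

   sets bits 10..13, i.e. bits 2..5 of bitmap[1] (mask 0x3c). The function
   handles a partial leading byte, whole middle bytes and a partial trailing
   byte as separate cases. */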
821 static void build_page_bitmap(PageDesc *p)
823 int n, tb_start, tb_end;
824 TranslationBlock *tb;
826 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
828 tb = p->first_tb;
829 while (tb != NULL) {
830 n = (long)tb & 3;
831 tb = (TranslationBlock *)((long)tb & ~3);
832 /* NOTE: this is subtle as a TB may span two physical pages */
833 if (n == 0) {
834 /* NOTE: tb_end may be after the end of the page, but
835 it is not a problem */
836 tb_start = tb->pc & ~TARGET_PAGE_MASK;
837 tb_end = tb_start + tb->size;
838 if (tb_end > TARGET_PAGE_SIZE)
839 tb_end = TARGET_PAGE_SIZE;
840 } else {
841 tb_start = 0;
842 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
844 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
845 tb = tb->page_next[n];
849 TranslationBlock *tb_gen_code(CPUState *env,
850 target_ulong pc, target_ulong cs_base,
851 int flags, int cflags)
853 TranslationBlock *tb;
854 uint8_t *tc_ptr;
855 target_ulong phys_pc, phys_page2, virt_page2;
856 int code_gen_size;
858 phys_pc = get_phys_addr_code(env, pc);
859 tb = tb_alloc(pc);
860 if (!tb) {
861 /* flush must be done */
862 tb_flush(env);
863 /* cannot fail at this point */
864 tb = tb_alloc(pc);
865 /* Don't forget to invalidate previous TB info. */
866 tb_invalidated_flag = 1;
868 tc_ptr = code_gen_ptr;
869 tb->tc_ptr = tc_ptr;
870 tb->cs_base = cs_base;
871 tb->flags = flags;
872 tb->cflags = cflags;
873 cpu_gen_code(env, tb, &code_gen_size);
874 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
876 /* check next page if needed */
877 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
878 phys_page2 = -1;
879 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
880 phys_page2 = get_phys_addr_code(env, virt_page2);
882 tb_link_phys(tb, phys_pc, phys_page2);
883 return tb;
886 /* invalidate all TBs which intersect with the target physical page
887 starting in range [start;end[. NOTE: start and end must refer to
888 the same physical page. 'is_cpu_write_access' should be true if called
889 from a real cpu write access: the virtual CPU will exit the current
890 TB if code is modified inside this TB. */
891 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
892 int is_cpu_write_access)
894 TranslationBlock *tb, *tb_next, *saved_tb;
895 CPUState *env = cpu_single_env;
896 target_ulong tb_start, tb_end;
897 PageDesc *p;
898 int n;
899 #ifdef TARGET_HAS_PRECISE_SMC
900 int current_tb_not_found = is_cpu_write_access;
901 TranslationBlock *current_tb = NULL;
902 int current_tb_modified = 0;
903 target_ulong current_pc = 0;
904 target_ulong current_cs_base = 0;
905 int current_flags = 0;
906 #endif /* TARGET_HAS_PRECISE_SMC */
908 p = page_find(start >> TARGET_PAGE_BITS);
909 if (!p)
910 return;
911 if (!p->code_bitmap &&
912 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
913 is_cpu_write_access) {
914 /* build code bitmap */
915 build_page_bitmap(p);
918 /* we remove all the TBs in the range [start, end[ */
919 /* XXX: see if in some cases it could be faster to invalidate all the code */
920 tb = p->first_tb;
921 while (tb != NULL) {
922 n = (long)tb & 3;
923 tb = (TranslationBlock *)((long)tb & ~3);
924 tb_next = tb->page_next[n];
925 /* NOTE: this is subtle as a TB may span two physical pages */
926 if (n == 0) {
927 /* NOTE: tb_end may be after the end of the page, but
928 it is not a problem */
929 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
930 tb_end = tb_start + tb->size;
931 } else {
932 tb_start = tb->page_addr[1];
933 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
935 if (!(tb_end <= start || tb_start >= end)) {
936 #ifdef TARGET_HAS_PRECISE_SMC
937 if (current_tb_not_found) {
938 current_tb_not_found = 0;
939 current_tb = NULL;
940 if (env->mem_io_pc) {
941 /* now we have a real cpu fault */
942 current_tb = tb_find_pc(env->mem_io_pc);
945 if (current_tb == tb &&
946 (current_tb->cflags & CF_COUNT_MASK) != 1) {
947 /* If we are modifying the current TB, we must stop
948 its execution. We could be more precise by checking
949 that the modification is after the current PC, but it
950 would require a specialized function to partially
951 restore the CPU state */
953 current_tb_modified = 1;
954 cpu_restore_state(current_tb, env,
955 env->mem_io_pc, NULL);
956 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
957 &current_flags);
959 #endif /* TARGET_HAS_PRECISE_SMC */
960 /* we need to do that to handle the case where a signal
961 occurs while doing tb_phys_invalidate() */
962 saved_tb = NULL;
963 if (env) {
964 saved_tb = env->current_tb;
965 env->current_tb = NULL;
967 tb_phys_invalidate(tb, -1);
968 if (env) {
969 env->current_tb = saved_tb;
970 if (env->interrupt_request && env->current_tb)
971 cpu_interrupt(env, env->interrupt_request);
974 tb = tb_next;
976 #if !defined(CONFIG_USER_ONLY)
977 /* if no code remaining, no need to continue to use slow writes */
978 if (!p->first_tb) {
979 invalidate_page_bitmap(p);
980 if (is_cpu_write_access) {
981 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
984 #endif
985 #ifdef TARGET_HAS_PRECISE_SMC
986 if (current_tb_modified) {
987 /* we generate a block containing just the instruction
988 modifying the memory. It will ensure that it cannot modify
989 itself */
990 env->current_tb = NULL;
991 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
992 cpu_resume_from_signal(env, NULL);
994 #endif
997 /* len must be <= 8 and start must be a multiple of len */
998 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1000 PageDesc *p;
1001 int offset, b;
1002 #if 0
1003 if (1) {
1004 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1005 cpu_single_env->mem_io_vaddr, len,
1006 cpu_single_env->eip,
1007 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1009 #endif
1010 p = page_find(start >> TARGET_PAGE_BITS);
1011 if (!p)
1012 return;
1013 if (p->code_bitmap) {
1014 offset = start & ~TARGET_PAGE_MASK;
1015 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1016 if (b & ((1 << len) - 1))
1017 goto do_invalidate;
1018 } else {
1019 do_invalidate:
1020 tb_invalidate_phys_page_range(start, start + len, 1);
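/* Illustration of the bitmap test above (not from the original source). For
   a write of 'len' bytes at 'start', the code shifts out the bits of
   p->code_bitmap that cover the written bytes and invalidates only if one of
   them is set:

       offset = start & ~TARGET_PAGE_MASK;
       b = p->code_bitmap[offset >> 3] >> (offset & 7);
       if (b & ((1 << len) - 1))
           tb_invalidate_phys_page_range(start, start + len, 1);

   With the documented constraint (len <= 8 and start a multiple of len, i.e.
   the 1/2/4/8-byte accesses that reach this path), the bits never straddle a
   bitmap byte. */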
1024 #if !defined(CONFIG_SOFTMMU)
1025 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1026 unsigned long pc, void *puc)
1028 TranslationBlock *tb;
1029 PageDesc *p;
1030 int n;
1031 #ifdef TARGET_HAS_PRECISE_SMC
1032 TranslationBlock *current_tb = NULL;
1033 CPUState *env = cpu_single_env;
1034 int current_tb_modified = 0;
1035 target_ulong current_pc = 0;
1036 target_ulong current_cs_base = 0;
1037 int current_flags = 0;
1038 #endif
1040 addr &= TARGET_PAGE_MASK;
1041 p = page_find(addr >> TARGET_PAGE_BITS);
1042 if (!p)
1043 return;
1044 tb = p->first_tb;
1045 #ifdef TARGET_HAS_PRECISE_SMC
1046 if (tb && pc != 0) {
1047 current_tb = tb_find_pc(pc);
1049 #endif
1050 while (tb != NULL) {
1051 n = (long)tb & 3;
1052 tb = (TranslationBlock *)((long)tb & ~3);
1053 #ifdef TARGET_HAS_PRECISE_SMC
1054 if (current_tb == tb &&
1055 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1056 /* If we are modifying the current TB, we must stop
1057 its execution. We could be more precise by checking
1058 that the modification is after the current PC, but it
1059 would require a specialized function to partially
1060 restore the CPU state */
1062 current_tb_modified = 1;
1063 cpu_restore_state(current_tb, env, pc, puc);
1064 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1065 &current_flags);
1067 #endif /* TARGET_HAS_PRECISE_SMC */
1068 tb_phys_invalidate(tb, addr);
1069 tb = tb->page_next[n];
1071 p->first_tb = NULL;
1072 #ifdef TARGET_HAS_PRECISE_SMC
1073 if (current_tb_modified) {
1074 /* we generate a block containing just the instruction
1075 modifying the memory. It will ensure that it cannot modify
1076 itself */
1077 env->current_tb = NULL;
1078 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1079 cpu_resume_from_signal(env, puc);
1081 #endif
1083 #endif
1085 /* add the tb in the target page and protect it if necessary */
1086 static inline void tb_alloc_page(TranslationBlock *tb,
1087 unsigned int n, target_ulong page_addr)
1089 PageDesc *p;
1090 TranslationBlock *last_first_tb;
1092 tb->page_addr[n] = page_addr;
1093 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1094 tb->page_next[n] = p->first_tb;
1095 last_first_tb = p->first_tb;
1096 p->first_tb = (TranslationBlock *)((long)tb | n);
1097 invalidate_page_bitmap(p);
1099 #if defined(TARGET_HAS_SMC) || 1
1101 #if defined(CONFIG_USER_ONLY)
1102 if (p->flags & PAGE_WRITE) {
1103 target_ulong addr;
1104 PageDesc *p2;
1105 int prot;
1107 /* force the host page as non writable (writes will have a
1108 page fault + mprotect overhead) */
1109 page_addr &= qemu_host_page_mask;
1110 prot = 0;
1111 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1112 addr += TARGET_PAGE_SIZE) {
1114 p2 = page_find (addr >> TARGET_PAGE_BITS);
1115 if (!p2)
1116 continue;
1117 prot |= p2->flags;
1118 p2->flags &= ~PAGE_WRITE;
1119 page_get_flags(addr);
1121 mprotect(g2h(page_addr), qemu_host_page_size,
1122 (prot & PAGE_BITS) & ~PAGE_WRITE);
1123 #ifdef DEBUG_TB_INVALIDATE
1124 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1125 page_addr);
1126 #endif
1128 #else
1129 /* if some code is already present, then the pages are already
1130 protected. So we handle the case where only the first TB is
1131 allocated in a physical page */
1132 if (!last_first_tb) {
1133 tlb_protect_code(page_addr);
1135 #endif
1137 #endif /* TARGET_HAS_SMC */
1140 /* Allocate a new translation block. Flush the translation buffer if
1141 too many translation blocks or too much generated code. */
1142 TranslationBlock *tb_alloc(target_ulong pc)
1144 TranslationBlock *tb;
1146 if (nb_tbs >= code_gen_max_blocks ||
1147 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1148 return NULL;
1149 tb = &tbs[nb_tbs++];
1150 tb->pc = pc;
1151 tb->cflags = 0;
1152 return tb;
1155 void tb_free(TranslationBlock *tb)
1157 /* In practice this is mostly used for single-use temporary TBs.
1158 Ignore the hard cases and just back up if this TB happens to
1159 be the last one generated. */
1160 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1161 code_gen_ptr = tb->tc_ptr;
1162 nb_tbs--;
1166 /* add a new TB and link it to the physical page tables. phys_page2 is
1167 (-1) to indicate that only one page contains the TB. */
1168 void tb_link_phys(TranslationBlock *tb,
1169 target_ulong phys_pc, target_ulong phys_page2)
1171 unsigned int h;
1172 TranslationBlock **ptb;
1174 /* Grab the mmap lock to stop another thread invalidating this TB
1175 before we are done. */
1176 mmap_lock();
1177 /* add in the physical hash table */
1178 h = tb_phys_hash_func(phys_pc);
1179 ptb = &tb_phys_hash[h];
1180 tb->phys_hash_next = *ptb;
1181 *ptb = tb;
1183 /* add in the page list */
1184 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1185 if (phys_page2 != -1)
1186 tb_alloc_page(tb, 1, phys_page2);
1187 else
1188 tb->page_addr[1] = -1;
1190 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1191 tb->jmp_next[0] = NULL;
1192 tb->jmp_next[1] = NULL;
1194 /* init original jump addresses */
1195 if (tb->tb_next_offset[0] != 0xffff)
1196 tb_reset_jump(tb, 0);
1197 if (tb->tb_next_offset[1] != 0xffff)
1198 tb_reset_jump(tb, 1);
1200 #ifdef DEBUG_TB_CHECK
1201 tb_page_check();
1202 #endif
1203 mmap_unlock();
1206 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1207 tb[1].tc_ptr. Return NULL if not found */
1208 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1210 int m_min, m_max, m;
1211 unsigned long v;
1212 TranslationBlock *tb;
1214 if (nb_tbs <= 0)
1215 return NULL;
1216 if (tc_ptr < (unsigned long)code_gen_buffer ||
1217 tc_ptr >= (unsigned long)code_gen_ptr)
1218 return NULL;
1219 /* binary search (cf Knuth) */
1220 m_min = 0;
1221 m_max = nb_tbs - 1;
1222 while (m_min <= m_max) {
1223 m = (m_min + m_max) >> 1;
1224 tb = &tbs[m];
1225 v = (unsigned long)tb->tc_ptr;
1226 if (v == tc_ptr)
1227 return tb;
1228 else if (tc_ptr < v) {
1229 m_max = m - 1;
1230 } else {
1231 m_min = m + 1;
1234 return &tbs[m_max];
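/* Usage sketch (not from the original source). tb_find_pc() maps a host code
   address back to the TB that generated it, relying on tbs[] being filled in
   ascending tc_ptr order:

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
       if (tb)
           cpu_restore_state(tb, env, (unsigned long)host_pc, NULL);

   ('host_pc' here is hypothetical.) This is how the precise-SMC code above
   recovers guest CPU state after a fault inside generated code. */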
1237 static void tb_reset_jump_recursive(TranslationBlock *tb);
1239 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1241 TranslationBlock *tb1, *tb_next, **ptb;
1242 unsigned int n1;
1244 tb1 = tb->jmp_next[n];
1245 if (tb1 != NULL) {
1246 /* find head of list */
1247 for(;;) {
1248 n1 = (long)tb1 & 3;
1249 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1250 if (n1 == 2)
1251 break;
1252 tb1 = tb1->jmp_next[n1];
1254 /* we are now sure that tb jumps to tb1 */
1255 tb_next = tb1;
1257 /* remove tb from the jmp_first list */
1258 ptb = &tb_next->jmp_first;
1259 for(;;) {
1260 tb1 = *ptb;
1261 n1 = (long)tb1 & 3;
1262 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1263 if (n1 == n && tb1 == tb)
1264 break;
1265 ptb = &tb1->jmp_next[n1];
1267 *ptb = tb->jmp_next[n];
1268 tb->jmp_next[n] = NULL;
1270 /* suppress the jump to next tb in generated code */
1271 tb_reset_jump(tb, n);
1273 /* suppress jumps in the tb on which we could have jumped */
1274 tb_reset_jump_recursive(tb_next);
1278 static void tb_reset_jump_recursive(TranslationBlock *tb)
1280 tb_reset_jump_recursive2(tb, 0);
1281 tb_reset_jump_recursive2(tb, 1);
1284 #if defined(TARGET_HAS_ICE)
1285 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1287 target_phys_addr_t addr;
1288 target_ulong pd;
1289 ram_addr_t ram_addr;
1290 PhysPageDesc *p;
1292 addr = cpu_get_phys_page_debug(env, pc);
1293 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1294 if (!p) {
1295 pd = IO_MEM_UNASSIGNED;
1296 } else {
1297 pd = p->phys_offset;
1299 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1300 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1302 #endif
1304 /* Add a watchpoint. */
1305 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1306 int flags, CPUWatchpoint **watchpoint)
1308 target_ulong len_mask = ~(len - 1);
1309 CPUWatchpoint *wp;
1311 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1312 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1313 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1314 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1315 return -EINVAL;
1317 wp = qemu_malloc(sizeof(*wp));
1319 wp->vaddr = addr;
1320 wp->len_mask = len_mask;
1321 wp->flags = flags;
1323 /* keep all GDB-injected watchpoints in front */
1324 if (flags & BP_GDB)
1325 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1326 else
1327 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1329 tlb_flush_page(env, addr);
1331 if (watchpoint)
1332 *watchpoint = wp;
1333 return 0;
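/* Usage sketch (not from the original source). Watchpoint lengths must be a
   power of two (1, 2, 4 or 8) and the address must be aligned to that length,
   because matching is done through len_mask:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB, &wp) < 0)
           ...;   returns -EINVAL for an unaligned or odd-sized range

   The tlb_flush_page() call makes the next access to that page take the slow
   path, where the watchpoint list can be consulted. */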
1336 /* Remove a specific watchpoint. */
1337 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1338 int flags)
1340 target_ulong len_mask = ~(len - 1);
1341 CPUWatchpoint *wp;
1343 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1344 if (addr == wp->vaddr && len_mask == wp->len_mask
1345 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1346 cpu_watchpoint_remove_by_ref(env, wp);
1347 return 0;
1350 return -ENOENT;
1353 /* Remove a specific watchpoint by reference. */
1354 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1356 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1358 tlb_flush_page(env, watchpoint->vaddr);
1360 qemu_free(watchpoint);
1363 /* Remove all matching watchpoints. */
1364 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1366 CPUWatchpoint *wp, *next;
1368 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1369 if (wp->flags & mask)
1370 cpu_watchpoint_remove_by_ref(env, wp);
1374 /* Add a breakpoint. */
1375 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1376 CPUBreakpoint **breakpoint)
1378 #if defined(TARGET_HAS_ICE)
1379 CPUBreakpoint *bp;
1381 bp = qemu_malloc(sizeof(*bp));
1383 bp->pc = pc;
1384 bp->flags = flags;
1386 /* keep all GDB-injected breakpoints in front */
1387 if (flags & BP_GDB)
1388 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1389 else
1390 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1392 breakpoint_invalidate(env, pc);
1394 if (breakpoint)
1395 *breakpoint = bp;
1396 return 0;
1397 #else
1398 return -ENOSYS;
1399 #endif
1402 /* Remove a specific breakpoint. */
1403 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1405 #if defined(TARGET_HAS_ICE)
1406 CPUBreakpoint *bp;
1408 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1409 if (bp->pc == pc && bp->flags == flags) {
1410 cpu_breakpoint_remove_by_ref(env, bp);
1411 return 0;
1414 return -ENOENT;
1415 #else
1416 return -ENOSYS;
1417 #endif
1420 /* Remove a specific breakpoint by reference. */
1421 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1423 #if defined(TARGET_HAS_ICE)
1424 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1426 breakpoint_invalidate(env, breakpoint->pc);
1428 qemu_free(breakpoint);
1429 #endif
1432 /* Remove all matching breakpoints. */
1433 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1435 #if defined(TARGET_HAS_ICE)
1436 CPUBreakpoint *bp, *next;
1438 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1439 if (bp->flags & mask)
1440 cpu_breakpoint_remove_by_ref(env, bp);
1442 #endif
1445 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1446 CPU loop after each instruction */
1447 void cpu_single_step(CPUState *env, int enabled)
1449 #if defined(TARGET_HAS_ICE)
1450 if (env->singlestep_enabled != enabled) {
1451 env->singlestep_enabled = enabled;
1452 /* must flush all the translated code to avoid inconsistencies */
1453 /* XXX: only flush what is necessary */
1454 tb_flush(env);
1456 #endif
1459 /* enable or disable low levels log */
1460 void cpu_set_log(int log_flags)
1462 loglevel = log_flags;
1463 if (loglevel && !logfile) {
1464 logfile = fopen(logfilename, log_append ? "a" : "w");
1465 if (!logfile) {
1466 perror(logfilename);
1467 _exit(1);
1469 #if !defined(CONFIG_SOFTMMU)
1470 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1472 static char logfile_buf[4096];
1473 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1475 #else
1476 setvbuf(logfile, NULL, _IOLBF, 0);
1477 #endif
1478 log_append = 1;
1480 if (!loglevel && logfile) {
1481 fclose(logfile);
1482 logfile = NULL;
1486 void cpu_set_log_filename(const char *filename)
1488 logfilename = strdup(filename);
1489 if (logfile) {
1490 fclose(logfile);
1491 logfile = NULL;
1493 cpu_set_log(loglevel);
1496 /* mask must never be zero, except for A20 change call */
1497 void cpu_interrupt(CPUState *env, int mask)
1499 #if !defined(USE_NPTL)
1500 TranslationBlock *tb;
1501 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1502 #endif
1503 int old_mask;
1505 if (mask & CPU_INTERRUPT_EXIT) {
1506 env->exit_request = 1;
1507 mask &= ~CPU_INTERRUPT_EXIT;
1510 old_mask = env->interrupt_request;
1511 env->interrupt_request |= mask;
1512 #if defined(USE_NPTL)
1513 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1514 problem and hope the cpu will stop of its own accord. For userspace
1515 emulation this often isn't actually as bad as it sounds. Often
1516 signals are used primarily to interrupt blocking syscalls. */
1517 #else
1518 if (use_icount) {
1519 env->icount_decr.u16.high = 0xffff;
1520 #ifndef CONFIG_USER_ONLY
1521 if (!can_do_io(env)
1522 && (mask & ~old_mask) != 0) {
1523 cpu_abort(env, "Raised interrupt while not in I/O function");
1525 #endif
1526 } else {
1527 tb = env->current_tb;
1528 /* if the cpu is currently executing code, we must unlink it and
1529 all the potentially executing TB */
1530 if (tb && !testandset(&interrupt_lock)) {
1531 env->current_tb = NULL;
1532 tb_reset_jump_recursive(tb);
1533 resetlock(&interrupt_lock);
1536 #endif
1539 void cpu_reset_interrupt(CPUState *env, int mask)
1541 env->interrupt_request &= ~mask;
1544 const CPULogItem cpu_log_items[] = {
1545 { CPU_LOG_TB_OUT_ASM, "out_asm",
1546 "show generated host assembly code for each compiled TB" },
1547 { CPU_LOG_TB_IN_ASM, "in_asm",
1548 "show target assembly code for each compiled TB" },
1549 { CPU_LOG_TB_OP, "op",
1550 "show micro ops for each compiled TB" },
1551 { CPU_LOG_TB_OP_OPT, "op_opt",
1552 "show micro ops "
1553 #ifdef TARGET_I386
1554 "before eflags optimization and "
1555 #endif
1556 "after liveness analysis" },
1557 { CPU_LOG_INT, "int",
1558 "show interrupts/exceptions in short format" },
1559 { CPU_LOG_EXEC, "exec",
1560 "show trace before each executed TB (lots of logs)" },
1561 { CPU_LOG_TB_CPU, "cpu",
1562 "show CPU state before block translation" },
1563 #ifdef TARGET_I386
1564 { CPU_LOG_PCALL, "pcall",
1565 "show protected mode far calls/returns/exceptions" },
1566 { CPU_LOG_RESET, "cpu_reset",
1567 "show CPU state before CPU resets" },
1568 #endif
1569 #ifdef DEBUG_IOPORT
1570 { CPU_LOG_IOPORT, "ioport",
1571 "show all i/o ports accesses" },
1572 #endif
1573 { 0, NULL, NULL },
1576 static int cmp1(const char *s1, int n, const char *s2)
1578 if (strlen(s2) != n)
1579 return 0;
1580 return memcmp(s1, s2, n) == 0;
1583 /* takes a comma separated list of log masks. Return 0 if error. */
1584 int cpu_str_to_log_mask(const char *str)
1586 const CPULogItem *item;
1587 int mask;
1588 const char *p, *p1;
1590 p = str;
1591 mask = 0;
1592 for(;;) {
1593 p1 = strchr(p, ',');
1594 if (!p1)
1595 p1 = p + strlen(p);
1596 if(cmp1(p,p1-p,"all")) {
1597 for(item = cpu_log_items; item->mask != 0; item++) {
1598 mask |= item->mask;
1600 } else {
1601 for(item = cpu_log_items; item->mask != 0; item++) {
1602 if (cmp1(p, p1 - p, item->name))
1603 goto found;
1605 return 0;
1607 found:
1608 mask |= item->mask;
1609 if (*p1 != ',')
1610 break;
1611 p = p1 + 1;
1613 return mask;
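/* Usage sketch (not from the original source). The string is the
   comma-separated list given to QEMU's -d option:

       int mask = cpu_str_to_log_mask("in_asm,op,int");
       if (!mask)
           ...;            an unknown item makes the function return 0
       cpu_set_log(mask);  opens the log file (default /tmp/qemu.log)

   The special item "all" turns on every entry of cpu_log_items[]. */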
1616 void cpu_abort(CPUState *env, const char *fmt, ...)
1618 va_list ap;
1619 va_list ap2;
1621 va_start(ap, fmt);
1622 va_copy(ap2, ap);
1623 fprintf(stderr, "qemu: fatal: ");
1624 vfprintf(stderr, fmt, ap);
1625 fprintf(stderr, "\n");
1626 #ifdef TARGET_I386
1627 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1628 #else
1629 cpu_dump_state(env, stderr, fprintf, 0);
1630 #endif
1631 if (qemu_log_enabled()) {
1632 qemu_log("qemu: fatal: ");
1633 qemu_log_vprintf(fmt, ap2);
1634 qemu_log("\n");
1635 #ifdef TARGET_I386
1636 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1637 #else
1638 log_cpu_state(env, 0);
1639 #endif
1640 qemu_log_flush();
1641 qemu_log_close();
1643 va_end(ap2);
1644 va_end(ap);
1645 abort();
1648 CPUState *cpu_copy(CPUState *env)
1650 CPUState *new_env = cpu_init(env->cpu_model_str);
1651 CPUState *next_cpu = new_env->next_cpu;
1652 int cpu_index = new_env->cpu_index;
1653 #if defined(TARGET_HAS_ICE)
1654 CPUBreakpoint *bp;
1655 CPUWatchpoint *wp;
1656 #endif
1658 memcpy(new_env, env, sizeof(CPUState));
1660 /* Preserve chaining and index. */
1661 new_env->next_cpu = next_cpu;
1662 new_env->cpu_index = cpu_index;
1664 /* Clone all break/watchpoints.
1665 Note: Once we support ptrace with hw-debug register access, make sure
1666 BP_CPU break/watchpoints are handled correctly on clone. */
1667 TAILQ_INIT(&env->breakpoints);
1668 TAILQ_INIT(&env->watchpoints);
1669 #if defined(TARGET_HAS_ICE)
1670 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1671 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1673 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1674 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1675 wp->flags, NULL);
1677 #endif
1679 return new_env;
1682 #if !defined(CONFIG_USER_ONLY)
1684 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1686 unsigned int i;
1688 /* Discard jump cache entries for any tb which might potentially
1689 overlap the flushed page. */
1690 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1691 memset (&env->tb_jmp_cache[i], 0,
1692 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1694 i = tb_jmp_cache_hash_page(addr);
1695 memset (&env->tb_jmp_cache[i], 0,
1696 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1699 /* NOTE: if flush_global is true, also flush global entries (not
1700 implemented yet) */
1701 void tlb_flush(CPUState *env, int flush_global)
1703 int i;
1705 #if defined(DEBUG_TLB)
1706 printf("tlb_flush:\n");
1707 #endif
1708 /* must reset current TB so that interrupts cannot modify the
1709 links while we are modifying them */
1710 env->current_tb = NULL;
1712 for(i = 0; i < CPU_TLB_SIZE; i++) {
1713 env->tlb_table[0][i].addr_read = -1;
1714 env->tlb_table[0][i].addr_write = -1;
1715 env->tlb_table[0][i].addr_code = -1;
1716 env->tlb_table[1][i].addr_read = -1;
1717 env->tlb_table[1][i].addr_write = -1;
1718 env->tlb_table[1][i].addr_code = -1;
1719 #if (NB_MMU_MODES >= 3)
1720 env->tlb_table[2][i].addr_read = -1;
1721 env->tlb_table[2][i].addr_write = -1;
1722 env->tlb_table[2][i].addr_code = -1;
1723 #if (NB_MMU_MODES == 4)
1724 env->tlb_table[3][i].addr_read = -1;
1725 env->tlb_table[3][i].addr_write = -1;
1726 env->tlb_table[3][i].addr_code = -1;
1727 #endif
1728 #endif
1731 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1733 #ifdef USE_KQEMU
1734 if (env->kqemu_enabled) {
1735 kqemu_flush(env, flush_global);
1737 #endif
1738 tlb_flush_count++;
1741 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1743 if (addr == (tlb_entry->addr_read &
1744 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1745 addr == (tlb_entry->addr_write &
1746 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1747 addr == (tlb_entry->addr_code &
1748 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1749 tlb_entry->addr_read = -1;
1750 tlb_entry->addr_write = -1;
1751 tlb_entry->addr_code = -1;
1755 void tlb_flush_page(CPUState *env, target_ulong addr)
1757 int i;
1759 #if defined(DEBUG_TLB)
1760 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1761 #endif
1762 /* must reset current TB so that interrupts cannot modify the
1763 links while we are modifying them */
1764 env->current_tb = NULL;
1766 addr &= TARGET_PAGE_MASK;
1767 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1768 tlb_flush_entry(&env->tlb_table[0][i], addr);
1769 tlb_flush_entry(&env->tlb_table[1][i], addr);
1770 #if (NB_MMU_MODES >= 3)
1771 tlb_flush_entry(&env->tlb_table[2][i], addr);
1772 #if (NB_MMU_MODES == 4)
1773 tlb_flush_entry(&env->tlb_table[3][i], addr);
1774 #endif
1775 #endif
1777 tlb_flush_jmp_cache(env, addr);
1779 #ifdef USE_KQEMU
1780 if (env->kqemu_enabled) {
1781 kqemu_flush_page(env, addr);
1783 #endif
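/* Explanatory addition: the TLB is a direct-mapped, CPU_TLB_SIZE-entry table
   per MMU mode, indexed by the virtual page number:

       i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

   so tlb_flush_page() only has to clear one slot per mode, while tlb_flush()
   clears them all; both also drop tb_jmp_cache entries so stale chained TBs
   are not reused for the flushed page. */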
1786 /* update the TLBs so that writes to code in the virtual page 'addr'
1787 can be detected */
1788 static void tlb_protect_code(ram_addr_t ram_addr)
1790 cpu_physical_memory_reset_dirty(ram_addr,
1791 ram_addr + TARGET_PAGE_SIZE,
1792 CODE_DIRTY_FLAG);
1795 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1796 tested for self modifying code */
1797 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1798 target_ulong vaddr)
1800 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1803 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1804 unsigned long start, unsigned long length)
1806 unsigned long addr;
1807 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1808 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1809 if ((addr - start) < length) {
1810 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1815 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1816 int dirty_flags)
1818 CPUState *env;
1819 unsigned long length, start1;
1820 int i, mask, len;
1821 uint8_t *p;
1823 start &= TARGET_PAGE_MASK;
1824 end = TARGET_PAGE_ALIGN(end);
1826 length = end - start;
1827 if (length == 0)
1828 return;
1829 len = length >> TARGET_PAGE_BITS;
1830 #ifdef USE_KQEMU
1831 /* XXX: should not depend on cpu context */
1832 env = first_cpu;
1833 if (env->kqemu_enabled) {
1834 ram_addr_t addr;
1835 addr = start;
1836 for(i = 0; i < len; i++) {
1837 kqemu_set_notdirty(env, addr);
1838 addr += TARGET_PAGE_SIZE;
1841 #endif
1842 mask = ~dirty_flags;
1843 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1844 for(i = 0; i < len; i++)
1845 p[i] &= mask;
1847 /* we modify the TLB cache so that the dirty bit will be set again
1848 when accessing the range */
1849 start1 = start + (unsigned long)phys_ram_base;
1850 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1851 for(i = 0; i < CPU_TLB_SIZE; i++)
1852 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1853 for(i = 0; i < CPU_TLB_SIZE; i++)
1854 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1855 #if (NB_MMU_MODES >= 3)
1856 for(i = 0; i < CPU_TLB_SIZE; i++)
1857 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1858 #if (NB_MMU_MODES == 4)
1859 for(i = 0; i < CPU_TLB_SIZE; i++)
1860 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1861 #endif
1862 #endif
1866 int cpu_physical_memory_set_dirty_tracking(int enable)
1868 in_migration = enable;
1869 return 0;
1872 int cpu_physical_memory_get_dirty_tracking(void)
1874 return in_migration;
1877 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1879 if (kvm_enabled())
1880 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1883 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1885 ram_addr_t ram_addr;
1887 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1888 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1889 tlb_entry->addend - (unsigned long)phys_ram_base;
1890 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1891 tlb_entry->addr_write |= TLB_NOTDIRTY;
1896 /* update the TLB according to the current state of the dirty bits */
1897 void cpu_tlb_update_dirty(CPUState *env)
1899 int i;
1900 for(i = 0; i < CPU_TLB_SIZE; i++)
1901 tlb_update_dirty(&env->tlb_table[0][i]);
1902 for(i = 0; i < CPU_TLB_SIZE; i++)
1903 tlb_update_dirty(&env->tlb_table[1][i]);
1904 #if (NB_MMU_MODES >= 3)
1905 for(i = 0; i < CPU_TLB_SIZE; i++)
1906 tlb_update_dirty(&env->tlb_table[2][i]);
1907 #if (NB_MMU_MODES == 4)
1908 for(i = 0; i < CPU_TLB_SIZE; i++)
1909 tlb_update_dirty(&env->tlb_table[3][i]);
1910 #endif
1911 #endif
1914 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1916 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1917 tlb_entry->addr_write = vaddr;
1920 /* update the TLB corresponding to virtual page vaddr
1921 so that it is no longer dirty */
1922 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1924 int i;
1926 vaddr &= TARGET_PAGE_MASK;
1927 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1928 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1929 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1930 #if (NB_MMU_MODES >= 3)
1931 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1932 #if (NB_MMU_MODES == 4)
1933 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1934 #endif
1935 #endif
1938 /* add a new TLB entry. At most one entry for a given virtual address
1939 is permitted. Return 0 if OK or 2 if the page could not be mapped
1940 (can only happen in non SOFTMMU mode for I/O pages or pages
1941 conflicting with the host address space). */
1942 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1943 target_phys_addr_t paddr, int prot,
1944 int mmu_idx, int is_softmmu)
1946 PhysPageDesc *p;
1947 unsigned long pd;
1948 unsigned int index;
1949 target_ulong address;
1950 target_ulong code_address;
1951 target_phys_addr_t addend;
1952 int ret;
1953 CPUTLBEntry *te;
1954 CPUWatchpoint *wp;
1955 target_phys_addr_t iotlb;
1957 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1958 if (!p) {
1959 pd = IO_MEM_UNASSIGNED;
1960 } else {
1961 pd = p->phys_offset;
1963 #if defined(DEBUG_TLB)
1964 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1965 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1966 #endif
1968 ret = 0;
1969 address = vaddr;
1970 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1971 /* IO memory case (romd handled later) */
1972 address |= TLB_MMIO;
1974 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1975 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1976 /* Normal RAM. */
1977 iotlb = pd & TARGET_PAGE_MASK;
1978 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1979 iotlb |= IO_MEM_NOTDIRTY;
1980 else
1981 iotlb |= IO_MEM_ROM;
1982 } else {
1983 /* IO handlers are currently passed a physical address.
1984 It would be nice to pass an offset from the base address
1985 of that region. This would avoid having to special case RAM,
1986 and avoid full address decoding in every device.
1987 We can't use the high bits of pd for this because
1988 IO_MEM_ROMD uses these as a ram address. */
1989 iotlb = (pd & ~TARGET_PAGE_MASK);
1990 if (p) {
1991 iotlb += p->region_offset;
1992 } else {
1993 iotlb += paddr;
1997 code_address = address;
1998 /* Make accesses to pages with watchpoints go via the
1999 watchpoint trap routines. */
2000 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2001 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2002 iotlb = io_mem_watch + paddr;
2003 /* TODO: The memory case can be optimized by not trapping
2004 reads of pages with a write breakpoint. */
2005 address |= TLB_MMIO;
2009 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2010 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2011 te = &env->tlb_table[mmu_idx][index];
2012 te->addend = addend - vaddr;
2013 if (prot & PAGE_READ) {
2014 te->addr_read = address;
2015 } else {
2016 te->addr_read = -1;
2019 if (prot & PAGE_EXEC) {
2020 te->addr_code = code_address;
2021 } else {
2022 te->addr_code = -1;
2024 if (prot & PAGE_WRITE) {
2025 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2026 (pd & IO_MEM_ROMD)) {
2027 /* Write access calls the I/O callback. */
2028 te->addr_write = address | TLB_MMIO;
2029 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2030 !cpu_physical_memory_is_dirty(pd)) {
2031 te->addr_write = address | TLB_NOTDIRTY;
2032 } else {
2033 te->addr_write = address;
2035 } else {
2036 te->addr_write = -1;
2038 return ret;
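/* Illustrative sketch: a target's MMU fault handler (e.g. a tlb_fill()
   implementation) typically resolves the faulting guest virtual address to a
   guest physical address and protection bits, then installs the mapping with
   a call along these lines.  "vaddr", "paddr", "prot" and "mmu_idx" are
   assumed to come from the target-specific page-table walk.

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         prot & (PAGE_READ | PAGE_WRITE | PAGE_EXEC),
                         mmu_idx, 1);
*/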
2041 #else
2043 void tlb_flush(CPUState *env, int flush_global)
2047 void tlb_flush_page(CPUState *env, target_ulong addr)
2051 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2052 target_phys_addr_t paddr, int prot,
2053 int mmu_idx, int is_softmmu)
2055 return 0;
2058 /* dump memory mappings */
2059 void page_dump(FILE *f)
2061 unsigned long start, end;
2062 int i, j, prot, prot1;
2063 PageDesc *p;
2065 fprintf(f, "%-8s %-8s %-8s %s\n",
2066 "start", "end", "size", "prot");
2067 start = -1;
2068 end = -1;
2069 prot = 0;
2070 for(i = 0; i <= L1_SIZE; i++) {
2071 if (i < L1_SIZE)
2072 p = l1_map[i];
2073 else
2074 p = NULL;
2075 for(j = 0;j < L2_SIZE; j++) {
2076 if (!p)
2077 prot1 = 0;
2078 else
2079 prot1 = p[j].flags;
2080 if (prot1 != prot) {
2081 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2082 if (start != -1) {
2083 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2084 start, end, end - start,
2085 prot & PAGE_READ ? 'r' : '-',
2086 prot & PAGE_WRITE ? 'w' : '-',
2087 prot & PAGE_EXEC ? 'x' : '-');
2089 if (prot1 != 0)
2090 start = end;
2091 else
2092 start = -1;
2093 prot = prot1;
2095 if (!p)
2096 break;
2101 int page_get_flags(target_ulong address)
2103 PageDesc *p;
2105 p = page_find(address >> TARGET_PAGE_BITS);
2106 if (!p)
2107 return 0;
2108 return p->flags;
2111 /* modify the flags of a page and invalidate the code if
2112 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2113 depending on PAGE_WRITE */
2114 void page_set_flags(target_ulong start, target_ulong end, int flags)
2116 PageDesc *p;
2117 target_ulong addr;
2119 /* mmap_lock should already be held. */
2120 start = start & TARGET_PAGE_MASK;
2121 end = TARGET_PAGE_ALIGN(end);
2122 if (flags & PAGE_WRITE)
2123 flags |= PAGE_WRITE_ORG;
2124 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2125 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2126 /* We may be called for host regions that are outside guest
2127 address space. */
2128 if (!p)
2129 return;
2130 /* if the write protection is set, then we invalidate the code
2131 inside */
2132 if (!(p->flags & PAGE_WRITE) &&
2133 (flags & PAGE_WRITE) &&
2134 p->first_tb) {
2135 tb_invalidate_phys_page(addr, 0, NULL);
2137 p->flags = flags;
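/* Illustrative sketch: the user-mode mmap emulation marks a freshly mapped
   guest range roughly like this; the exact flag combination is an assumption
   for the example (PAGE_VALID and the protection bits are the ones tested by
   page_check_range() below).

       page_set_flags(start, start + len, prot | PAGE_VALID);
*/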
2141 int page_check_range(target_ulong start, target_ulong len, int flags)
2143 PageDesc *p;
2144 target_ulong end;
2145 target_ulong addr;
2147 if (start + len < start)
2148 /* we've wrapped around */
2149 return -1;
2151 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2152 start = start & TARGET_PAGE_MASK;
2154 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2155 p = page_find(addr >> TARGET_PAGE_BITS);
2156 if( !p )
2157 return -1;
2158 if( !(p->flags & PAGE_VALID) )
2159 return -1;
2161 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2162 return -1;
2163 if (flags & PAGE_WRITE) {
2164 if (!(p->flags & PAGE_WRITE_ORG))
2165 return -1;
2166 /* unprotect the page if it was put read-only because it
2167 contains translated code */
2168 if (!(p->flags & PAGE_WRITE)) {
2169 if (!page_unprotect(addr, 0, NULL))
2170 return -1;
2172 return 0;
2175 return 0;
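/* Illustrative sketch: an emulation path that is about to copy data to and
   from a guest buffer could validate the range first; "guest_addr" and
   "size" are assumptions for the example.

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -1;
*/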
2178 /* called from signal handler: invalidate the code and unprotect the
2179 page. Return TRUE if the fault was successfully handled. */
2180 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2182 unsigned int page_index, prot, pindex;
2183 PageDesc *p, *p1;
2184 target_ulong host_start, host_end, addr;
2186 /* Technically this isn't safe inside a signal handler. However we
2187 know this only ever happens in a synchronous SEGV handler, so in
2188 practice it seems to be ok. */
2189 mmap_lock();
2191 host_start = address & qemu_host_page_mask;
2192 page_index = host_start >> TARGET_PAGE_BITS;
2193 p1 = page_find(page_index);
2194 if (!p1) {
2195 mmap_unlock();
2196 return 0;
2198 host_end = host_start + qemu_host_page_size;
2199 p = p1;
2200 prot = 0;
2201 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2202 prot |= p->flags;
2203 p++;
2205 /* if the page was really writable, then we change its
2206 protection back to writable */
2207 if (prot & PAGE_WRITE_ORG) {
2208 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2209 if (!(p1[pindex].flags & PAGE_WRITE)) {
2210 mprotect((void *)g2h(host_start), qemu_host_page_size,
2211 (prot & PAGE_BITS) | PAGE_WRITE);
2212 p1[pindex].flags |= PAGE_WRITE;
2213 /* and since the content will be modified, we must invalidate
2214 the corresponding translated code. */
2215 tb_invalidate_phys_page(address, pc, puc);
2216 #ifdef DEBUG_TB_CHECK
2217 tb_invalidate_check(address);
2218 #endif
2219 mmap_unlock();
2220 return 1;
2223 mmap_unlock();
2224 return 0;
2227 static inline void tlb_set_dirty(CPUState *env,
2228 unsigned long addr, target_ulong vaddr)
2231 #endif /* defined(CONFIG_USER_ONLY) */
2233 #if !defined(CONFIG_USER_ONLY)
2235 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2236 ram_addr_t memory, ram_addr_t region_offset);
2237 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2238 ram_addr_t orig_memory, ram_addr_t region_offset);
2239 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2240 need_subpage) \
2241 do { \
2242 if (addr > start_addr) \
2243 start_addr2 = 0; \
2244 else { \
2245 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2246 if (start_addr2 > 0) \
2247 need_subpage = 1; \
2250 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2251 end_addr2 = TARGET_PAGE_SIZE - 1; \
2252 else { \
2253 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2254 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2255 need_subpage = 1; \
2257 } while (0)
2259 /* register physical memory. 'size' must be a multiple of the target
2260 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2261 io memory page. The address used when calling the IO function is
2262 the offset from the start of the region, plus region_offset. Both
2263 start_addr and region_offset are rounded down to a page boundary
2264 before calculating this offset. This should not be a problem unless
2265 the low bits of start_addr and region_offset differ. */
2266 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2267 ram_addr_t size,
2268 ram_addr_t phys_offset,
2269 ram_addr_t region_offset)
2271 target_phys_addr_t addr, end_addr;
2272 PhysPageDesc *p;
2273 CPUState *env;
2274 ram_addr_t orig_size = size;
2275 void *subpage;
2277 #ifdef USE_KQEMU
2278 /* XXX: should not depend on cpu context */
2279 env = first_cpu;
2280 if (env->kqemu_enabled) {
2281 kqemu_set_phys_mem(start_addr, size, phys_offset);
2283 #endif
2284 if (kvm_enabled())
2285 kvm_set_phys_mem(start_addr, size, phys_offset);
2287 if (phys_offset == IO_MEM_UNASSIGNED) {
2288 region_offset = start_addr;
2290 region_offset &= TARGET_PAGE_MASK;
2291 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2292 end_addr = start_addr + (target_phys_addr_t)size;
2293 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2294 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2295 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2296 ram_addr_t orig_memory = p->phys_offset;
2297 target_phys_addr_t start_addr2, end_addr2;
2298 int need_subpage = 0;
2300 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2301 need_subpage);
2302 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2303 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2304 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2305 &p->phys_offset, orig_memory,
2306 p->region_offset);
2307 } else {
2308 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2309 >> IO_MEM_SHIFT];
2311 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2312 region_offset);
2313 p->region_offset = 0;
2314 } else {
2315 p->phys_offset = phys_offset;
2316 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2317 (phys_offset & IO_MEM_ROMD))
2318 phys_offset += TARGET_PAGE_SIZE;
2320 } else {
2321 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2322 p->phys_offset = phys_offset;
2323 p->region_offset = region_offset;
2324 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2325 (phys_offset & IO_MEM_ROMD)) {
2326 phys_offset += TARGET_PAGE_SIZE;
2327 } else {
2328 target_phys_addr_t start_addr2, end_addr2;
2329 int need_subpage = 0;
2331 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2332 end_addr2, need_subpage);
2334 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2335 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2336 &p->phys_offset, IO_MEM_UNASSIGNED,
2337 addr & TARGET_PAGE_MASK);
2338 subpage_register(subpage, start_addr2, end_addr2,
2339 phys_offset, region_offset);
2340 p->region_offset = 0;
2344 region_offset += TARGET_PAGE_SIZE;
2347 /* since each CPU stores ram addresses in its TLB cache, we must
2348 reset the modified entries */
2349 /* XXX: slow ! */
2350 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2351 tlb_flush(env, 1);
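/* Illustrative sketch: most callers go through the plain
   cpu_register_physical_memory() wrapper, which takes no explicit
   region_offset; e.g. returning a range to the "unassigned" state looks
   like this ("base" and "size" are assumptions for the example).

       cpu_register_physical_memory(base, size, IO_MEM_UNASSIGNED);
*/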
2355 /* XXX: temporary until new memory mapping API */
2356 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2358 PhysPageDesc *p;
2360 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2361 if (!p)
2362 return IO_MEM_UNASSIGNED;
2363 return p->phys_offset;
2366 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2368 if (kvm_enabled())
2369 kvm_coalesce_mmio_region(addr, size);
2372 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2374 if (kvm_enabled())
2375 kvm_uncoalesce_mmio_region(addr, size);
2378 /* XXX: better than nothing */
2379 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2381 ram_addr_t addr;
2382 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2383 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2384 (uint64_t)size, (uint64_t)phys_ram_size);
2385 abort();
2387 addr = phys_ram_alloc_offset;
2388 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2389 return addr;
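/* Illustrative sketch: board code allocates backing RAM here and then maps
   it at a guest physical address ("base" and "ram_size" are assumptions for
   the example).

       ram_addr_t off = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(base, ram_size, off | IO_MEM_RAM);
*/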
2392 void qemu_ram_free(ram_addr_t addr)
2396 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2398 #ifdef DEBUG_UNASSIGNED
2399 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2400 #endif
2401 #if defined(TARGET_SPARC)
2402 do_unassigned_access(addr, 0, 0, 0, 1);
2403 #endif
2404 return 0;
2407 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2409 #ifdef DEBUG_UNASSIGNED
2410 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2411 #endif
2412 #if defined(TARGET_SPARC)
2413 do_unassigned_access(addr, 0, 0, 0, 2);
2414 #endif
2415 return 0;
2418 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2420 #ifdef DEBUG_UNASSIGNED
2421 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2422 #endif
2423 #if defined(TARGET_SPARC)
2424 do_unassigned_access(addr, 0, 0, 0, 4);
2425 #endif
2426 return 0;
2429 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2431 #ifdef DEBUG_UNASSIGNED
2432 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2433 #endif
2434 #if defined(TARGET_SPARC)
2435 do_unassigned_access(addr, 1, 0, 0, 1);
2436 #endif
2439 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2441 #ifdef DEBUG_UNASSIGNED
2442 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2443 #endif
2444 #if defined(TARGET_SPARC)
2445 do_unassigned_access(addr, 1, 0, 0, 2);
2446 #endif
2449 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2451 #ifdef DEBUG_UNASSIGNED
2452 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2453 #endif
2454 #if defined(TARGET_SPARC)
2455 do_unassigned_access(addr, 1, 0, 0, 4);
2456 #endif
2459 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2460 unassigned_mem_readb,
2461 unassigned_mem_readw,
2462 unassigned_mem_readl,
2465 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2466 unassigned_mem_writeb,
2467 unassigned_mem_writew,
2468 unassigned_mem_writel,
2471 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2472 uint32_t val)
2474 int dirty_flags;
2475 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2476 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2477 #if !defined(CONFIG_USER_ONLY)
2478 tb_invalidate_phys_page_fast(ram_addr, 1);
2479 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2480 #endif
2482 stb_p(phys_ram_base + ram_addr, val);
2483 #ifdef USE_KQEMU
2484 if (cpu_single_env->kqemu_enabled &&
2485 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2486 kqemu_modify_page(cpu_single_env, ram_addr);
2487 #endif
2488 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2489 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2490 /* we remove the notdirty callback only if the code has been
2491 flushed */
2492 if (dirty_flags == 0xff)
2493 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2496 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2497 uint32_t val)
2499 int dirty_flags;
2500 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2501 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2502 #if !defined(CONFIG_USER_ONLY)
2503 tb_invalidate_phys_page_fast(ram_addr, 2);
2504 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2505 #endif
2507 stw_p(phys_ram_base + ram_addr, val);
2508 #ifdef USE_KQEMU
2509 if (cpu_single_env->kqemu_enabled &&
2510 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2511 kqemu_modify_page(cpu_single_env, ram_addr);
2512 #endif
2513 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2514 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2515 /* we remove the notdirty callback only if the code has been
2516 flushed */
2517 if (dirty_flags == 0xff)
2518 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2521 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2522 uint32_t val)
2524 int dirty_flags;
2525 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2526 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2527 #if !defined(CONFIG_USER_ONLY)
2528 tb_invalidate_phys_page_fast(ram_addr, 4);
2529 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2530 #endif
2532 stl_p(phys_ram_base + ram_addr, val);
2533 #ifdef USE_KQEMU
2534 if (cpu_single_env->kqemu_enabled &&
2535 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2536 kqemu_modify_page(cpu_single_env, ram_addr);
2537 #endif
2538 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2539 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2540 /* we remove the notdirty callback only if the code has been
2541 flushed */
2542 if (dirty_flags == 0xff)
2543 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2546 static CPUReadMemoryFunc *error_mem_read[3] = {
2547 NULL, /* never used */
2548 NULL, /* never used */
2549 NULL, /* never used */
2552 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2553 notdirty_mem_writeb,
2554 notdirty_mem_writew,
2555 notdirty_mem_writel,
2558 /* Generate a debug exception if a watchpoint has been hit. */
2559 static void check_watchpoint(int offset, int len_mask, int flags)
2561 CPUState *env = cpu_single_env;
2562 target_ulong pc, cs_base;
2563 TranslationBlock *tb;
2564 target_ulong vaddr;
2565 CPUWatchpoint *wp;
2566 int cpu_flags;
2568 if (env->watchpoint_hit) {
2569 /* We re-entered the check after replacing the TB. Now raise
2570 * the debug interrupt so that it will trigger after the
2571 * current instruction. */
2572 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2573 return;
2575 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2576 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2577 if ((vaddr == (wp->vaddr & len_mask) ||
2578 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2579 wp->flags |= BP_WATCHPOINT_HIT;
2580 if (!env->watchpoint_hit) {
2581 env->watchpoint_hit = wp;
2582 tb = tb_find_pc(env->mem_io_pc);
2583 if (!tb) {
2584 cpu_abort(env, "check_watchpoint: could not find TB for "
2585 "pc=%p", (void *)env->mem_io_pc);
2587 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2588 tb_phys_invalidate(tb, -1);
2589 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2590 env->exception_index = EXCP_DEBUG;
2591 } else {
2592 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2593 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2595 cpu_resume_from_signal(env, NULL);
2597 } else {
2598 wp->flags &= ~BP_WATCHPOINT_HIT;
2603 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2604 so these check for a hit then pass through to the normal out-of-line
2605 phys routines. */
2606 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2608 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2609 return ldub_phys(addr);
2612 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2614 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2615 return lduw_phys(addr);
2618 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2620 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2621 return ldl_phys(addr);
2624 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2625 uint32_t val)
2627 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2628 stb_phys(addr, val);
2631 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2632 uint32_t val)
2634 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2635 stw_phys(addr, val);
2638 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2639 uint32_t val)
2641 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2642 stl_phys(addr, val);
2645 static CPUReadMemoryFunc *watch_mem_read[3] = {
2646 watch_mem_readb,
2647 watch_mem_readw,
2648 watch_mem_readl,
2651 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2652 watch_mem_writeb,
2653 watch_mem_writew,
2654 watch_mem_writel,
2657 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2658 unsigned int len)
2660 uint32_t ret;
2661 unsigned int idx;
2663 idx = SUBPAGE_IDX(addr);
2664 #if defined(DEBUG_SUBPAGE)
2665 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2666 mmio, len, addr, idx);
2667 #endif
2668 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2669 addr + mmio->region_offset[idx][0][len]);
2671 return ret;
2674 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2675 uint32_t value, unsigned int len)
2677 unsigned int idx;
2679 idx = SUBPAGE_IDX(addr);
2680 #if defined(DEBUG_SUBPAGE)
2681 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2682 mmio, len, addr, idx, value);
2683 #endif
2684 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2685 addr + mmio->region_offset[idx][1][len],
2686 value);
2689 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2691 #if defined(DEBUG_SUBPAGE)
2692 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2693 #endif
2695 return subpage_readlen(opaque, addr, 0);
2698 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2699 uint32_t value)
2701 #if defined(DEBUG_SUBPAGE)
2702 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2703 #endif
2704 subpage_writelen(opaque, addr, value, 0);
2707 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2709 #if defined(DEBUG_SUBPAGE)
2710 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2711 #endif
2713 return subpage_readlen(opaque, addr, 1);
2716 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2717 uint32_t value)
2719 #if defined(DEBUG_SUBPAGE)
2720 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2721 #endif
2722 subpage_writelen(opaque, addr, value, 1);
2725 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2727 #if defined(DEBUG_SUBPAGE)
2728 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2729 #endif
2731 return subpage_readlen(opaque, addr, 2);
2734 static void subpage_writel (void *opaque,
2735 target_phys_addr_t addr, uint32_t value)
2737 #if defined(DEBUG_SUBPAGE)
2738 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2739 #endif
2740 subpage_writelen(opaque, addr, value, 2);
2743 static CPUReadMemoryFunc *subpage_read[] = {
2744 &subpage_readb,
2745 &subpage_readw,
2746 &subpage_readl,
2749 static CPUWriteMemoryFunc *subpage_write[] = {
2750 &subpage_writeb,
2751 &subpage_writew,
2752 &subpage_writel,
2755 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2756 ram_addr_t memory, ram_addr_t region_offset)
2758 int idx, eidx;
2759 unsigned int i;
2761 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2762 return -1;
2763 idx = SUBPAGE_IDX(start);
2764 eidx = SUBPAGE_IDX(end);
2765 #if defined(DEBUG_SUBPAGE)
2766 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2767 mmio, start, end, idx, eidx, memory);
2768 #endif
2769 memory >>= IO_MEM_SHIFT;
2770 for (; idx <= eidx; idx++) {
2771 for (i = 0; i < 4; i++) {
2772 if (io_mem_read[memory][i]) {
2773 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2774 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2775 mmio->region_offset[idx][0][i] = region_offset;
2777 if (io_mem_write[memory][i]) {
2778 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2779 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2780 mmio->region_offset[idx][1][i] = region_offset;
2785 return 0;
2788 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2789 ram_addr_t orig_memory, ram_addr_t region_offset)
2791 subpage_t *mmio;
2792 int subpage_memory;
2794 mmio = qemu_mallocz(sizeof(subpage_t));
2796 mmio->base = base;
2797 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2798 #if defined(DEBUG_SUBPAGE)
2799 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2800 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2801 #endif
2802 *phys = subpage_memory | IO_MEM_SUBPAGE;
2803 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2804 region_offset);
2806 return mmio;
2809 static int get_free_io_mem_idx(void)
2811 int i;
2813 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2814 if (!io_mem_used[i]) {
2815 io_mem_used[i] = 1;
2816 return i;
2819 return -1;
2822 static void io_mem_init(void)
2824 int i;
2826 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2827 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2828 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2829 for (i=0; i<5; i++)
2830 io_mem_used[i] = 1;
2832 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2833 watch_mem_write, NULL);
2834 /* alloc dirty bits array */
2835 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2836 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2839 /* mem_read and mem_write are arrays of functions containing the
2840 function to access byte (index 0), word (index 1) and dword (index
2841 2). Functions can be omitted with a NULL function pointer. The
2842 registered functions may be modified dynamically later.
2843 If io_index is non-zero, the corresponding io zone is
2844 modified. If it is zero, a new io zone is allocated. The return
2845 value can be used with cpu_register_physical_memory(); -1 is
2846 returned on error. */
2847 int cpu_register_io_memory(int io_index,
2848 CPUReadMemoryFunc **mem_read,
2849 CPUWriteMemoryFunc **mem_write,
2850 void *opaque)
2852 int i, subwidth = 0;
2854 if (io_index <= 0) {
2855 io_index = get_free_io_mem_idx();
2856 if (io_index == -1)
2857 return io_index;
2858 } else {
2859 if (io_index >= IO_MEM_NB_ENTRIES)
2860 return -1;
2863 for(i = 0;i < 3; i++) {
2864 if (!mem_read[i] || !mem_write[i])
2865 subwidth = IO_MEM_SUBWIDTH;
2866 io_mem_read[io_index][i] = mem_read[i];
2867 io_mem_write[io_index][i] = mem_write[i];
2869 io_mem_opaque[io_index] = opaque;
2870 return (io_index << IO_MEM_SHIFT) | subwidth;
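/* Illustrative sketch: a device model supplies byte/word/long callback
   tables and then maps the returned handle into the physical address space;
   the my_dev_* callbacks, "s" and "base" are assumptions for the example.

       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };

       int io = cpu_register_io_memory(0, my_dev_read, my_dev_write, s);
       cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
*/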
2873 void cpu_unregister_io_memory(int io_table_address)
2875 int i;
2876 int io_index = io_table_address >> IO_MEM_SHIFT;
2878 for (i=0;i < 3; i++) {
2879 io_mem_read[io_index][i] = unassigned_mem_read[i];
2880 io_mem_write[io_index][i] = unassigned_mem_write[i];
2882 io_mem_opaque[io_index] = NULL;
2883 io_mem_used[io_index] = 0;
2886 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2888 return io_mem_write[io_index >> IO_MEM_SHIFT];
2891 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2893 return io_mem_read[io_index >> IO_MEM_SHIFT];
2896 #endif /* !defined(CONFIG_USER_ONLY) */
2898 /* physical memory access (slow version, mainly for debug) */
2899 #if defined(CONFIG_USER_ONLY)
2900 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2901 int len, int is_write)
2903 int l, flags;
2904 target_ulong page;
2905 void * p;
2907 while (len > 0) {
2908 page = addr & TARGET_PAGE_MASK;
2909 l = (page + TARGET_PAGE_SIZE) - addr;
2910 if (l > len)
2911 l = len;
2912 flags = page_get_flags(page);
2913 if (!(flags & PAGE_VALID))
2914 return;
2915 if (is_write) {
2916 if (!(flags & PAGE_WRITE))
2917 return;
2918 /* XXX: this code should not depend on lock_user */
2919 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2920 /* FIXME - should this return an error rather than just fail? */
2921 return;
2922 memcpy(p, buf, l);
2923 unlock_user(p, addr, l);
2924 } else {
2925 if (!(flags & PAGE_READ))
2926 return;
2927 /* XXX: this code should not depend on lock_user */
2928 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2929 /* FIXME - should this return an error rather than just fail? */
2930 return;
2931 memcpy(buf, p, l);
2932 unlock_user(p, addr, 0);
2934 len -= l;
2935 buf += l;
2936 addr += l;
2940 #else
2941 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2942 int len, int is_write)
2944 int l, io_index;
2945 uint8_t *ptr;
2946 uint32_t val;
2947 target_phys_addr_t page;
2948 unsigned long pd;
2949 PhysPageDesc *p;
2951 while (len > 0) {
2952 page = addr & TARGET_PAGE_MASK;
2953 l = (page + TARGET_PAGE_SIZE) - addr;
2954 if (l > len)
2955 l = len;
2956 p = phys_page_find(page >> TARGET_PAGE_BITS);
2957 if (!p) {
2958 pd = IO_MEM_UNASSIGNED;
2959 } else {
2960 pd = p->phys_offset;
2963 if (is_write) {
2964 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2965 target_phys_addr_t addr1 = addr;
2966 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2967 if (p)
2968 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2969 /* XXX: could force cpu_single_env to NULL to avoid
2970 potential bugs */
2971 if (l >= 4 && ((addr1 & 3) == 0)) {
2972 /* 32 bit write access */
2973 val = ldl_p(buf);
2974 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
2975 l = 4;
2976 } else if (l >= 2 && ((addr1 & 1) == 0)) {
2977 /* 16 bit write access */
2978 val = lduw_p(buf);
2979 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
2980 l = 2;
2981 } else {
2982 /* 8 bit write access */
2983 val = ldub_p(buf);
2984 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
2985 l = 1;
2987 } else {
2988 unsigned long addr1;
2989 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2990 /* RAM case */
2991 ptr = phys_ram_base + addr1;
2992 memcpy(ptr, buf, l);
2993 if (!cpu_physical_memory_is_dirty(addr1)) {
2994 /* invalidate code */
2995 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2996 /* set dirty bit */
2997 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2998 (0xff & ~CODE_DIRTY_FLAG);
3001 } else {
3002 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3003 !(pd & IO_MEM_ROMD)) {
3004 target_phys_addr_t addr1 = addr;
3005 /* I/O case */
3006 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3007 if (p)
3008 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3009 if (l >= 4 && ((addr1 & 3) == 0)) {
3010 /* 32 bit read access */
3011 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3012 stl_p(buf, val);
3013 l = 4;
3014 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3015 /* 16 bit read access */
3016 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3017 stw_p(buf, val);
3018 l = 2;
3019 } else {
3020 /* 8 bit read access */
3021 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3022 stb_p(buf, val);
3023 l = 1;
3025 } else {
3026 /* RAM case */
3027 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3028 (addr & ~TARGET_PAGE_MASK);
3029 memcpy(buf, ptr, l);
3032 len -= l;
3033 buf += l;
3034 addr += l;
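/* Illustrative sketch: device and loader code normally goes through the
   cpu_physical_memory_read()/cpu_physical_memory_write() helpers, which
   expand to calls of this form; "addr" is an assumption for the example
   (is_write = 0 reads into buf, is_write = 1 writes from buf).

       uint8_t buf[16];
       cpu_physical_memory_rw(addr, buf, sizeof(buf), 0);
       cpu_physical_memory_rw(addr, buf, sizeof(buf), 1);
*/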
3038 /* used for ROM loading : can write in RAM and ROM */
3039 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3040 const uint8_t *buf, int len)
3042 int l;
3043 uint8_t *ptr;
3044 target_phys_addr_t page;
3045 unsigned long pd;
3046 PhysPageDesc *p;
3048 while (len > 0) {
3049 page = addr & TARGET_PAGE_MASK;
3050 l = (page + TARGET_PAGE_SIZE) - addr;
3051 if (l > len)
3052 l = len;
3053 p = phys_page_find(page >> TARGET_PAGE_BITS);
3054 if (!p) {
3055 pd = IO_MEM_UNASSIGNED;
3056 } else {
3057 pd = p->phys_offset;
3060 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3061 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3062 !(pd & IO_MEM_ROMD)) {
3063 /* do nothing */
3064 } else {
3065 unsigned long addr1;
3066 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3067 /* ROM/RAM case */
3068 ptr = phys_ram_base + addr1;
3069 memcpy(ptr, buf, l);
3071 len -= l;
3072 buf += l;
3073 addr += l;
3077 typedef struct {
3078 void *buffer;
3079 target_phys_addr_t addr;
3080 target_phys_addr_t len;
3081 } BounceBuffer;
3083 static BounceBuffer bounce;
3085 typedef struct MapClient {
3086 void *opaque;
3087 void (*callback)(void *opaque);
3088 LIST_ENTRY(MapClient) link;
3089 } MapClient;
3091 static LIST_HEAD(map_client_list, MapClient) map_client_list
3092 = LIST_HEAD_INITIALIZER(map_client_list);
3094 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3096 MapClient *client = qemu_malloc(sizeof(*client));
3098 client->opaque = opaque;
3099 client->callback = callback;
3100 LIST_INSERT_HEAD(&map_client_list, client, link);
3101 return client;
3104 void cpu_unregister_map_client(void *_client)
3106 MapClient *client = (MapClient *)_client;
3108 LIST_REMOVE(client, link);
3111 static void cpu_notify_map_clients(void)
3113 MapClient *client;
3115 while (!LIST_EMPTY(&map_client_list)) {
3116 client = LIST_FIRST(&map_client_list);
3117 client->callback(client->opaque);
3118 LIST_REMOVE(client, link);
3122 /* Map a physical memory region into a host virtual address.
3123 * May map a subset of the requested range, given by and returned in *plen.
3124 * May return NULL if resources needed to perform the mapping are exhausted.
3125 * Use only for reads OR writes - not for read-modify-write operations.
3126 * Use cpu_register_map_client() to know when retrying the map operation is
3127 * likely to succeed.
3129 void *cpu_physical_memory_map(target_phys_addr_t addr,
3130 target_phys_addr_t *plen,
3131 int is_write)
3133 target_phys_addr_t len = *plen;
3134 target_phys_addr_t done = 0;
3135 int l;
3136 uint8_t *ret = NULL;
3137 uint8_t *ptr;
3138 target_phys_addr_t page;
3139 unsigned long pd;
3140 PhysPageDesc *p;
3141 unsigned long addr1;
3143 while (len > 0) {
3144 page = addr & TARGET_PAGE_MASK;
3145 l = (page + TARGET_PAGE_SIZE) - addr;
3146 if (l > len)
3147 l = len;
3148 p = phys_page_find(page >> TARGET_PAGE_BITS);
3149 if (!p) {
3150 pd = IO_MEM_UNASSIGNED;
3151 } else {
3152 pd = p->phys_offset;
3155 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3156 if (done || bounce.buffer) {
3157 break;
3159 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3160 bounce.addr = addr;
3161 bounce.len = l;
3162 if (!is_write) {
3163 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3165 ptr = bounce.buffer;
3166 } else {
3167 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3168 ptr = phys_ram_base + addr1;
3170 if (!done) {
3171 ret = ptr;
3172 } else if (ret + done != ptr) {
3173 break;
3176 len -= l;
3177 addr += l;
3178 done += l;
3180 *plen = done;
3181 return ret;
3184 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3185 * Will also mark the memory as dirty if is_write == 1. access_len gives
3186 * the amount of memory that was actually read or written by the caller.
3188 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3189 int is_write, target_phys_addr_t access_len)
3191 if (buffer != bounce.buffer) {
3192 if (is_write) {
3193 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3194 while (access_len) {
3195 unsigned l;
3196 l = TARGET_PAGE_SIZE;
3197 if (l > access_len)
3198 l = access_len;
3199 if (!cpu_physical_memory_is_dirty(addr1)) {
3200 /* invalidate code */
3201 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3202 /* set dirty bit */
3203 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3204 (0xff & ~CODE_DIRTY_FLAG);
3206 addr1 += l;
3207 access_len -= l;
3210 return;
3212 if (is_write) {
3213 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3215 qemu_free(bounce.buffer);
3216 bounce.buffer = NULL;
3217 cpu_notify_map_clients();
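/* Illustrative sketch: a DMA-capable device would typically use the
   map/unmap pair like this, falling back to the map-client notifier when the
   bounce buffer is busy; "s", "dma_retry_cb", "addr", "len" and "is_write"
   are assumptions for the example.

       target_phys_addr_t plen = len;
       void *host = cpu_physical_memory_map(addr, &plen, is_write);
       if (!host) {
           cpu_register_map_client(s, dma_retry_cb);
           return;
       }
       ... transfer plen bytes to or from host ...
       cpu_physical_memory_unmap(host, plen, is_write, plen);
*/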
3220 /* warning: addr must be aligned */
3221 uint32_t ldl_phys(target_phys_addr_t addr)
3223 int io_index;
3224 uint8_t *ptr;
3225 uint32_t val;
3226 unsigned long pd;
3227 PhysPageDesc *p;
3229 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3230 if (!p) {
3231 pd = IO_MEM_UNASSIGNED;
3232 } else {
3233 pd = p->phys_offset;
3236 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3237 !(pd & IO_MEM_ROMD)) {
3238 /* I/O case */
3239 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3240 if (p)
3241 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3242 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3243 } else {
3244 /* RAM case */
3245 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3246 (addr & ~TARGET_PAGE_MASK);
3247 val = ldl_p(ptr);
3249 return val;
3252 /* warning: addr must be aligned */
3253 uint64_t ldq_phys(target_phys_addr_t addr)
3255 int io_index;
3256 uint8_t *ptr;
3257 uint64_t val;
3258 unsigned long pd;
3259 PhysPageDesc *p;
3261 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3262 if (!p) {
3263 pd = IO_MEM_UNASSIGNED;
3264 } else {
3265 pd = p->phys_offset;
3268 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3269 !(pd & IO_MEM_ROMD)) {
3270 /* I/O case */
3271 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3272 if (p)
3273 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3274 #ifdef TARGET_WORDS_BIGENDIAN
3275 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3276 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3277 #else
3278 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3279 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3280 #endif
3281 } else {
3282 /* RAM case */
3283 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3284 (addr & ~TARGET_PAGE_MASK);
3285 val = ldq_p(ptr);
3287 return val;
3290 /* XXX: optimize */
3291 uint32_t ldub_phys(target_phys_addr_t addr)
3293 uint8_t val;
3294 cpu_physical_memory_read(addr, &val, 1);
3295 return val;
3298 /* XXX: optimize */
3299 uint32_t lduw_phys(target_phys_addr_t addr)
3301 uint16_t val;
3302 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3303 return tswap16(val);
3306 /* warning: addr must be aligned. The ram page is not marked as dirty
3307 and the code inside is not invalidated. It is useful if the dirty
3308 bits are used to track modified PTEs */
3309 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3311 int io_index;
3312 uint8_t *ptr;
3313 unsigned long pd;
3314 PhysPageDesc *p;
3316 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3317 if (!p) {
3318 pd = IO_MEM_UNASSIGNED;
3319 } else {
3320 pd = p->phys_offset;
3323 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3324 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3325 if (p)
3326 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3327 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3328 } else {
3329 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3330 ptr = phys_ram_base + addr1;
3331 stl_p(ptr, val);
3333 if (unlikely(in_migration)) {
3334 if (!cpu_physical_memory_is_dirty(addr1)) {
3335 /* invalidate code */
3336 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3337 /* set dirty bit */
3338 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3339 (0xff & ~CODE_DIRTY_FLAG);
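/* Illustrative sketch: a softmmu page-table walker that wants to set an
   accessed/dirty bit in a guest PTE without disturbing the dirty tracking
   used for migration and TB invalidation could do the following;
   "pte_addr", "pte" and "PTE_ACCESSED" are assumptions for the example.

       stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
*/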
3345 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3347 int io_index;
3348 uint8_t *ptr;
3349 unsigned long pd;
3350 PhysPageDesc *p;
3352 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3353 if (!p) {
3354 pd = IO_MEM_UNASSIGNED;
3355 } else {
3356 pd = p->phys_offset;
3359 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3360 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3361 if (p)
3362 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3363 #ifdef TARGET_WORDS_BIGENDIAN
3364 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3365 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3366 #else
3367 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3368 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3369 #endif
3370 } else {
3371 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3372 (addr & ~TARGET_PAGE_MASK);
3373 stq_p(ptr, val);
3377 /* warning: addr must be aligned */
3378 void stl_phys(target_phys_addr_t addr, uint32_t val)
3380 int io_index;
3381 uint8_t *ptr;
3382 unsigned long pd;
3383 PhysPageDesc *p;
3385 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3386 if (!p) {
3387 pd = IO_MEM_UNASSIGNED;
3388 } else {
3389 pd = p->phys_offset;
3392 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3393 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3394 if (p)
3395 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3396 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3397 } else {
3398 unsigned long addr1;
3399 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3400 /* RAM case */
3401 ptr = phys_ram_base + addr1;
3402 stl_p(ptr, val);
3403 if (!cpu_physical_memory_is_dirty(addr1)) {
3404 /* invalidate code */
3405 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3406 /* set dirty bit */
3407 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3408 (0xff & ~CODE_DIRTY_FLAG);
3413 /* XXX: optimize */
3414 void stb_phys(target_phys_addr_t addr, uint32_t val)
3416 uint8_t v = val;
3417 cpu_physical_memory_write(addr, &v, 1);
3420 /* XXX: optimize */
3421 void stw_phys(target_phys_addr_t addr, uint32_t val)
3423 uint16_t v = tswap16(val);
3424 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3427 /* XXX: optimize */
3428 void stq_phys(target_phys_addr_t addr, uint64_t val)
3430 val = tswap64(val);
3431 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3434 #endif
3436 /* virtual memory access for debug */
3437 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3438 uint8_t *buf, int len, int is_write)
3440 int l;
3441 target_phys_addr_t phys_addr;
3442 target_ulong page;
3444 while (len > 0) {
3445 page = addr & TARGET_PAGE_MASK;
3446 phys_addr = cpu_get_phys_page_debug(env, page);
3447 /* if no physical page mapped, return an error */
3448 if (phys_addr == -1)
3449 return -1;
3450 l = (page + TARGET_PAGE_SIZE) - addr;
3451 if (l > len)
3452 l = len;
3453 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3454 buf, l, is_write);
3455 len -= l;
3456 buf += l;
3457 addr += l;
3459 return 0;
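/* Illustrative sketch: the gdb stub and monitor read guest virtual memory
   through this helper; "env", "pc" and "insn" are assumptions for the
   example.

       uint8_t insn[4];
       if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0)
           return;
*/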
3462 /* in deterministic execution mode, instructions doing device I/Os
3463 must be at the end of the TB */
3464 void cpu_io_recompile(CPUState *env, void *retaddr)
3466 TranslationBlock *tb;
3467 uint32_t n, cflags;
3468 target_ulong pc, cs_base;
3469 uint64_t flags;
3471 tb = tb_find_pc((unsigned long)retaddr);
3472 if (!tb) {
3473 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3474 retaddr);
3476 n = env->icount_decr.u16.low + tb->icount;
3477 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3478 /* Calculate how many instructions had been executed before the fault
3479 occurred. */
3480 n = n - env->icount_decr.u16.low;
3481 /* Generate a new TB ending on the I/O insn. */
3482 n++;
3483 /* On MIPS and SH, delay slot instructions can only be restarted if
3484 they were already the first instruction in the TB. If this is not
3485 the first instruction in a TB then re-execute the preceding
3486 branch. */
3487 #if defined(TARGET_MIPS)
3488 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3489 env->active_tc.PC -= 4;
3490 env->icount_decr.u16.low++;
3491 env->hflags &= ~MIPS_HFLAG_BMASK;
3493 #elif defined(TARGET_SH4)
3494 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3495 && n > 1) {
3496 env->pc -= 2;
3497 env->icount_decr.u16.low++;
3498 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3500 #endif
3501 /* This should never happen. */
3502 if (n > CF_COUNT_MASK)
3503 cpu_abort(env, "TB too big during recompile");
3505 cflags = n | CF_LAST_IO;
3506 pc = tb->pc;
3507 cs_base = tb->cs_base;
3508 flags = tb->flags;
3509 tb_phys_invalidate(tb, -1);
3510 /* FIXME: In theory this could raise an exception. In practice
3511 we have already translated the block once so it's probably ok. */
3512 tb_gen_code(env, pc, cs_base, flags, cflags);
3513 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3514 the first in the TB) then we end up generating a whole new TB and
3515 repeating the fault, which is horribly inefficient.
3516 Better would be to execute just this insn uncached, or generate a
3517 second new TB. */
3518 cpu_resume_from_signal(env, NULL);
3521 void dump_exec_info(FILE *f,
3522 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3524 int i, target_code_size, max_target_code_size;
3525 int direct_jmp_count, direct_jmp2_count, cross_page;
3526 TranslationBlock *tb;
3528 target_code_size = 0;
3529 max_target_code_size = 0;
3530 cross_page = 0;
3531 direct_jmp_count = 0;
3532 direct_jmp2_count = 0;
3533 for(i = 0; i < nb_tbs; i++) {
3534 tb = &tbs[i];
3535 target_code_size += tb->size;
3536 if (tb->size > max_target_code_size)
3537 max_target_code_size = tb->size;
3538 if (tb->page_addr[1] != -1)
3539 cross_page++;
3540 if (tb->tb_next_offset[0] != 0xffff) {
3541 direct_jmp_count++;
3542 if (tb->tb_next_offset[1] != 0xffff) {
3543 direct_jmp2_count++;
3547 /* XXX: avoid using doubles ? */
3548 cpu_fprintf(f, "Translation buffer state:\n");
3549 cpu_fprintf(f, "gen code size %ld/%ld\n",
3550 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3551 cpu_fprintf(f, "TB count %d/%d\n",
3552 nb_tbs, code_gen_max_blocks);
3553 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3554 nb_tbs ? target_code_size / nb_tbs : 0,
3555 max_target_code_size);
3556 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3557 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3558 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3559 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3560 cross_page,
3561 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3562 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3563 direct_jmp_count,
3564 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3565 direct_jmp2_count,
3566 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3567 cpu_fprintf(f, "\nStatistics:\n");
3568 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3569 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3570 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3571 tcg_dump_info(f, cpu_fprintf);
3574 #if !defined(CONFIG_USER_ONLY)
3576 #define MMUSUFFIX _cmmu
3577 #define GETPC() NULL
3578 #define env cpu_single_env
3579 #define SOFTMMU_CODE_ACCESS
3581 #define SHIFT 0
3582 #include "softmmu_template.h"
3584 #define SHIFT 1
3585 #include "softmmu_template.h"
3587 #define SHIFT 2
3588 #include "softmmu_template.h"
3590 #define SHIFT 3
3591 #include "softmmu_template.h"
3593 #undef env
3595 #endif