1. fix the SM502 PCI configuration read/write bug
[qemu/qemu-loongson.git] / exec.c
blob 720e5543ba435ed72dd066de8bceab7c8aaf7fe7
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #else
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #endif
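/* TARGET_PHYS_ADDR_SPACE_BITS bounds the guest physical address space that the
   software page table below (l1_phys_map) has to cover; values wider than 32
   bits simply add an extra indirection level in phys_page_find_alloc(). */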
84 static TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 static int nb_tbs;
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91 #if defined(__arm__) || defined(__sparc_v9__)
92 /* The prologue must be reachable with a direct jump. ARM and Sparc64
93 have limited branch ranges (possibly also PPC) so place it in a
94    section close to the code segment. */
95 #define code_gen_section \
96 __attribute__((__section__(".gen_code"))) \
97 __attribute__((aligned (32)))
98 #else
99 #define code_gen_section \
100 __attribute__((aligned (32)))
101 #endif
103 uint8_t code_gen_prologue[1024] code_gen_section;
104 static uint8_t *code_gen_buffer;
105 static unsigned long code_gen_buffer_size;
106 /* threshold to flush the translated code buffer */
107 static unsigned long code_gen_buffer_max_size;
108 uint8_t *code_gen_ptr;
110 #if !defined(CONFIG_USER_ONLY)
111 ram_addr_t phys_ram_size;
112 int phys_ram_fd;
113 uint8_t *phys_ram_base;
114 uint8_t *phys_ram_dirty;
115 static int in_migration;
116 static ram_addr_t phys_ram_alloc_offset = 0;
117 #endif
119 CPUState *first_cpu;
120 /* current CPU in the current thread. It is only valid inside
121 cpu_exec() */
122 CPUState *cpu_single_env;
123 /* 0 = Do not count executed instructions.
124 1 = Precise instruction counting.
125 2 = Adaptive rate instruction counting. */
126 int use_icount = 0;
127 /* Current instruction counter. While executing translated code this may
128 include some instructions that have not yet been executed. */
129 int64_t qemu_icount;
131 typedef struct PageDesc {
132 /* list of TBs intersecting this ram page */
133 TranslationBlock *first_tb;
134     /* to optimize handling of self-modifying code, we count the number of
135        code write accesses to a given page before switching to a bitmap */
136 unsigned int code_write_count;
137 uint8_t *code_bitmap;
138 #if defined(CONFIG_USER_ONLY)
139 unsigned long flags;
140 #endif
141 } PageDesc;
143 typedef struct PhysPageDesc {
144 /* offset in host memory of the page + io_index in the low bits */
145 ram_addr_t phys_offset;
146 ram_addr_t region_offset;
147 } PhysPageDesc;
149 #define L2_BITS 10
150 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
151 /* XXX: this is a temporary hack for alpha target.
152 * In the future, this is to be replaced by a multi-level table
153  * to actually be able to handle the complete 64-bit address space. */
155 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
156 #else
157 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
158 #endif
160 #define L1_SIZE (1 << L1_BITS)
161 #define L2_SIZE (1 << L2_BITS)
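/* Worked example (assuming 4 KB target pages, i.e. TARGET_PAGE_BITS == 12):
   L1_BITS = 32 - 10 - 12 = 10, so both levels hold 1024 entries and the
   two-level table spans 1024 * 1024 pages = 4 GB of address space. */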
163 unsigned long qemu_real_host_page_size;
164 unsigned long qemu_host_page_bits;
165 unsigned long qemu_host_page_size;
166 unsigned long qemu_host_page_mask;
168 /* XXX: for system emulation, it could just be an array */
169 static PageDesc *l1_map[L1_SIZE];
170 static PhysPageDesc **l1_phys_map;
172 #if !defined(CONFIG_USER_ONLY)
173 static void io_mem_init(void);
175 /* io memory support */
176 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
177 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
178 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
179 static char io_mem_used[IO_MEM_NB_ENTRIES];
180 static int io_mem_watch;
181 #endif
183 /* log support */
184 static const char *logfilename = "/tmp/qemu.log";
185 FILE *logfile;
186 int loglevel;
187 static int log_append = 0;
189 /* statistics */
190 static int tlb_flush_count;
191 static int tb_flush_count;
192 static int tb_phys_invalidate_count;
194 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
195 typedef struct subpage_t {
196 target_phys_addr_t base;
197 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
198 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
199 void *opaque[TARGET_PAGE_SIZE][2][4];
200 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
201 } subpage_t;
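/* A subpage_t splits one target page into byte-granular regions: the handler
   tables are indexed first by SUBPAGE_IDX(addr) (the offset inside the page)
   and then by the access-size index, so several devices can share one page. */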
203 #ifdef _WIN32
204 static void map_exec(void *addr, long size)
206 DWORD old_protect;
207 VirtualProtect(addr, size,
208 PAGE_EXECUTE_READWRITE, &old_protect);
211 #else
212 static void map_exec(void *addr, long size)
214 unsigned long start, end, page_size;
216 page_size = getpagesize();
217 start = (unsigned long)addr;
218 start &= ~(page_size - 1);
220 end = (unsigned long)addr + size;
221 end += page_size - 1;
222 end &= ~(page_size - 1);
224 mprotect((void *)start, end - start,
225 PROT_READ | PROT_WRITE | PROT_EXEC);
227 #endif
229 static void page_init(void)
231 /* NOTE: we can always suppose that qemu_host_page_size >=
232 TARGET_PAGE_SIZE */
233 #ifdef _WIN32
235 SYSTEM_INFO system_info;
237 GetSystemInfo(&system_info);
238 qemu_real_host_page_size = system_info.dwPageSize;
240 #else
241 qemu_real_host_page_size = getpagesize();
242 #endif
243 if (qemu_host_page_size == 0)
244 qemu_host_page_size = qemu_real_host_page_size;
245 if (qemu_host_page_size < TARGET_PAGE_SIZE)
246 qemu_host_page_size = TARGET_PAGE_SIZE;
247 qemu_host_page_bits = 0;
248 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
249 qemu_host_page_bits++;
250 qemu_host_page_mask = ~(qemu_host_page_size - 1);
251 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
252 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256 long long startaddr, endaddr;
257 FILE *f;
258 int n;
260 mmap_lock();
261 last_brk = (unsigned long)sbrk(0);
262 f = fopen("/proc/self/maps", "r");
263 if (f) {
264 do {
265 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
266 if (n == 2) {
267 startaddr = MIN(startaddr,
268 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
269 endaddr = MIN(endaddr,
270 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
271 page_set_flags(startaddr & TARGET_PAGE_MASK,
272 TARGET_PAGE_ALIGN(endaddr),
273 PAGE_RESERVED);
275 } while (!feof(f));
276 fclose(f);
278 mmap_unlock();
280 #endif
283 static inline PageDesc **page_l1_map(target_ulong index)
285 #if TARGET_LONG_BITS > 32
286 /* Host memory outside guest VM. For 32-bit targets we have already
287 excluded high addresses. */
288 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
289 return NULL;
290 #endif
291 return &l1_map[index >> L2_BITS];
294 static inline PageDesc *page_find_alloc(target_ulong index)
296 PageDesc **lp, *p;
297 lp = page_l1_map(index);
298 if (!lp)
299 return NULL;
301 p = *lp;
302 if (!p) {
303 /* allocate if not found */
304 #if defined(CONFIG_USER_ONLY)
305 size_t len = sizeof(PageDesc) * L2_SIZE;
306 /* Don't use qemu_malloc because it may recurse. */
307 p = mmap(0, len, PROT_READ | PROT_WRITE,
308 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
309 *lp = p;
310 if (h2g_valid(p)) {
311 unsigned long addr = h2g(p);
312 page_set_flags(addr & TARGET_PAGE_MASK,
313 TARGET_PAGE_ALIGN(addr + len),
314 PAGE_RESERVED);
316 #else
317 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
318 *lp = p;
319 #endif
321 return p + (index & (L2_SIZE - 1));
324 static inline PageDesc *page_find(target_ulong index)
326 PageDesc **lp, *p;
327 lp = page_l1_map(index);
328 if (!lp)
329 return NULL;
331 p = *lp;
332 if (!p)
333 return 0;
334 return p + (index & (L2_SIZE - 1));
337 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
339 void **lp, **p;
340 PhysPageDesc *pd;
342 p = (void **)l1_phys_map;
343 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
345 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
346 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
347 #endif
348 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
349 p = *lp;
350 if (!p) {
351 /* allocate if not found */
352 if (!alloc)
353 return NULL;
354 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
355 memset(p, 0, sizeof(void *) * L1_SIZE);
356 *lp = p;
358 #endif
359 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
360 pd = *lp;
361 if (!pd) {
362 int i;
363 /* allocate if not found */
364 if (!alloc)
365 return NULL;
366 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
367 *lp = pd;
368 for (i = 0; i < L2_SIZE; i++) {
369 pd[i].phys_offset = IO_MEM_UNASSIGNED;
370 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
373 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
376 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
378 return phys_page_find_alloc(index, 0);
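/* Lookup summary for the table built above: 'index' is the physical page
   number. When the physical space is wider than 32 bits,
   (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1) first selects an intermediate
   table; (index >> L2_BITS) & (L1_SIZE - 1) then selects the PhysPageDesc
   array, and (index & (L2_SIZE - 1)) the entry itself. Otherwise the first
   step is skipped. */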
381 #if !defined(CONFIG_USER_ONLY)
382 static void tlb_protect_code(ram_addr_t ram_addr);
383 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
384 target_ulong vaddr);
385 #define mmap_lock() do { } while(0)
386 #define mmap_unlock() do { } while(0)
387 #endif
389 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
391 #if defined(CONFIG_USER_ONLY)
392 /* Currently it is not recommended to allocate big chunks of data in
393    user mode. It will change when a dedicated libc is used */
394 #define USE_STATIC_CODE_GEN_BUFFER
395 #endif
397 #ifdef USE_STATIC_CODE_GEN_BUFFER
398 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399 #endif
401 static void code_gen_alloc(unsigned long tb_size)
403 #ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer = static_code_gen_buffer;
405 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406 map_exec(code_gen_buffer, code_gen_buffer_size);
407 #else
408 code_gen_buffer_size = tb_size;
409 if (code_gen_buffer_size == 0) {
410 #if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413 #else
414         /* XXX: needs adjustments */
415 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
416 #endif
418 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422 #if defined(__linux__)
424 int flags;
425 void *start = NULL;
427 flags = MAP_PRIVATE | MAP_ANONYMOUS;
428 #if defined(__x86_64__)
429 flags |= MAP_32BIT;
430 /* Cannot map more than that */
431 if (code_gen_buffer_size > (800 * 1024 * 1024))
432 code_gen_buffer_size = (800 * 1024 * 1024);
433 #elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
435 flags |= MAP_FIXED;
436 start = (void *) 0x60000000UL;
437 if (code_gen_buffer_size > (512 * 1024 * 1024))
438 code_gen_buffer_size = (512 * 1024 * 1024);
439 #elif defined(__arm__)
440 /* Map the buffer below 32M, so we can use direct calls and branches */
441 flags |= MAP_FIXED;
442 start = (void *) 0x01000000UL;
443 if (code_gen_buffer_size > 16 * 1024 * 1024)
444 code_gen_buffer_size = 16 * 1024 * 1024;
445 #endif
446 code_gen_buffer = mmap(start, code_gen_buffer_size,
447 PROT_WRITE | PROT_READ | PROT_EXEC,
448 flags, -1, 0);
449 if (code_gen_buffer == MAP_FAILED) {
450 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
451 exit(1);
454 #elif defined(__FreeBSD__) || defined(__DragonFly__)
456 int flags;
457 void *addr = NULL;
458 flags = MAP_PRIVATE | MAP_ANONYMOUS;
459 #if defined(__x86_64__)
460 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
461 * 0x40000000 is free */
462 flags |= MAP_FIXED;
463 addr = (void *)0x40000000;
464 /* Cannot map more than that */
465 if (code_gen_buffer_size > (800 * 1024 * 1024))
466 code_gen_buffer_size = (800 * 1024 * 1024);
467 #endif
468 code_gen_buffer = mmap(addr, code_gen_buffer_size,
469 PROT_WRITE | PROT_READ | PROT_EXEC,
470 flags, -1, 0);
471 if (code_gen_buffer == MAP_FAILED) {
472 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
473 exit(1);
476 #else
477 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
478 map_exec(code_gen_buffer, code_gen_buffer_size);
479 #endif
480 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
481 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
482 code_gen_buffer_max_size = code_gen_buffer_size -
483 code_gen_max_block_size();
484 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
485 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
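/* Note on the sizing above: code_gen_buffer_max_size keeps one maximum-sized
   block of headroom, so a translation already in progress can never run past
   the end of the buffer before tb_alloc() forces a flush. */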
488 /* Must be called before using the QEMU cpus. 'tb_size' is the size
489 (in bytes) allocated to the translation buffer. Zero means default
490 size. */
491 void cpu_exec_init_all(unsigned long tb_size)
493 cpu_gen_init();
494 code_gen_alloc(tb_size);
495 code_gen_ptr = code_gen_buffer;
496 page_init();
497 #if !defined(CONFIG_USER_ONLY)
498 io_mem_init();
499 #endif
502 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
504 #define CPU_COMMON_SAVE_VERSION 1
506 static void cpu_common_save(QEMUFile *f, void *opaque)
508 CPUState *env = opaque;
510 qemu_put_be32s(f, &env->halted);
511 qemu_put_be32s(f, &env->interrupt_request);
514 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
516 CPUState *env = opaque;
518 if (version_id != CPU_COMMON_SAVE_VERSION)
519 return -EINVAL;
521 qemu_get_be32s(f, &env->halted);
522 qemu_get_be32s(f, &env->interrupt_request);
523 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
524 version_id is increased. */
525 env->interrupt_request &= ~0x01;
526 tlb_flush(env, 1);
528 return 0;
530 #endif
532 void cpu_exec_init(CPUState *env)
534 CPUState **penv;
535 int cpu_index;
537 #if defined(CONFIG_USER_ONLY)
538 cpu_list_lock();
539 #endif
540 env->next_cpu = NULL;
541 penv = &first_cpu;
542 cpu_index = 0;
543 while (*penv != NULL) {
544 penv = (CPUState **)&(*penv)->next_cpu;
545 cpu_index++;
547 env->cpu_index = cpu_index;
548 TAILQ_INIT(&env->breakpoints);
549 TAILQ_INIT(&env->watchpoints);
550 *penv = env;
551 #if defined(CONFIG_USER_ONLY)
552 cpu_list_unlock();
553 #endif
554 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
555 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
556 cpu_common_save, cpu_common_load, env);
557 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
558 cpu_save, cpu_load, env);
559 #endif
562 static inline void invalidate_page_bitmap(PageDesc *p)
564 if (p->code_bitmap) {
565 qemu_free(p->code_bitmap);
566 p->code_bitmap = NULL;
568 p->code_write_count = 0;
571 /* set to NULL all the 'first_tb' fields in all PageDescs */
572 static void page_flush_tb(void)
574 int i, j;
575 PageDesc *p;
577 for(i = 0; i < L1_SIZE; i++) {
578 p = l1_map[i];
579 if (p) {
580 for(j = 0; j < L2_SIZE; j++) {
581 p->first_tb = NULL;
582 invalidate_page_bitmap(p);
583 p++;
589 /* flush all the translation blocks */
590 /* XXX: tb_flush is currently not thread safe */
591 void tb_flush(CPUState *env1)
593 CPUState *env;
594 #if defined(DEBUG_FLUSH)
595 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
596 (unsigned long)(code_gen_ptr - code_gen_buffer),
597 nb_tbs, nb_tbs > 0 ?
598 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
599 #endif
600 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
601 cpu_abort(env1, "Internal error: code buffer overflow\n");
603 nb_tbs = 0;
605 for(env = first_cpu; env != NULL; env = env->next_cpu) {
606 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
609 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
610 page_flush_tb();
612 code_gen_ptr = code_gen_buffer;
613 /* XXX: flush processor icache at this point if cache flush is
614 expensive */
615 tb_flush_count++;
618 #ifdef DEBUG_TB_CHECK
620 static void tb_invalidate_check(target_ulong address)
622 TranslationBlock *tb;
623 int i;
624 address &= TARGET_PAGE_MASK;
625 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
626 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
627 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
628 address >= tb->pc + tb->size)) {
629 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
630 address, (long)tb->pc, tb->size);
636 /* verify that all the pages have correct rights for code */
637 static void tb_page_check(void)
639 TranslationBlock *tb;
640 int i, flags1, flags2;
642 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
643 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
644 flags1 = page_get_flags(tb->pc);
645 flags2 = page_get_flags(tb->pc + tb->size - 1);
646 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
647 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
648 (long)tb->pc, tb->size, flags1, flags2);
654 static void tb_jmp_check(TranslationBlock *tb)
656 TranslationBlock *tb1;
657 unsigned int n1;
659 /* suppress any remaining jumps to this TB */
660 tb1 = tb->jmp_first;
661 for(;;) {
662 n1 = (long)tb1 & 3;
663 tb1 = (TranslationBlock *)((long)tb1 & ~3);
664 if (n1 == 2)
665 break;
666 tb1 = tb1->jmp_next[n1];
668 /* check end of list */
669 if (tb1 != tb) {
670 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
674 #endif
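/* The TB lists handled below use tagged pointers: the two low bits of
   page_next[] and jmp_next[] entries encode which slot (0 or 1) of the
   pointing TB is meant, and the value 2 marks the head of the circular jump
   list (see tb->jmp_first, which is set to (long)tb | 2). */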
676 /* invalidate one TB */
677 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
678 int next_offset)
680 TranslationBlock *tb1;
681 for(;;) {
682 tb1 = *ptb;
683 if (tb1 == tb) {
684 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
685 break;
687 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
691 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
693 TranslationBlock *tb1;
694 unsigned int n1;
696 for(;;) {
697 tb1 = *ptb;
698 n1 = (long)tb1 & 3;
699 tb1 = (TranslationBlock *)((long)tb1 & ~3);
700 if (tb1 == tb) {
701 *ptb = tb1->page_next[n1];
702 break;
704 ptb = &tb1->page_next[n1];
708 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
710 TranslationBlock *tb1, **ptb;
711 unsigned int n1;
713 ptb = &tb->jmp_next[n];
714 tb1 = *ptb;
715 if (tb1) {
716 /* find tb(n) in circular list */
717 for(;;) {
718 tb1 = *ptb;
719 n1 = (long)tb1 & 3;
720 tb1 = (TranslationBlock *)((long)tb1 & ~3);
721 if (n1 == n && tb1 == tb)
722 break;
723 if (n1 == 2) {
724 ptb = &tb1->jmp_first;
725 } else {
726 ptb = &tb1->jmp_next[n1];
729 /* now we can suppress tb(n) from the list */
730 *ptb = tb->jmp_next[n];
732 tb->jmp_next[n] = NULL;
736 /* reset the jump entry 'n' of a TB so that it is not chained to
737 another TB */
738 static inline void tb_reset_jump(TranslationBlock *tb, int n)
740 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
743 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
745 CPUState *env;
746 PageDesc *p;
747 unsigned int h, n1;
748 target_phys_addr_t phys_pc;
749 TranslationBlock *tb1, *tb2;
751 /* remove the TB from the hash list */
752 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
753 h = tb_phys_hash_func(phys_pc);
754 tb_remove(&tb_phys_hash[h], tb,
755 offsetof(TranslationBlock, phys_hash_next));
757 /* remove the TB from the page list */
758 if (tb->page_addr[0] != page_addr) {
759 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
763 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
764 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
765 tb_page_remove(&p->first_tb, tb);
766 invalidate_page_bitmap(p);
769 tb_invalidated_flag = 1;
771 /* remove the TB from the hash list */
772 h = tb_jmp_cache_hash_func(tb->pc);
773 for(env = first_cpu; env != NULL; env = env->next_cpu) {
774 if (env->tb_jmp_cache[h] == tb)
775 env->tb_jmp_cache[h] = NULL;
778 /* suppress this TB from the two jump lists */
779 tb_jmp_remove(tb, 0);
780 tb_jmp_remove(tb, 1);
782 /* suppress any remaining jumps to this TB */
783 tb1 = tb->jmp_first;
784 for(;;) {
785 n1 = (long)tb1 & 3;
786 if (n1 == 2)
787 break;
788 tb1 = (TranslationBlock *)((long)tb1 & ~3);
789 tb2 = tb1->jmp_next[n1];
790 tb_reset_jump(tb1, n1);
791 tb1->jmp_next[n1] = NULL;
792 tb1 = tb2;
794 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
796 tb_phys_invalidate_count++;
799 static inline void set_bits(uint8_t *tab, int start, int len)
801 int end, mask, end1;
803 end = start + len;
804 tab += start >> 3;
805 mask = 0xff << (start & 7);
806 if ((start & ~7) == (end & ~7)) {
807 if (start < end) {
808 mask &= ~(0xff << (end & 7));
809 *tab |= mask;
811 } else {
812 *tab++ |= mask;
813 start = (start + 8) & ~7;
814 end1 = end & ~7;
815 while (start < end1) {
816 *tab++ = 0xff;
817 start += 8;
819 if (start < end) {
820 mask = ~(0xff << (end & 7));
821 *tab |= mask;
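/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. the top five bits of
   tab[0] (mask 0xf8) and the low two bits of tab[1] (mask 0x03). */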
826 static void build_page_bitmap(PageDesc *p)
828 int n, tb_start, tb_end;
829 TranslationBlock *tb;
831 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
833 tb = p->first_tb;
834 while (tb != NULL) {
835 n = (long)tb & 3;
836 tb = (TranslationBlock *)((long)tb & ~3);
837 /* NOTE: this is subtle as a TB may span two physical pages */
838 if (n == 0) {
839 /* NOTE: tb_end may be after the end of the page, but
840 it is not a problem */
841 tb_start = tb->pc & ~TARGET_PAGE_MASK;
842 tb_end = tb_start + tb->size;
843 if (tb_end > TARGET_PAGE_SIZE)
844 tb_end = TARGET_PAGE_SIZE;
845 } else {
846 tb_start = 0;
847 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
849 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
850 tb = tb->page_next[n];
854 TranslationBlock *tb_gen_code(CPUState *env,
855 target_ulong pc, target_ulong cs_base,
856 int flags, int cflags)
858 TranslationBlock *tb;
859 uint8_t *tc_ptr;
860 target_ulong phys_pc, phys_page2, virt_page2;
861 int code_gen_size;
863 phys_pc = get_phys_addr_code(env, pc);
864 tb = tb_alloc(pc);
865 if (!tb) {
866 /* flush must be done */
867 tb_flush(env);
868 /* cannot fail at this point */
869 tb = tb_alloc(pc);
870 /* Don't forget to invalidate previous TB info. */
871 tb_invalidated_flag = 1;
873 tc_ptr = code_gen_ptr;
874 tb->tc_ptr = tc_ptr;
875 tb->cs_base = cs_base;
876 tb->flags = flags;
877 tb->cflags = cflags;
878 cpu_gen_code(env, tb, &code_gen_size);
879 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
881 /* check next page if needed */
882 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
883 phys_page2 = -1;
884 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
885 phys_page2 = get_phys_addr_code(env, virt_page2);
887 tb_link_phys(tb, phys_pc, phys_page2);
888 return tb;
891 /* invalidate all TBs which intersect with the target physical page
892    starting in range [start, end). NOTE: start and end must refer to
893 the same physical page. 'is_cpu_write_access' should be true if called
894 from a real cpu write access: the virtual CPU will exit the current
895 TB if code is modified inside this TB. */
896 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
897 int is_cpu_write_access)
899 TranslationBlock *tb, *tb_next, *saved_tb;
900 CPUState *env = cpu_single_env;
901 target_ulong tb_start, tb_end;
902 PageDesc *p;
903 int n;
904 #ifdef TARGET_HAS_PRECISE_SMC
905 int current_tb_not_found = is_cpu_write_access;
906 TranslationBlock *current_tb = NULL;
907 int current_tb_modified = 0;
908 target_ulong current_pc = 0;
909 target_ulong current_cs_base = 0;
910 int current_flags = 0;
911 #endif /* TARGET_HAS_PRECISE_SMC */
913 p = page_find(start >> TARGET_PAGE_BITS);
914 if (!p)
915 return;
916 if (!p->code_bitmap &&
917 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
918 is_cpu_write_access) {
919 /* build code bitmap */
920 build_page_bitmap(p);
923     /* we remove all the TBs in the range [start, end) */
924 /* XXX: see if in some cases it could be faster to invalidate all the code */
925 tb = p->first_tb;
926 while (tb != NULL) {
927 n = (long)tb & 3;
928 tb = (TranslationBlock *)((long)tb & ~3);
929 tb_next = tb->page_next[n];
930 /* NOTE: this is subtle as a TB may span two physical pages */
931 if (n == 0) {
932 /* NOTE: tb_end may be after the end of the page, but
933 it is not a problem */
934 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
935 tb_end = tb_start + tb->size;
936 } else {
937 tb_start = tb->page_addr[1];
938 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
940 if (!(tb_end <= start || tb_start >= end)) {
941 #ifdef TARGET_HAS_PRECISE_SMC
942 if (current_tb_not_found) {
943 current_tb_not_found = 0;
944 current_tb = NULL;
945 if (env->mem_io_pc) {
946 /* now we have a real cpu fault */
947 current_tb = tb_find_pc(env->mem_io_pc);
950 if (current_tb == tb &&
951 (current_tb->cflags & CF_COUNT_MASK) != 1) {
952 /* If we are modifying the current TB, we must stop
953 its execution. We could be more precise by checking
954 that the modification is after the current PC, but it
955 would require a specialized function to partially
956 restore the CPU state */
958 current_tb_modified = 1;
959 cpu_restore_state(current_tb, env,
960 env->mem_io_pc, NULL);
961 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
962 &current_flags);
964 #endif /* TARGET_HAS_PRECISE_SMC */
965 /* we need to do that to handle the case where a signal
966 occurs while doing tb_phys_invalidate() */
967 saved_tb = NULL;
968 if (env) {
969 saved_tb = env->current_tb;
970 env->current_tb = NULL;
972 tb_phys_invalidate(tb, -1);
973 if (env) {
974 env->current_tb = saved_tb;
975 if (env->interrupt_request && env->current_tb)
976 cpu_interrupt(env, env->interrupt_request);
979 tb = tb_next;
981 #if !defined(CONFIG_USER_ONLY)
982 /* if no code remaining, no need to continue to use slow writes */
983 if (!p->first_tb) {
984 invalidate_page_bitmap(p);
985 if (is_cpu_write_access) {
986 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
989 #endif
990 #ifdef TARGET_HAS_PRECISE_SMC
991 if (current_tb_modified) {
992 /* we generate a block containing just the instruction
993 modifying the memory. It will ensure that it cannot modify
994 itself */
995 env->current_tb = NULL;
996 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
997 cpu_resume_from_signal(env, NULL);
999 #endif
1002 /* len must be <= 8 and start must be a multiple of len */
1003 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1005 PageDesc *p;
1006 int offset, b;
1007 #if 0
1008 if (1) {
1009 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1010 cpu_single_env->mem_io_vaddr, len,
1011 cpu_single_env->eip,
1012 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1014 #endif
1015 p = page_find(start >> TARGET_PAGE_BITS);
1016 if (!p)
1017 return;
1018 if (p->code_bitmap) {
1019 offset = start & ~TARGET_PAGE_MASK;
1020 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1021 if (b & ((1 << len) - 1))
1022 goto do_invalidate;
1023 } else {
1024 do_invalidate:
1025 tb_invalidate_phys_page_range(start, start + len, 1);
1029 #if !defined(CONFIG_SOFTMMU)
1030 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1031 unsigned long pc, void *puc)
1033 TranslationBlock *tb;
1034 PageDesc *p;
1035 int n;
1036 #ifdef TARGET_HAS_PRECISE_SMC
1037 TranslationBlock *current_tb = NULL;
1038 CPUState *env = cpu_single_env;
1039 int current_tb_modified = 0;
1040 target_ulong current_pc = 0;
1041 target_ulong current_cs_base = 0;
1042 int current_flags = 0;
1043 #endif
1045 addr &= TARGET_PAGE_MASK;
1046 p = page_find(addr >> TARGET_PAGE_BITS);
1047 if (!p)
1048 return;
1049 tb = p->first_tb;
1050 #ifdef TARGET_HAS_PRECISE_SMC
1051 if (tb && pc != 0) {
1052 current_tb = tb_find_pc(pc);
1054 #endif
1055 while (tb != NULL) {
1056 n = (long)tb & 3;
1057 tb = (TranslationBlock *)((long)tb & ~3);
1058 #ifdef TARGET_HAS_PRECISE_SMC
1059 if (current_tb == tb &&
1060 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1061 /* If we are modifying the current TB, we must stop
1062 its execution. We could be more precise by checking
1063 that the modification is after the current PC, but it
1064 would require a specialized function to partially
1065 restore the CPU state */
1067 current_tb_modified = 1;
1068 cpu_restore_state(current_tb, env, pc, puc);
1069 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1070 &current_flags);
1072 #endif /* TARGET_HAS_PRECISE_SMC */
1073 tb_phys_invalidate(tb, addr);
1074 tb = tb->page_next[n];
1076 p->first_tb = NULL;
1077 #ifdef TARGET_HAS_PRECISE_SMC
1078 if (current_tb_modified) {
1079 /* we generate a block containing just the instruction
1080 modifying the memory. It will ensure that it cannot modify
1081 itself */
1082 env->current_tb = NULL;
1083 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1084 cpu_resume_from_signal(env, puc);
1086 #endif
1088 #endif
1090 /* add the tb in the target page and protect it if necessary */
1091 static inline void tb_alloc_page(TranslationBlock *tb,
1092 unsigned int n, target_ulong page_addr)
1094 PageDesc *p;
1095 TranslationBlock *last_first_tb;
1097 tb->page_addr[n] = page_addr;
1098 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1099 tb->page_next[n] = p->first_tb;
1100 last_first_tb = p->first_tb;
1101 p->first_tb = (TranslationBlock *)((long)tb | n);
1102 invalidate_page_bitmap(p);
1104 #if defined(TARGET_HAS_SMC) || 1
1106 #if defined(CONFIG_USER_ONLY)
1107 if (p->flags & PAGE_WRITE) {
1108 target_ulong addr;
1109 PageDesc *p2;
1110 int prot;
1112         /* force the host page as non-writable (writes will have a
1113 page fault + mprotect overhead) */
1114 page_addr &= qemu_host_page_mask;
1115 prot = 0;
1116 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1117 addr += TARGET_PAGE_SIZE) {
1119 p2 = page_find (addr >> TARGET_PAGE_BITS);
1120 if (!p2)
1121 continue;
1122 prot |= p2->flags;
1123 p2->flags &= ~PAGE_WRITE;
1124 page_get_flags(addr);
1126 mprotect(g2h(page_addr), qemu_host_page_size,
1127 (prot & PAGE_BITS) & ~PAGE_WRITE);
1128 #ifdef DEBUG_TB_INVALIDATE
1129 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1130 page_addr);
1131 #endif
1133 #else
1134 /* if some code is already present, then the pages are already
1135 protected. So we handle the case where only the first TB is
1136 allocated in a physical page */
1137 if (!last_first_tb) {
1138 tlb_protect_code(page_addr);
1140 #endif
1142 #endif /* TARGET_HAS_SMC */
1145 /* Allocate a new translation block. Flush the translation buffer if
1146 too many translation blocks or too much generated code. */
1147 TranslationBlock *tb_alloc(target_ulong pc)
1149 TranslationBlock *tb;
1151 if (nb_tbs >= code_gen_max_blocks ||
1152 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1153 return NULL;
1154 tb = &tbs[nb_tbs++];
1155 tb->pc = pc;
1156 tb->cflags = 0;
1157 return tb;
1160 void tb_free(TranslationBlock *tb)
1162     /* In practice this is mostly used for single-use temporary TBs.
1163        Ignore the hard cases and just back up if this TB happens to
1164 be the last one generated. */
1165 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1166 code_gen_ptr = tb->tc_ptr;
1167 nb_tbs--;
1171 /* add a new TB and link it to the physical page tables. phys_page2 is
1172 (-1) to indicate that only one page contains the TB. */
1173 void tb_link_phys(TranslationBlock *tb,
1174 target_ulong phys_pc, target_ulong phys_page2)
1176 unsigned int h;
1177 TranslationBlock **ptb;
1179 /* Grab the mmap lock to stop another thread invalidating this TB
1180 before we are done. */
1181 mmap_lock();
1182 /* add in the physical hash table */
1183 h = tb_phys_hash_func(phys_pc);
1184 ptb = &tb_phys_hash[h];
1185 tb->phys_hash_next = *ptb;
1186 *ptb = tb;
1188 /* add in the page list */
1189 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1190 if (phys_page2 != -1)
1191 tb_alloc_page(tb, 1, phys_page2);
1192 else
1193 tb->page_addr[1] = -1;
1195 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1196 tb->jmp_next[0] = NULL;
1197 tb->jmp_next[1] = NULL;
1199 /* init original jump addresses */
1200 if (tb->tb_next_offset[0] != 0xffff)
1201 tb_reset_jump(tb, 0);
1202 if (tb->tb_next_offset[1] != 0xffff)
1203 tb_reset_jump(tb, 1);
1205 #ifdef DEBUG_TB_CHECK
1206 tb_page_check();
1207 #endif
1208 mmap_unlock();
1211 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1212 tb[1].tc_ptr. Return NULL if not found */
1213 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1215 int m_min, m_max, m;
1216 unsigned long v;
1217 TranslationBlock *tb;
1219 if (nb_tbs <= 0)
1220 return NULL;
1221 if (tc_ptr < (unsigned long)code_gen_buffer ||
1222 tc_ptr >= (unsigned long)code_gen_ptr)
1223 return NULL;
1224 /* binary search (cf Knuth) */
1225 m_min = 0;
1226 m_max = nb_tbs - 1;
1227 while (m_min <= m_max) {
1228 m = (m_min + m_max) >> 1;
1229 tb = &tbs[m];
1230 v = (unsigned long)tb->tc_ptr;
1231 if (v == tc_ptr)
1232 return tb;
1233 else if (tc_ptr < v) {
1234 m_max = m - 1;
1235 } else {
1236 m_min = m + 1;
1239 return &tbs[m_max];
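/* This binary search relies on tbs[] being filled in order of increasing
   tc_ptr (code_gen_ptr only grows between flushes), so on a miss it returns
   the TB whose generated code starts closest below tc_ptr. */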
1242 static void tb_reset_jump_recursive(TranslationBlock *tb);
1244 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1246 TranslationBlock *tb1, *tb_next, **ptb;
1247 unsigned int n1;
1249 tb1 = tb->jmp_next[n];
1250 if (tb1 != NULL) {
1251 /* find head of list */
1252 for(;;) {
1253 n1 = (long)tb1 & 3;
1254 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1255 if (n1 == 2)
1256 break;
1257 tb1 = tb1->jmp_next[n1];
1259         /* we are now sure that tb jumps to tb1 */
1260 tb_next = tb1;
1262 /* remove tb from the jmp_first list */
1263 ptb = &tb_next->jmp_first;
1264 for(;;) {
1265 tb1 = *ptb;
1266 n1 = (long)tb1 & 3;
1267 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1268 if (n1 == n && tb1 == tb)
1269 break;
1270 ptb = &tb1->jmp_next[n1];
1272 *ptb = tb->jmp_next[n];
1273 tb->jmp_next[n] = NULL;
1275 /* suppress the jump to next tb in generated code */
1276 tb_reset_jump(tb, n);
1278 /* suppress jumps in the tb on which we could have jumped */
1279 tb_reset_jump_recursive(tb_next);
1283 static void tb_reset_jump_recursive(TranslationBlock *tb)
1285 tb_reset_jump_recursive2(tb, 0);
1286 tb_reset_jump_recursive2(tb, 1);
1289 #if defined(TARGET_HAS_ICE)
1290 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1292 target_phys_addr_t addr;
1293 target_ulong pd;
1294 ram_addr_t ram_addr;
1295 PhysPageDesc *p;
1297 addr = cpu_get_phys_page_debug(env, pc);
1298 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1299 if (!p) {
1300 pd = IO_MEM_UNASSIGNED;
1301 } else {
1302 pd = p->phys_offset;
1304 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1305 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1307 #endif
1309 /* Add a watchpoint. */
1310 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1311 int flags, CPUWatchpoint **watchpoint)
1313 target_ulong len_mask = ~(len - 1);
1314 CPUWatchpoint *wp;
1316 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1317 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1318 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1319 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1320 return -EINVAL;
1322 wp = qemu_malloc(sizeof(*wp));
1324 wp->vaddr = addr;
1325 wp->len_mask = len_mask;
1326 wp->flags = flags;
1328 /* keep all GDB-injected watchpoints in front */
1329 if (flags & BP_GDB)
1330 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1331 else
1332 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1334 tlb_flush_page(env, addr);
1336 if (watchpoint)
1337 *watchpoint = wp;
1338 return 0;
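/* Illustrative use (hypothetical values): watch 4 aligned bytes on behalf of
   the debugger with cpu_watchpoint_insert(env, addr, 4, BP_GDB, NULL); the
   length must be a power of two and 'addr' aligned to it, as checked above. */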
1341 /* Remove a specific watchpoint. */
1342 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1343 int flags)
1345 target_ulong len_mask = ~(len - 1);
1346 CPUWatchpoint *wp;
1348 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1349 if (addr == wp->vaddr && len_mask == wp->len_mask
1350 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1351 cpu_watchpoint_remove_by_ref(env, wp);
1352 return 0;
1355 return -ENOENT;
1358 /* Remove a specific watchpoint by reference. */
1359 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1361 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1363 tlb_flush_page(env, watchpoint->vaddr);
1365 qemu_free(watchpoint);
1368 /* Remove all matching watchpoints. */
1369 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1371 CPUWatchpoint *wp, *next;
1373 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1374 if (wp->flags & mask)
1375 cpu_watchpoint_remove_by_ref(env, wp);
1379 /* Add a breakpoint. */
1380 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1381 CPUBreakpoint **breakpoint)
1383 #if defined(TARGET_HAS_ICE)
1384 CPUBreakpoint *bp;
1386 bp = qemu_malloc(sizeof(*bp));
1388 bp->pc = pc;
1389 bp->flags = flags;
1391 /* keep all GDB-injected breakpoints in front */
1392 if (flags & BP_GDB)
1393 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1394 else
1395 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1397 breakpoint_invalidate(env, pc);
1399 if (breakpoint)
1400 *breakpoint = bp;
1401 return 0;
1402 #else
1403 return -ENOSYS;
1404 #endif
1407 /* Remove a specific breakpoint. */
1408 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1410 #if defined(TARGET_HAS_ICE)
1411 CPUBreakpoint *bp;
1413 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1414 if (bp->pc == pc && bp->flags == flags) {
1415 cpu_breakpoint_remove_by_ref(env, bp);
1416 return 0;
1419 return -ENOENT;
1420 #else
1421 return -ENOSYS;
1422 #endif
1425 /* Remove a specific breakpoint by reference. */
1426 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1428 #if defined(TARGET_HAS_ICE)
1429 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1431 breakpoint_invalidate(env, breakpoint->pc);
1433 qemu_free(breakpoint);
1434 #endif
1437 /* Remove all matching breakpoints. */
1438 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1440 #if defined(TARGET_HAS_ICE)
1441 CPUBreakpoint *bp, *next;
1443 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1444 if (bp->flags & mask)
1445 cpu_breakpoint_remove_by_ref(env, bp);
1447 #endif
1450 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1451 CPU loop after each instruction */
1452 void cpu_single_step(CPUState *env, int enabled)
1454 #if defined(TARGET_HAS_ICE)
1455 if (env->singlestep_enabled != enabled) {
1456 env->singlestep_enabled = enabled;
1457 if (kvm_enabled())
1458 kvm_update_guest_debug(env, 0);
1459 else {
1460             /* must flush all the translated code to avoid inconsistencies */
1461 /* XXX: only flush what is necessary */
1462 tb_flush(env);
1465 #endif
1468 /* enable or disable low-level logging */
1469 void cpu_set_log(int log_flags)
1471 loglevel = log_flags;
1472 if (loglevel && !logfile) {
1473 logfile = fopen(logfilename, log_append ? "a" : "w");
1474 if (!logfile) {
1475 perror(logfilename);
1476 _exit(1);
1478 #if !defined(CONFIG_SOFTMMU)
1479 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1481 static char logfile_buf[4096];
1482 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1484 #else
1485 setvbuf(logfile, NULL, _IOLBF, 0);
1486 #endif
1487 log_append = 1;
1489 if (!loglevel && logfile) {
1490 fclose(logfile);
1491 logfile = NULL;
1495 void cpu_set_log_filename(const char *filename)
1497 logfilename = strdup(filename);
1498 if (logfile) {
1499 fclose(logfile);
1500 logfile = NULL;
1502 cpu_set_log(loglevel);
1505 static void cpu_unlink_tb(CPUState *env)
1507 #if defined(USE_NPTL)
1508 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1509 problem and hope the cpu will stop of its own accord. For userspace
1510 emulation this often isn't actually as bad as it sounds. Often
1511 signals are used primarily to interrupt blocking syscalls. */
1512 #else
1513 TranslationBlock *tb;
1514 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1516 tb = env->current_tb;
1517 /* if the cpu is currently executing code, we must unlink it and
1518 all the potentially executing TB */
1519 if (tb && !testandset(&interrupt_lock)) {
1520 env->current_tb = NULL;
1521 tb_reset_jump_recursive(tb);
1522 resetlock(&interrupt_lock);
1524 #endif
1527 /* mask must never be zero, except for A20 change call */
1528 void cpu_interrupt(CPUState *env, int mask)
1530 int old_mask;
1532 old_mask = env->interrupt_request;
1533 env->interrupt_request |= mask;
1535 if (use_icount) {
1536 env->icount_decr.u16.high = 0xffff;
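/* Setting the high half makes the combined 32-bit icount_decr value negative,
   so the generated code takes its exit path at the next instruction-count
   check instead of running the TB to completion. */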
1537 #ifndef CONFIG_USER_ONLY
1538 if (!can_do_io(env)
1539 && (mask & ~old_mask) != 0) {
1540 cpu_abort(env, "Raised interrupt while not in I/O function");
1542 #endif
1543 } else {
1544 cpu_unlink_tb(env);
1548 void cpu_reset_interrupt(CPUState *env, int mask)
1550 env->interrupt_request &= ~mask;
1553 void cpu_exit(CPUState *env)
1555 env->exit_request = 1;
1556 cpu_unlink_tb(env);
1559 const CPULogItem cpu_log_items[] = {
1560 { CPU_LOG_TB_OUT_ASM, "out_asm",
1561 "show generated host assembly code for each compiled TB" },
1562 { CPU_LOG_TB_IN_ASM, "in_asm",
1563 "show target assembly code for each compiled TB" },
1564 { CPU_LOG_TB_OP, "op",
1565 "show micro ops for each compiled TB" },
1566 { CPU_LOG_TB_OP_OPT, "op_opt",
1567 "show micro ops "
1568 #ifdef TARGET_I386
1569 "before eflags optimization and "
1570 #endif
1571 "after liveness analysis" },
1572 { CPU_LOG_INT, "int",
1573 "show interrupts/exceptions in short format" },
1574 { CPU_LOG_EXEC, "exec",
1575 "show trace before each executed TB (lots of logs)" },
1576 { CPU_LOG_TB_CPU, "cpu",
1577 "show CPU state before block translation" },
1578 #ifdef TARGET_I386
1579 { CPU_LOG_PCALL, "pcall",
1580 "show protected mode far calls/returns/exceptions" },
1581 { CPU_LOG_RESET, "cpu_reset",
1582 "show CPU state before CPU resets" },
1583 #endif
1584 #ifdef DEBUG_IOPORT
1585 { CPU_LOG_IOPORT, "ioport",
1586 "show all i/o ports accesses" },
1587 #endif
1588 { 0, NULL, NULL },
1591 static int cmp1(const char *s1, int n, const char *s2)
1593 if (strlen(s2) != n)
1594 return 0;
1595 return memcmp(s1, s2, n) == 0;
1598 /* takes a comma-separated list of log masks. Return 0 if error. */
1599 int cpu_str_to_log_mask(const char *str)
1601 const CPULogItem *item;
1602 int mask;
1603 const char *p, *p1;
1605 p = str;
1606 mask = 0;
1607 for(;;) {
1608 p1 = strchr(p, ',');
1609 if (!p1)
1610 p1 = p + strlen(p);
1611 if(cmp1(p,p1-p,"all")) {
1612 for(item = cpu_log_items; item->mask != 0; item++) {
1613 mask |= item->mask;
1615 } else {
1616 for(item = cpu_log_items; item->mask != 0; item++) {
1617 if (cmp1(p, p1 - p, item->name))
1618 goto found;
1620 return 0;
1622 found:
1623 mask |= item->mask;
1624 if (*p1 != ',')
1625 break;
1626 p = p1 + 1;
1628 return mask;
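/* Illustrative use: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and "all" selects every entry of
   cpu_log_items; an unknown name makes the whole call return 0. */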
1631 void cpu_abort(CPUState *env, const char *fmt, ...)
1633 va_list ap;
1634 va_list ap2;
1636 va_start(ap, fmt);
1637 va_copy(ap2, ap);
1638 fprintf(stderr, "qemu: fatal: ");
1639 vfprintf(stderr, fmt, ap);
1640 fprintf(stderr, "\n");
1641 #ifdef TARGET_I386
1642 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1643 #else
1644 cpu_dump_state(env, stderr, fprintf, 0);
1645 #endif
1646 if (qemu_log_enabled()) {
1647 qemu_log("qemu: fatal: ");
1648 qemu_log_vprintf(fmt, ap2);
1649 qemu_log("\n");
1650 #ifdef TARGET_I386
1651 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1652 #else
1653 log_cpu_state(env, 0);
1654 #endif
1655 qemu_log_flush();
1656 qemu_log_close();
1658 va_end(ap2);
1659 va_end(ap);
1660 abort();
1663 CPUState *cpu_copy(CPUState *env)
1665 CPUState *new_env = cpu_init(env->cpu_model_str);
1666 CPUState *next_cpu = new_env->next_cpu;
1667 int cpu_index = new_env->cpu_index;
1668 #if defined(TARGET_HAS_ICE)
1669 CPUBreakpoint *bp;
1670 CPUWatchpoint *wp;
1671 #endif
1673 memcpy(new_env, env, sizeof(CPUState));
1675 /* Preserve chaining and index. */
1676 new_env->next_cpu = next_cpu;
1677 new_env->cpu_index = cpu_index;
1679 /* Clone all break/watchpoints.
1680 Note: Once we support ptrace with hw-debug register access, make sure
1681 BP_CPU break/watchpoints are handled correctly on clone. */
1682 TAILQ_INIT(&env->breakpoints);
1683 TAILQ_INIT(&env->watchpoints);
1684 #if defined(TARGET_HAS_ICE)
1685 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1686 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1688 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1689 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1690 wp->flags, NULL);
1692 #endif
1694 return new_env;
1697 #if !defined(CONFIG_USER_ONLY)
1699 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1701 unsigned int i;
1703 /* Discard jump cache entries for any tb which might potentially
1704 overlap the flushed page. */
1705 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1706 memset (&env->tb_jmp_cache[i], 0,
1707 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1709 i = tb_jmp_cache_hash_page(addr);
1710 memset (&env->tb_jmp_cache[i], 0,
1711 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1714 /* NOTE: if flush_global is true, also flush global entries (not
1715 implemented yet) */
1716 void tlb_flush(CPUState *env, int flush_global)
1718 int i;
1720 #if defined(DEBUG_TLB)
1721 printf("tlb_flush:\n");
1722 #endif
1723 /* must reset current TB so that interrupts cannot modify the
1724 links while we are modifying them */
1725 env->current_tb = NULL;
1727 for(i = 0; i < CPU_TLB_SIZE; i++) {
1728 env->tlb_table[0][i].addr_read = -1;
1729 env->tlb_table[0][i].addr_write = -1;
1730 env->tlb_table[0][i].addr_code = -1;
1731 env->tlb_table[1][i].addr_read = -1;
1732 env->tlb_table[1][i].addr_write = -1;
1733 env->tlb_table[1][i].addr_code = -1;
1734 #if (NB_MMU_MODES >= 3)
1735 env->tlb_table[2][i].addr_read = -1;
1736 env->tlb_table[2][i].addr_write = -1;
1737 env->tlb_table[2][i].addr_code = -1;
1738 #if (NB_MMU_MODES == 4)
1739 env->tlb_table[3][i].addr_read = -1;
1740 env->tlb_table[3][i].addr_write = -1;
1741 env->tlb_table[3][i].addr_code = -1;
1742 #endif
1743 #endif
1746 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1748 #ifdef USE_KQEMU
1749 if (env->kqemu_enabled) {
1750 kqemu_flush(env, flush_global);
1752 #endif
1753 tlb_flush_count++;
1756 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1758 if (addr == (tlb_entry->addr_read &
1759 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1760 addr == (tlb_entry->addr_write &
1761 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1762 addr == (tlb_entry->addr_code &
1763 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1764 tlb_entry->addr_read = -1;
1765 tlb_entry->addr_write = -1;
1766 tlb_entry->addr_code = -1;
1770 void tlb_flush_page(CPUState *env, target_ulong addr)
1772 int i;
1774 #if defined(DEBUG_TLB)
1775 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1776 #endif
1777 /* must reset current TB so that interrupts cannot modify the
1778 links while we are modifying them */
1779 env->current_tb = NULL;
1781 addr &= TARGET_PAGE_MASK;
1782 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1783 tlb_flush_entry(&env->tlb_table[0][i], addr);
1784 tlb_flush_entry(&env->tlb_table[1][i], addr);
1785 #if (NB_MMU_MODES >= 3)
1786 tlb_flush_entry(&env->tlb_table[2][i], addr);
1787 #if (NB_MMU_MODES == 4)
1788 tlb_flush_entry(&env->tlb_table[3][i], addr);
1789 #endif
1790 #endif
1792 tlb_flush_jmp_cache(env, addr);
1794 #ifdef USE_KQEMU
1795 if (env->kqemu_enabled) {
1796 kqemu_flush_page(env, addr);
1798 #endif
1801 /* update the TLBs so that writes to code in the virtual page 'addr'
1802 can be detected */
1803 static void tlb_protect_code(ram_addr_t ram_addr)
1805 cpu_physical_memory_reset_dirty(ram_addr,
1806 ram_addr + TARGET_PAGE_SIZE,
1807 CODE_DIRTY_FLAG);
1810 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1811    tested for self-modifying code */
1812 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1813 target_ulong vaddr)
1815 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1818 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1819 unsigned long start, unsigned long length)
1821 unsigned long addr;
1822 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1823 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1824 if ((addr - start) < length) {
1825 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1830 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1831 int dirty_flags)
1833 CPUState *env;
1834 unsigned long length, start1;
1835 int i, mask, len;
1836 uint8_t *p;
1838 start &= TARGET_PAGE_MASK;
1839 end = TARGET_PAGE_ALIGN(end);
1841 length = end - start;
1842 if (length == 0)
1843 return;
1844 len = length >> TARGET_PAGE_BITS;
1845 #ifdef USE_KQEMU
1846 /* XXX: should not depend on cpu context */
1847 env = first_cpu;
1848 if (env->kqemu_enabled) {
1849 ram_addr_t addr;
1850 addr = start;
1851 for(i = 0; i < len; i++) {
1852 kqemu_set_notdirty(env, addr);
1853 addr += TARGET_PAGE_SIZE;
1856 #endif
1857 mask = ~dirty_flags;
1858 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1859 for(i = 0; i < len; i++)
1860 p[i] &= mask;
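/* phys_ram_dirty holds one byte of dirty flags per target page (e.g.
   CODE_DIRTY_FLAG); the loop above clears only the requested flag bits for
   every page in [start, end). */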
1862 /* we modify the TLB cache so that the dirty bit will be set again
1863 when accessing the range */
1864 start1 = start + (unsigned long)phys_ram_base;
1865 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1866 for(i = 0; i < CPU_TLB_SIZE; i++)
1867 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1868 for(i = 0; i < CPU_TLB_SIZE; i++)
1869 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1870 #if (NB_MMU_MODES >= 3)
1871 for(i = 0; i < CPU_TLB_SIZE; i++)
1872 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1873 #if (NB_MMU_MODES == 4)
1874 for(i = 0; i < CPU_TLB_SIZE; i++)
1875 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1876 #endif
1877 #endif
1881 int cpu_physical_memory_set_dirty_tracking(int enable)
1883 in_migration = enable;
1884 return 0;
1887 int cpu_physical_memory_get_dirty_tracking(void)
1889 return in_migration;
1892 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1894 if (kvm_enabled())
1895 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1898 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1900 ram_addr_t ram_addr;
1902 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1903 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1904 tlb_entry->addend - (unsigned long)phys_ram_base;
1905 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1906 tlb_entry->addr_write |= TLB_NOTDIRTY;
1911 /* update the TLB according to the current state of the dirty bits */
1912 void cpu_tlb_update_dirty(CPUState *env)
1914 int i;
1915 for(i = 0; i < CPU_TLB_SIZE; i++)
1916 tlb_update_dirty(&env->tlb_table[0][i]);
1917 for(i = 0; i < CPU_TLB_SIZE; i++)
1918 tlb_update_dirty(&env->tlb_table[1][i]);
1919 #if (NB_MMU_MODES >= 3)
1920 for(i = 0; i < CPU_TLB_SIZE; i++)
1921 tlb_update_dirty(&env->tlb_table[2][i]);
1922 #if (NB_MMU_MODES == 4)
1923 for(i = 0; i < CPU_TLB_SIZE; i++)
1924 tlb_update_dirty(&env->tlb_table[3][i]);
1925 #endif
1926 #endif
1929 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1931 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1932 tlb_entry->addr_write = vaddr;
1935 /* update the TLB corresponding to virtual page vaddr
1936 so that it is no longer dirty */
1937 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1939 int i;
1941 vaddr &= TARGET_PAGE_MASK;
1942 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1943 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1944 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1945 #if (NB_MMU_MODES >= 3)
1946 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1947 #if (NB_MMU_MODES == 4)
1948 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1949 #endif
1950 #endif
1953 /* add a new TLB entry. At most one entry for a given virtual address
1954 is permitted. Return 0 if OK or 2 if the page could not be mapped
1955 (can only happen in non SOFTMMU mode for I/O pages or pages
1956 conflicting with the host address space). */
1957 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1958 target_phys_addr_t paddr, int prot,
1959 int mmu_idx, int is_softmmu)
1961 PhysPageDesc *p;
1962 unsigned long pd;
1963 unsigned int index;
1964 target_ulong address;
1965 target_ulong code_address;
1966 target_phys_addr_t addend;
1967 int ret;
1968 CPUTLBEntry *te;
1969 CPUWatchpoint *wp;
1970 target_phys_addr_t iotlb;
1972 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1973 if (!p) {
1974 pd = IO_MEM_UNASSIGNED;
1975 } else {
1976 pd = p->phys_offset;
1978 #if defined(DEBUG_TLB)
1979 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1980 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1981 #endif
1983 ret = 0;
1984 address = vaddr;
1985 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1986 /* IO memory case (romd handled later) */
1987 address |= TLB_MMIO;
1989 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1990 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1991 /* Normal RAM. */
1992 iotlb = pd & TARGET_PAGE_MASK;
1993 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1994 iotlb |= IO_MEM_NOTDIRTY;
1995 else
1996 iotlb |= IO_MEM_ROM;
1997 } else {
1998         /* IO handlers are currently passed a physical address.
1999 It would be nice to pass an offset from the base address
2000 of that region. This would avoid having to special case RAM,
2001 and avoid full address decoding in every device.
2002 We can't use the high bits of pd for this because
2003 IO_MEM_ROMD uses these as a ram address. */
2004 iotlb = (pd & ~TARGET_PAGE_MASK);
2005 if (p) {
2006 iotlb += p->region_offset;
2007 } else {
2008 iotlb += paddr;
2012 code_address = address;
2013 /* Make accesses to pages with watchpoints go via the
2014 watchpoint trap routines. */
2015 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2016 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2017 iotlb = io_mem_watch + paddr;
2018 /* TODO: The memory case can be optimized by not trapping
2019 reads of pages with a write breakpoint. */
2020 address |= TLB_MMIO;
2024 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2025 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2026 te = &env->tlb_table[mmu_idx][index];
2027 te->addend = addend - vaddr;
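/* With this addend, any guest virtual address x inside the page maps to the
   host pointer x + te->addend, i.e. phys_ram_base plus the physical offset,
   which is what the softmmu fast path uses for direct RAM accesses. */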
2028 if (prot & PAGE_READ) {
2029 te->addr_read = address;
2030 } else {
2031 te->addr_read = -1;
2034 if (prot & PAGE_EXEC) {
2035 te->addr_code = code_address;
2036 } else {
2037 te->addr_code = -1;
2039 if (prot & PAGE_WRITE) {
2040 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2041 (pd & IO_MEM_ROMD)) {
2042 /* Write access calls the I/O callback. */
2043 te->addr_write = address | TLB_MMIO;
2044 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2045 !cpu_physical_memory_is_dirty(pd)) {
2046 te->addr_write = address | TLB_NOTDIRTY;
2047 } else {
2048 te->addr_write = address;
2050 } else {
2051 te->addr_write = -1;
2053 return ret;
2056 #else
2058 void tlb_flush(CPUState *env, int flush_global)
2062 void tlb_flush_page(CPUState *env, target_ulong addr)
2066 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2067 target_phys_addr_t paddr, int prot,
2068 int mmu_idx, int is_softmmu)
2070 return 0;
2073 /* dump memory mappings */
2074 void page_dump(FILE *f)
2076 unsigned long start, end;
2077 int i, j, prot, prot1;
2078 PageDesc *p;
2080 fprintf(f, "%-8s %-8s %-8s %s\n",
2081 "start", "end", "size", "prot");
2082 start = -1;
2083 end = -1;
2084 prot = 0;
2085 for(i = 0; i <= L1_SIZE; i++) {
2086 if (i < L1_SIZE)
2087 p = l1_map[i];
2088 else
2089 p = NULL;
2090 for(j = 0;j < L2_SIZE; j++) {
2091 if (!p)
2092 prot1 = 0;
2093 else
2094 prot1 = p[j].flags;
2095 if (prot1 != prot) {
2096 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2097 if (start != -1) {
2098 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2099 start, end, end - start,
2100 prot & PAGE_READ ? 'r' : '-',
2101 prot & PAGE_WRITE ? 'w' : '-',
2102 prot & PAGE_EXEC ? 'x' : '-');
2104 if (prot1 != 0)
2105 start = end;
2106 else
2107 start = -1;
2108 prot = prot1;
2110 if (!p)
2111 break;
2116 int page_get_flags(target_ulong address)
2118 PageDesc *p;
2120 p = page_find(address >> TARGET_PAGE_BITS);
2121 if (!p)
2122 return 0;
2123 return p->flags;
2126 /* modify the flags of a page and invalidate the code if
2127 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2128 depending on PAGE_WRITE */
2129 void page_set_flags(target_ulong start, target_ulong end, int flags)
2131 PageDesc *p;
2132 target_ulong addr;
2134 /* mmap_lock should already be held. */
2135 start = start & TARGET_PAGE_MASK;
2136 end = TARGET_PAGE_ALIGN(end);
2137 if (flags & PAGE_WRITE)
2138 flags |= PAGE_WRITE_ORG;
2139 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2140 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2141 /* We may be called for host regions that are outside guest
2142 address space. */
2143 if (!p)
2144 return;
2145 /* if the write protection is set, then we invalidate the code
2146 inside */
2147 if (!(p->flags & PAGE_WRITE) &&
2148 (flags & PAGE_WRITE) &&
2149 p->first_tb) {
2150 tb_invalidate_phys_page(addr, 0, NULL);
2152 p->flags = flags;
2156 int page_check_range(target_ulong start, target_ulong len, int flags)
2158 PageDesc *p;
2159 target_ulong end;
2160 target_ulong addr;
2162 if (start + len < start)
2163 /* we've wrapped around */
2164 return -1;
2166 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2167 start = start & TARGET_PAGE_MASK;
2169 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2170 p = page_find(addr >> TARGET_PAGE_BITS);
2171 if( !p )
2172 return -1;
2173 if( !(p->flags & PAGE_VALID) )
2174 return -1;
2176 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2177 return -1;
2178 if (flags & PAGE_WRITE) {
2179 if (!(p->flags & PAGE_WRITE_ORG))
2180 return -1;
2181 /* unprotect the page if it was put read-only because it
2182 contains translated code */
2183 if (!(p->flags & PAGE_WRITE)) {
2184 if (!page_unprotect(addr, 0, NULL))
2185 return -1;
2187 return 0;
2190 return 0;
2193 /* called from signal handler: invalidate the code and unprotect the
2194 page. Return TRUE if the fault was successfully handled. */
2195 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2197 unsigned int page_index, prot, pindex;
2198 PageDesc *p, *p1;
2199 target_ulong host_start, host_end, addr;
2201 /* Technically this isn't safe inside a signal handler. However we
2202 know this only ever happens in a synchronous SEGV handler, so in
2203 practice it seems to be ok. */
2204 mmap_lock();
2206 host_start = address & qemu_host_page_mask;
2207 page_index = host_start >> TARGET_PAGE_BITS;
2208 p1 = page_find(page_index);
2209 if (!p1) {
2210 mmap_unlock();
2211 return 0;
2213 host_end = host_start + qemu_host_page_size;
2214 p = p1;
2215 prot = 0;
2216 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2217 prot |= p->flags;
2218 p++;
2220 /* if the page was really writable, then we change its
2221 protection back to writable */
2222 if (prot & PAGE_WRITE_ORG) {
2223 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2224 if (!(p1[pindex].flags & PAGE_WRITE)) {
2225 mprotect((void *)g2h(host_start), qemu_host_page_size,
2226 (prot & PAGE_BITS) | PAGE_WRITE);
2227 p1[pindex].flags |= PAGE_WRITE;
2228 /* and since the content will be modified, we must invalidate
2229 the corresponding translated code. */
2230 tb_invalidate_phys_page(address, pc, puc);
2231 #ifdef DEBUG_TB_CHECK
2232 tb_invalidate_check(address);
2233 #endif
2234 mmap_unlock();
2235 return 1;
2238 mmap_unlock();
2239 return 0;
2242 static inline void tlb_set_dirty(CPUState *env,
2243 unsigned long addr, target_ulong vaddr)
2246 #endif /* defined(CONFIG_USER_ONLY) */
2248 #if !defined(CONFIG_USER_ONLY)
2250 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2251 ram_addr_t memory, ram_addr_t region_offset);
2252 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2253 ram_addr_t orig_memory, ram_addr_t region_offset);
2254 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2255 need_subpage) \
2256 do { \
2257 if (addr > start_addr) \
2258 start_addr2 = 0; \
2259 else { \
2260 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2261 if (start_addr2 > 0) \
2262 need_subpage = 1; \
2265 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2266 end_addr2 = TARGET_PAGE_SIZE - 1; \
2267 else { \
2268 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2269 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2270 need_subpage = 1; \
2272 } while (0)
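/* Descriptive note (added for clarity): CHECK_SUBPAGE computes, for the page
   containing 'addr', the first and last byte offsets within that page
   (start_addr2/end_addr2) that the [start_addr, start_addr + orig_size)
   region actually covers, and sets need_subpage when the region does not
   span the whole page, i.e. when per-subpage dispatch is required. */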
2274 /* register physical memory. 'size' must be a multiple of the target
2275 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2276 io memory page. The address used when calling the IO function is
2277 the offset from the start of the region, plus region_offset. Both
2278 start_addr and region_offset are rounded down to a page boundary
2279 before calculating this offset. This should not be a problem unless
2280 the low bits of start_addr and region_offset differ. */
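/* Illustrative example (added; base and sizes are hypothetical): a device
   split across several banks can make each bank's handlers see addresses
   relative to the whole device by passing a region_offset, e.g.
       cpu_register_physical_memory_offset(base + 0x4000, 0x1000,
                                           io_index, 0x4000);
   so the handlers receive 0x4000..0x4fff instead of 0x0..0xfff (the values
   here assume page-aligned offsets). */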
2281 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2282 ram_addr_t size,
2283 ram_addr_t phys_offset,
2284 ram_addr_t region_offset)
2286 target_phys_addr_t addr, end_addr;
2287 PhysPageDesc *p;
2288 CPUState *env;
2289 ram_addr_t orig_size = size;
2290 void *subpage;
2292 #ifdef USE_KQEMU
2293 /* XXX: should not depend on cpu context */
2294 env = first_cpu;
2295 if (env->kqemu_enabled) {
2296 kqemu_set_phys_mem(start_addr, size, phys_offset);
2298 #endif
2299 if (kvm_enabled())
2300 kvm_set_phys_mem(start_addr, size, phys_offset);
2302 if (phys_offset == IO_MEM_UNASSIGNED) {
2303 region_offset = start_addr;
2305 region_offset &= TARGET_PAGE_MASK;
2306 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2307 end_addr = start_addr + (target_phys_addr_t)size;
2308 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2309 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2310 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2311 ram_addr_t orig_memory = p->phys_offset;
2312 target_phys_addr_t start_addr2, end_addr2;
2313 int need_subpage = 0;
2315 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2316 need_subpage);
2317 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2318 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2319 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2320 &p->phys_offset, orig_memory,
2321 p->region_offset);
2322 } else {
2323 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2324 >> IO_MEM_SHIFT];
2326 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2327 region_offset);
2328 p->region_offset = 0;
2329 } else {
2330 p->phys_offset = phys_offset;
2331 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2332 (phys_offset & IO_MEM_ROMD))
2333 phys_offset += TARGET_PAGE_SIZE;
2335 } else {
2336 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2337 p->phys_offset = phys_offset;
2338 p->region_offset = region_offset;
2339 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2340 (phys_offset & IO_MEM_ROMD)) {
2341 phys_offset += TARGET_PAGE_SIZE;
2342 } else {
2343 target_phys_addr_t start_addr2, end_addr2;
2344 int need_subpage = 0;
2346 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2347 end_addr2, need_subpage);
2349 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2350 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2351 &p->phys_offset, IO_MEM_UNASSIGNED,
2352 addr & TARGET_PAGE_MASK);
2353 subpage_register(subpage, start_addr2, end_addr2,
2354 phys_offset, region_offset);
2355 p->region_offset = 0;
2359 region_offset += TARGET_PAGE_SIZE;
2362 /* since each CPU stores ram addresses in its TLB cache, we must
2363 reset the modified entries */
2364 /* XXX: slow ! */
2365 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2366 tlb_flush(env, 1);
2370 /* XXX: temporary until new memory mapping API */
2371 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2373 PhysPageDesc *p;
2375 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2376 if (!p)
2377 return IO_MEM_UNASSIGNED;
2378 return p->phys_offset;
2381 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2383 if (kvm_enabled())
2384 kvm_coalesce_mmio_region(addr, size);
2387 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2389 if (kvm_enabled())
2390 kvm_uncoalesce_mmio_region(addr, size);
2393 /* XXX: better than nothing */
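/* Descriptive note (added for clarity): this is a simple bump allocator over
   the preallocated phys_ram area; phys_ram_alloc_offset only ever grows and
   qemu_ram_free() below is a no-op, so guest RAM is never actually
   released. */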
2394 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2396 ram_addr_t addr;
2397 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2398 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2399 (uint64_t)size, (uint64_t)phys_ram_size);
2400 abort();
2402 addr = phys_ram_alloc_offset;
2403 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2404 return addr;
2407 void qemu_ram_free(ram_addr_t addr)
2411 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2413 #ifdef DEBUG_UNASSIGNED
2414 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2415 #endif
2416 printf("unassigned_mem_readb Unassigned mem read " TARGET_FMT_plx "\n", addr);
2417 exit(-1);
2418 #if defined(TARGET_SPARC)
2419 do_unassigned_access(addr, 0, 0, 0, 1);
2420 #endif
2421 return 0;
2424 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2426 #ifdef DEBUG_UNASSIGNED
2427 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2428 #endif
2429 printf("unassigned_mem_readw Unassigned mem read " TARGET_FMT_plx "\n", addr);
2430 exit(-1);
2431 #if defined(TARGET_SPARC)
2432 do_unassigned_access(addr, 0, 0, 0, 2);
2433 #endif
2434 return 0;
2437 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2439 #ifdef DEBUG_UNASSIGNED
2440 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2441 #endif
2442 printf("unassigned_mem_readl Unassigned mem read " TARGET_FMT_plx "\n", addr);
2443 if (addr != 0x000000001fbffffcLL)
2444 exit(-1);
2446 #if defined(TARGET_SPARC)
2447 do_unassigned_access(addr, 0, 0, 0, 4);
2448 #endif
2449 return 0;
2452 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2454 #ifdef DEBUG_UNASSIGNED
2455 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2456 #endif
2458 printf("%s Unassigned mem read " TARGET_FMT_plx " pc "TARGET_FMT_plx"\n", __FUNCTION__,addr,cpu_single_env->active_tc.PC);
2459 exit(-1);
2462 #if defined(TARGET_SPARC)
2463 do_unassigned_access(addr, 1, 0, 0, 1);
2464 #endif
2467 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2469 #ifdef DEBUG_UNASSIGNED
2470 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2471 #endif
2473 printf("%s Unassigned mem read " TARGET_FMT_plx " pc "TARGET_FMT_plx"\n", __FUNCTION__,addr,cpu_single_env->active_tc.PC);
2474 exit(-1);
2477 #if defined(TARGET_SPARC)
2478 do_unassigned_access(addr, 1, 0, 0, 2);
2479 #endif
2482 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2484 #ifdef DEBUG_UNASSIGNED
2485 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2486 #endif
2488 printf("%s Unassigned mem read " TARGET_FMT_plx " pc "TARGET_FMT_plx"\n", __FUNCTION__,addr,cpu_single_env->active_tc.PC);
2489 exit(-1);
2492 #if defined(TARGET_SPARC)
2493 do_unassigned_access(addr, 1, 0, 0, 4);
2494 #endif
2497 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2498 unassigned_mem_readb,
2499 unassigned_mem_readw,
2500 unassigned_mem_readl,
2503 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2504 unassigned_mem_writeb,
2505 unassigned_mem_writew,
2506 unassigned_mem_writel,
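/* Descriptive note (added for clarity): the notdirty handlers below back the
   IO_MEM_NOTDIRTY slot installed by tlb_set_page_exec() for clean RAM pages.
   The first write to such a page is routed through here so that any
   translated code overlapping the page can be invalidated and the dirty bits
   updated; once the page is fully dirty (dirty_flags == 0xff) the TLB entry
   is switched back to a plain RAM mapping via tlb_set_dirty(). */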
2509 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2510 uint32_t val)
2512 int dirty_flags;
2513 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2514 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2515 #if !defined(CONFIG_USER_ONLY)
2516 tb_invalidate_phys_page_fast(ram_addr, 1);
2517 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2518 #endif
2520 stb_p(phys_ram_base + ram_addr, val);
2521 #ifdef USE_KQEMU
2522 if (cpu_single_env->kqemu_enabled &&
2523 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2524 kqemu_modify_page(cpu_single_env, ram_addr);
2525 #endif
2526 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2527 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2528 /* we remove the notdirty callback only if the code has been
2529 flushed */
2530 if (dirty_flags == 0xff)
2531 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2534 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2535 uint32_t val)
2537 int dirty_flags;
2538 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2539 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2540 #if !defined(CONFIG_USER_ONLY)
2541 tb_invalidate_phys_page_fast(ram_addr, 2);
2542 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2543 #endif
2545 stw_p(phys_ram_base + ram_addr, val);
2546 #ifdef USE_KQEMU
2547 if (cpu_single_env->kqemu_enabled &&
2548 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2549 kqemu_modify_page(cpu_single_env, ram_addr);
2550 #endif
2551 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2552 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2553 /* we remove the notdirty callback only if the code has been
2554 flushed */
2555 if (dirty_flags == 0xff)
2556 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2559 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2560 uint32_t val)
2562 int dirty_flags;
2563 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2564 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2565 #if !defined(CONFIG_USER_ONLY)
2566 tb_invalidate_phys_page_fast(ram_addr, 4);
2567 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2568 #endif
2570 stl_p(phys_ram_base + ram_addr, val);
2571 #ifdef USE_KQEMU
2572 if (cpu_single_env->kqemu_enabled &&
2573 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2574 kqemu_modify_page(cpu_single_env, ram_addr);
2575 #endif
2576 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2577 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2578 /* we remove the notdirty callback only if the code has been
2579 flushed */
2580 if (dirty_flags == 0xff)
2581 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2584 static CPUReadMemoryFunc *error_mem_read[3] = {
2585 NULL, /* never used */
2586 NULL, /* never used */
2587 NULL, /* never used */
2590 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2591 notdirty_mem_writeb,
2592 notdirty_mem_writew,
2593 notdirty_mem_writel,
2596 /* Generate a debug exception if a watchpoint has been hit. */
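/* Descriptive note (added for clarity): check_watchpoint() is reached from
   the watch_mem_* handlers below.  On a hit it invalidates the current TB and
   either raises EXCP_DEBUG immediately (BP_STOP_BEFORE_ACCESS) or regenerates
   a single-instruction TB so the access completes before the debug exception
   is delivered. */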
2597 static void check_watchpoint(int offset, int len_mask, int flags)
2599 CPUState *env = cpu_single_env;
2600 target_ulong pc, cs_base;
2601 TranslationBlock *tb;
2602 target_ulong vaddr;
2603 CPUWatchpoint *wp;
2604 int cpu_flags;
2606 if (env->watchpoint_hit) {
2607 /* We re-entered the check after replacing the TB. Now raise
2608 * the debug interrupt so that it will trigger after the
2609 * current instruction. */
2610 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2611 return;
2613 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2614 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2615 if ((vaddr == (wp->vaddr & len_mask) ||
2616 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2617 wp->flags |= BP_WATCHPOINT_HIT;
2618 if (!env->watchpoint_hit) {
2619 env->watchpoint_hit = wp;
2620 tb = tb_find_pc(env->mem_io_pc);
2621 if (!tb) {
2622 cpu_abort(env, "check_watchpoint: could not find TB for "
2623 "pc=%p", (void *)env->mem_io_pc);
2625 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2626 tb_phys_invalidate(tb, -1);
2627 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2628 env->exception_index = EXCP_DEBUG;
2629 } else {
2630 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2631 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2633 cpu_resume_from_signal(env, NULL);
2635 } else {
2636 wp->flags &= ~BP_WATCHPOINT_HIT;
2641 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2642 so these check for a hit then pass through to the normal out-of-line
2643 phys routines. */
2644 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2646 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2647 return ldub_phys(addr);
2650 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2652 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2653 return lduw_phys(addr);
2656 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2658 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2659 return ldl_phys(addr);
2662 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2663 uint32_t val)
2665 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2666 stb_phys(addr, val);
2669 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2670 uint32_t val)
2672 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2673 stw_phys(addr, val);
2676 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2677 uint32_t val)
2679 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2680 stl_phys(addr, val);
2683 static CPUReadMemoryFunc *watch_mem_read[3] = {
2684 watch_mem_readb,
2685 watch_mem_readw,
2686 watch_mem_readl,
2689 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2690 watch_mem_writeb,
2691 watch_mem_writew,
2692 watch_mem_writel,
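/* Descriptive note (added for clarity): the subpage_* routines below
   implement sub-page dispatch for pages that contain more than one memory
   region.  A subpage_t records, per sub-page offset and access size, which
   io_mem handler and opaque pointer to forward to; subpage_register() fills
   that table and subpage_init() allocates the container and registers it as
   an io region flagged with IO_MEM_SUBPAGE. */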
2695 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2696 unsigned int len)
2698 uint32_t ret;
2699 unsigned int idx;
2701 idx = SUBPAGE_IDX(addr);
2702 #if defined(DEBUG_SUBPAGE)
2703 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2704 mmio, len, addr, idx);
2705 #endif
2706 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2707 addr + mmio->region_offset[idx][0][len]);
2709 return ret;
2712 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2713 uint32_t value, unsigned int len)
2715 unsigned int idx;
2717 idx = SUBPAGE_IDX(addr);
2718 #if defined(DEBUG_SUBPAGE)
2719 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2720 mmio, len, addr, idx, value);
2721 #endif
2722 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2723 addr + mmio->region_offset[idx][1][len],
2724 value);
2727 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2729 #if defined(DEBUG_SUBPAGE)
2730 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2731 #endif
2733 return subpage_readlen(opaque, addr, 0);
2736 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2737 uint32_t value)
2739 #if defined(DEBUG_SUBPAGE)
2740 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2741 #endif
2742 subpage_writelen(opaque, addr, value, 0);
2745 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2747 #if defined(DEBUG_SUBPAGE)
2748 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2749 #endif
2751 return subpage_readlen(opaque, addr, 1);
2754 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2755 uint32_t value)
2757 #if defined(DEBUG_SUBPAGE)
2758 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2759 #endif
2760 subpage_writelen(opaque, addr, value, 1);
2763 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2765 #if defined(DEBUG_SUBPAGE)
2766 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2767 #endif
2769 return subpage_readlen(opaque, addr, 2);
2772 static void subpage_writel (void *opaque,
2773 target_phys_addr_t addr, uint32_t value)
2775 #if defined(DEBUG_SUBPAGE)
2776 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2777 #endif
2778 subpage_writelen(opaque, addr, value, 2);
2781 static CPUReadMemoryFunc *subpage_read[] = {
2782 &subpage_readb,
2783 &subpage_readw,
2784 &subpage_readl,
2787 static CPUWriteMemoryFunc *subpage_write[] = {
2788 &subpage_writeb,
2789 &subpage_writew,
2790 &subpage_writel,
2793 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2794 ram_addr_t memory, ram_addr_t region_offset)
2796 int idx, eidx;
2797 unsigned int i;
2799 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2800 return -1;
2801 idx = SUBPAGE_IDX(start);
2802 eidx = SUBPAGE_IDX(end);
2803 #if defined(DEBUG_SUBPAGE)
2804 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2805 mmio, start, end, idx, eidx, memory);
2806 #endif
2807 memory >>= IO_MEM_SHIFT;
2808 for (; idx <= eidx; idx++) {
2809 for (i = 0; i < 4; i++) {
2810 if (io_mem_read[memory][i]) {
2811 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2812 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2813 mmio->region_offset[idx][0][i] = region_offset;
2815 if (io_mem_write[memory][i]) {
2816 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2817 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2818 mmio->region_offset[idx][1][i] = region_offset;
2823 return 0;
2826 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2827 ram_addr_t orig_memory, ram_addr_t region_offset)
2829 subpage_t *mmio;
2830 int subpage_memory;
2832 mmio = qemu_mallocz(sizeof(subpage_t));
2834 mmio->base = base;
2835 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2836 #if defined(DEBUG_SUBPAGE)
2837 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2838 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2839 #endif
2840 *phys = subpage_memory | IO_MEM_SUBPAGE;
2841 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2842 region_offset);
2844 return mmio;
2847 static int get_free_io_mem_idx(void)
2849 int i;
2851 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2852 if (!io_mem_used[i]) {
2853 io_mem_used[i] = 1;
2854 return i;
2857 return -1;
2860 static void io_mem_init(void)
2862 int i;
2864 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2865 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2866 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2867 for (i=0; i<5; i++)
2868 io_mem_used[i] = 1;
2870 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2871 watch_mem_write, NULL);
2872 /* alloc dirty bits array */
2873 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2874 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2877 /* mem_read and mem_write are arrays of functions containing the
2878 function to access byte (index 0), word (index 1) and dword (index
2879 2). Functions can be omitted with a NULL function pointer. The
2880 registered functions may be modified dynamically later.
2881 If io_index is non zero, the corresponding io zone is
2882 modified. If it is zero, a new io zone is allocated. The return
2883 value can be used with cpu_register_physical_memory(). (-1) is
2884 returned if error. */
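/* Illustrative example (added; callback and variable names are hypothetical):
   a device model registers its handlers and maps them with
       static CPUReadMemoryFunc *my_read[3] = { my_readb, my_readw, my_readl };
       static CPUWriteMemoryFunc *my_write[3] = { my_writeb, my_writew, my_writel };
       int io = cpu_register_io_memory(0, my_read, my_write, s);
       cpu_register_physical_memory(base, 0x1000, io);
   passing 0 as io_index so that a fresh slot is allocated. */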
2885 int cpu_register_io_memory(int io_index,
2886 CPUReadMemoryFunc **mem_read,
2887 CPUWriteMemoryFunc **mem_write,
2888 void *opaque)
2890 int i, subwidth = 0;
2892 if (io_index <= 0) {
2893 io_index = get_free_io_mem_idx();
2894 if (io_index == -1)
2895 return io_index;
2896 } else {
2897 if (io_index >= IO_MEM_NB_ENTRIES)
2898 return -1;
2901 for(i = 0;i < 3; i++) {
2902 if (!mem_read[i] || !mem_write[i])
2903 subwidth = IO_MEM_SUBWIDTH;
2904 io_mem_read[io_index][i] = mem_read[i];
2905 io_mem_write[io_index][i] = mem_write[i];
2907 io_mem_opaque[io_index] = opaque;
2908 return (io_index << IO_MEM_SHIFT) | subwidth;
2911 void cpu_unregister_io_memory(int io_table_address)
2913 int i;
2914 int io_index = io_table_address >> IO_MEM_SHIFT;
2916 for (i=0;i < 3; i++) {
2917 io_mem_read[io_index][i] = unassigned_mem_read[i];
2918 io_mem_write[io_index][i] = unassigned_mem_write[i];
2920 io_mem_opaque[io_index] = NULL;
2921 io_mem_used[io_index] = 0;
2924 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2926 return io_mem_write[io_index >> IO_MEM_SHIFT];
2929 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2931 return io_mem_read[io_index >> IO_MEM_SHIFT];
2934 #endif /* !defined(CONFIG_USER_ONLY) */
2936 /* physical memory access (slow version, mainly for debug) */
2937 #if defined(CONFIG_USER_ONLY)
2938 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2939 int len, int is_write)
2941 int l, flags;
2942 target_ulong page;
2943 void * p;
2945 while (len > 0) {
2946 page = addr & TARGET_PAGE_MASK;
2947 l = (page + TARGET_PAGE_SIZE) - addr;
2948 if (l > len)
2949 l = len;
2950 flags = page_get_flags(page);
2951 if (!(flags & PAGE_VALID))
2952 return;
2953 if (is_write) {
2954 if (!(flags & PAGE_WRITE))
2955 return;
2956 /* XXX: this code should not depend on lock_user */
2957 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2958 /* FIXME - should this return an error rather than just fail? */
2959 return;
2960 memcpy(p, buf, l);
2961 unlock_user(p, addr, l);
2962 } else {
2963 if (!(flags & PAGE_READ))
2964 return;
2965 /* XXX: this code should not depend on lock_user */
2966 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2967 /* FIXME - should this return an error rather than just fail? */
2968 return;
2969 memcpy(buf, p, l);
2970 unlock_user(p, addr, 0);
2972 len -= l;
2973 buf += l;
2974 addr += l;
2978 #else
2979 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2980 int len, int is_write)
2982 int l, io_index;
2983 uint8_t *ptr;
2984 uint32_t val;
2985 target_phys_addr_t page;
2986 unsigned long pd;
2987 PhysPageDesc *p;
2989 while (len > 0) {
2990 page = addr & TARGET_PAGE_MASK;
2991 l = (page + TARGET_PAGE_SIZE) - addr;
2992 if (l > len)
2993 l = len;
2994 p = phys_page_find(page >> TARGET_PAGE_BITS);
2995 if (!p) {
2996 pd = IO_MEM_UNASSIGNED;
2997 } else {
2998 pd = p->phys_offset;
3001 if (is_write) {
3002 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3003 target_phys_addr_t addr1 = addr;
3004 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3005 if (p)
3006 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3007 /* XXX: could force cpu_single_env to NULL to avoid
3008 potential bugs */
3009 if (l >= 4 && ((addr1 & 3) == 0)) {
3010 /* 32 bit write access */
3011 val = ldl_p(buf);
3012 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3013 l = 4;
3014 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3015 /* 16 bit write access */
3016 val = lduw_p(buf);
3017 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3018 l = 2;
3019 } else {
3020 /* 8 bit write access */
3021 val = ldub_p(buf);
3022 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3023 l = 1;
3025 } else {
3026 unsigned long addr1;
3027 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3028 /* RAM case */
3029 ptr = phys_ram_base + addr1;
3030 memcpy(ptr, buf, l);
3031 if (!cpu_physical_memory_is_dirty(addr1)) {
3032 /* invalidate code */
3033 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3034 /* set dirty bit */
3035 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3036 (0xff & ~CODE_DIRTY_FLAG);
3039 } else {
3040 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3041 !(pd & IO_MEM_ROMD)) {
3042 target_phys_addr_t addr1 = addr;
3043 /* I/O case */
3044 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3045 if (p)
3046 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3047 if (l >= 4 && ((addr1 & 3) == 0)) {
3048 /* 32 bit read access */
3049 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3050 stl_p(buf, val);
3051 l = 4;
3052 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3053 /* 16 bit read access */
3054 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3055 stw_p(buf, val);
3056 l = 2;
3057 } else {
3058 /* 8 bit read access */
3059 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3060 stb_p(buf, val);
3061 l = 1;
3063 } else {
3064 /* RAM case */
3065 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3066 (addr & ~TARGET_PAGE_MASK);
3067 memcpy(buf, ptr, l);
3070 len -= l;
3071 buf += l;
3072 addr += l;
3076 /* used for ROM loading : can write in RAM and ROM */
3077 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3078 const uint8_t *buf, int len)
3080 int l;
3081 uint8_t *ptr;
3082 target_phys_addr_t page;
3083 unsigned long pd;
3084 PhysPageDesc *p;
3086 while (len > 0) {
3087 page = addr & TARGET_PAGE_MASK;
3088 l = (page + TARGET_PAGE_SIZE) - addr;
3089 if (l > len)
3090 l = len;
3091 p = phys_page_find(page >> TARGET_PAGE_BITS);
3092 if (!p) {
3093 pd = IO_MEM_UNASSIGNED;
3094 } else {
3095 pd = p->phys_offset;
3098 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3099 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3100 !(pd & IO_MEM_ROMD)) {
3101 /* do nothing */
3102 } else {
3103 unsigned long addr1;
3104 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3105 /* ROM/RAM case */
3106 ptr = phys_ram_base + addr1;
3107 memcpy(ptr, buf, l);
3109 len -= l;
3110 buf += l;
3111 addr += l;
3115 typedef struct {
3116 void *buffer;
3117 target_phys_addr_t addr;
3118 target_phys_addr_t len;
3119 } BounceBuffer;
3121 static BounceBuffer bounce;
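/* Descriptive note (added for clarity): there is a single static bounce
   buffer, so only one MMIO-backed mapping can be outstanding at a time; a
   second caller gets a shortened or NULL mapping and can use
   cpu_register_map_client() to be notified when the buffer is released. */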
3123 typedef struct MapClient {
3124 void *opaque;
3125 void (*callback)(void *opaque);
3126 LIST_ENTRY(MapClient) link;
3127 } MapClient;
3129 static LIST_HEAD(map_client_list, MapClient) map_client_list
3130 = LIST_HEAD_INITIALIZER(map_client_list);
3132 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3134 MapClient *client = qemu_malloc(sizeof(*client));
3136 client->opaque = opaque;
3137 client->callback = callback;
3138 LIST_INSERT_HEAD(&map_client_list, client, link);
3139 return client;
3142 void cpu_unregister_map_client(void *_client)
3144 MapClient *client = (MapClient *)_client;
3146 LIST_REMOVE(client, link);
3149 static void cpu_notify_map_clients(void)
3151 MapClient *client;
3153 while (!LIST_EMPTY(&map_client_list)) {
3154 client = LIST_FIRST(&map_client_list);
3155 client->callback(client->opaque);
3156 LIST_REMOVE(client, link);
3160 /* Map a physical memory region into a host virtual address.
3161 * May map a subset of the requested range, given by and returned in *plen.
3162 * May return NULL if resources needed to perform the mapping are exhausted.
3163 * Use only for reads OR writes - not for read-modify-write operations.
3164 * Use cpu_register_map_client() to know when retrying the map operation is
3165 * likely to succeed.
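/* Illustrative example (added; a sketch of the intended calling pattern):
       target_phys_addr_t plen = len;
       void *p = cpu_physical_memory_map(addr, &plen, is_write);
       if (p) {
           ... perform DMA into or out of p for up to plen bytes ...
           cpu_physical_memory_unmap(p, plen, is_write, plen);
       }
   passing the amount actually transferred as access_len when unmapping. */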
3167 void *cpu_physical_memory_map(target_phys_addr_t addr,
3168 target_phys_addr_t *plen,
3169 int is_write)
3171 target_phys_addr_t len = *plen;
3172 target_phys_addr_t done = 0;
3173 int l;
3174 uint8_t *ret = NULL;
3175 uint8_t *ptr;
3176 target_phys_addr_t page;
3177 unsigned long pd;
3178 PhysPageDesc *p;
3179 unsigned long addr1;
3181 while (len > 0) {
3182 page = addr & TARGET_PAGE_MASK;
3183 l = (page + TARGET_PAGE_SIZE) - addr;
3184 if (l > len)
3185 l = len;
3186 p = phys_page_find(page >> TARGET_PAGE_BITS);
3187 if (!p) {
3188 pd = IO_MEM_UNASSIGNED;
3189 } else {
3190 pd = p->phys_offset;
3193 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3194 if (done || bounce.buffer) {
3195 break;
3197 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3198 bounce.addr = addr;
3199 bounce.len = l;
3200 if (!is_write) {
3201 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3203 ptr = bounce.buffer;
3204 } else {
3205 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3206 ptr = phys_ram_base + addr1;
3208 if (!done) {
3209 ret = ptr;
3210 } else if (ret + done != ptr) {
3211 break;
3214 len -= l;
3215 addr += l;
3216 done += l;
3218 *plen = done;
3219 return ret;
3222 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3223 * Will also mark the memory as dirty if is_write == 1. access_len gives
3224 * the amount of memory that was actually read or written by the caller.
3226 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3227 int is_write, target_phys_addr_t access_len)
3229 if (buffer != bounce.buffer) {
3230 if (is_write) {
3231 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3232 while (access_len) {
3233 unsigned l;
3234 l = TARGET_PAGE_SIZE;
3235 if (l > access_len)
3236 l = access_len;
3237 if (!cpu_physical_memory_is_dirty(addr1)) {
3238 /* invalidate code */
3239 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3240 /* set dirty bit */
3241 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3242 (0xff & ~CODE_DIRTY_FLAG);
3244 addr1 += l;
3245 access_len -= l;
3248 return;
3250 if (is_write) {
3251 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3253 qemu_free(bounce.buffer);
3254 bounce.buffer = NULL;
3255 cpu_notify_map_clients();
3258 /* warning: addr must be aligned */
3259 uint32_t ldl_phys(target_phys_addr_t addr)
3261 int io_index;
3262 uint8_t *ptr;
3263 uint32_t val;
3264 unsigned long pd;
3265 PhysPageDesc *p;
3267 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3268 if (!p) {
3269 pd = IO_MEM_UNASSIGNED;
3270 } else {
3271 pd = p->phys_offset;
3274 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3275 !(pd & IO_MEM_ROMD)) {
3276 /* I/O case */
3277 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3278 if (p)
3279 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3280 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3281 } else {
3282 /* RAM case */
3283 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3284 (addr & ~TARGET_PAGE_MASK);
3285 val = ldl_p(ptr);
3287 return val;
3290 /* warning: addr must be aligned */
3291 uint64_t ldq_phys(target_phys_addr_t addr)
3293 int io_index;
3294 uint8_t *ptr;
3295 uint64_t val;
3296 unsigned long pd;
3297 PhysPageDesc *p;
3299 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3300 if (!p) {
3301 pd = IO_MEM_UNASSIGNED;
3302 } else {
3303 pd = p->phys_offset;
3306 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3307 !(pd & IO_MEM_ROMD)) {
3308 /* I/O case */
3309 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3310 if (p)
3311 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3312 #ifdef TARGET_WORDS_BIGENDIAN
3313 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3314 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3315 #else
3316 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3317 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3318 #endif
3319 } else {
3320 /* RAM case */
3321 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3322 (addr & ~TARGET_PAGE_MASK);
3323 val = ldq_p(ptr);
3325 return val;
3328 /* XXX: optimize */
3329 uint32_t ldub_phys(target_phys_addr_t addr)
3331 uint8_t val;
3332 cpu_physical_memory_read(addr, &val, 1);
3333 return val;
3336 /* XXX: optimize */
3337 uint32_t lduw_phys(target_phys_addr_t addr)
3339 uint16_t val;
3340 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3341 return tswap16(val);
3344 /* warning: addr must be aligned. The ram page is not marked as dirty
3345 and the code inside is not invalidated. It is useful if the dirty
3346 bits are used to track modified PTEs */
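/* Descriptive note (added for clarity): unlike stl_phys() below, this variant
   deliberately skips tb_invalidate_phys_page_range() and the dirty-bit update
   (except while migration is in progress), because callers such as target MMU
   code rely on the dirty bits to detect guest page-table modifications. */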
3347 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3349 int io_index;
3350 uint8_t *ptr;
3351 unsigned long pd;
3352 PhysPageDesc *p;
3354 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3355 if (!p) {
3356 pd = IO_MEM_UNASSIGNED;
3357 } else {
3358 pd = p->phys_offset;
3361 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3362 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3363 if (p)
3364 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3365 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3366 } else {
3367 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3368 ptr = phys_ram_base + addr1;
3369 stl_p(ptr, val);
3371 if (unlikely(in_migration)) {
3372 if (!cpu_physical_memory_is_dirty(addr1)) {
3373 /* invalidate code */
3374 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3375 /* set dirty bit */
3376 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3377 (0xff & ~CODE_DIRTY_FLAG);
3383 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3385 int io_index;
3386 uint8_t *ptr;
3387 unsigned long pd;
3388 PhysPageDesc *p;
3390 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3391 if (!p) {
3392 pd = IO_MEM_UNASSIGNED;
3393 } else {
3394 pd = p->phys_offset;
3397 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3398 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3399 if (p)
3400 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3401 #ifdef TARGET_WORDS_BIGENDIAN
3402 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3403 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3404 #else
3405 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3406 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3407 #endif
3408 } else {
3409 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3410 (addr & ~TARGET_PAGE_MASK);
3411 stq_p(ptr, val);
3415 /* warning: addr must be aligned */
3416 void stl_phys(target_phys_addr_t addr, uint32_t val)
3418 int io_index;
3419 uint8_t *ptr;
3420 unsigned long pd;
3421 PhysPageDesc *p;
3423 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3424 if (!p) {
3425 pd = IO_MEM_UNASSIGNED;
3426 } else {
3427 pd = p->phys_offset;
3430 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3431 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3432 if (p)
3433 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3434 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3435 } else {
3436 unsigned long addr1;
3437 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3438 /* RAM case */
3439 ptr = phys_ram_base + addr1;
3440 stl_p(ptr, val);
3441 if (!cpu_physical_memory_is_dirty(addr1)) {
3442 /* invalidate code */
3443 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3444 /* set dirty bit */
3445 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3446 (0xff & ~CODE_DIRTY_FLAG);
3451 /* XXX: optimize */
3452 void stb_phys(target_phys_addr_t addr, uint32_t val)
3454 uint8_t v = val;
3455 cpu_physical_memory_write(addr, &v, 1);
3458 /* XXX: optimize */
3459 void stw_phys(target_phys_addr_t addr, uint32_t val)
3461 uint16_t v = tswap16(val);
3462 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3465 /* XXX: optimize */
3466 void stq_phys(target_phys_addr_t addr, uint64_t val)
3468 val = tswap64(val);
3469 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3472 #endif
3474 /* virtual memory access for debug */
3475 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3476 uint8_t *buf, int len, int is_write)
3478 int l;
3479 target_phys_addr_t phys_addr;
3480 target_ulong page;
3482 while (len > 0) {
3483 page = addr & TARGET_PAGE_MASK;
3484 phys_addr = cpu_get_phys_page_debug(env, page);
3485 /* if no physical page mapped, return an error */
3486 if (phys_addr == -1)
3487 return -1;
3488 l = (page + TARGET_PAGE_SIZE) - addr;
3489 if (l > len)
3490 l = len;
3491 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3492 buf, l, is_write);
3493 len -= l;
3494 buf += l;
3495 addr += l;
3497 return 0;
3500 /* in deterministic execution mode, instructions doing device I/Os
3501 must be at the end of the TB */
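/* Descriptive note (added for clarity): with instruction counting (-icount)
   the budget charged to a TB must be exact, which is only possible if a TB
   that performs device I/O ends right after the I/O instruction.
   cpu_io_recompile() is called from the softmmu I/O access path when that is
   not the case: it retranslates the current TB with CF_LAST_IO and a trimmed
   instruction count, then restarts execution. */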
3502 void cpu_io_recompile(CPUState *env, void *retaddr)
3504 TranslationBlock *tb;
3505 uint32_t n, cflags;
3506 target_ulong pc, cs_base;
3507 uint64_t flags;
3509 tb = tb_find_pc((unsigned long)retaddr);
3510 if (!tb) {
3511 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3512 retaddr);
3514 n = env->icount_decr.u16.low + tb->icount;
3515 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3516 /* Calculate how many instructions had been executed before the fault
3517 occurred. */
3518 n = n - env->icount_decr.u16.low;
3519 /* Generate a new TB ending on the I/O insn. */
3520 n++;
3521 /* On MIPS and SH, delay slot instructions can only be restarted if
3522 they were already the first instruction in the TB. If this is not
3523 the first instruction in a TB then re-execute the preceding
3524 branch. */
3525 #if defined(TARGET_MIPS)
3526 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3527 env->active_tc.PC -= 4;
3528 env->icount_decr.u16.low++;
3529 env->hflags &= ~MIPS_HFLAG_BMASK;
3531 #elif defined(TARGET_SH4)
3532 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3533 && n > 1) {
3534 env->pc -= 2;
3535 env->icount_decr.u16.low++;
3536 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3538 #endif
3539 /* This should never happen. */
3540 if (n > CF_COUNT_MASK)
3541 cpu_abort(env, "TB too big during recompile");
3543 cflags = n | CF_LAST_IO;
3544 pc = tb->pc;
3545 cs_base = tb->cs_base;
3546 flags = tb->flags;
3547 tb_phys_invalidate(tb, -1);
3548 /* FIXME: In theory this could raise an exception. In practice
3549 we have already translated the block once so it's probably ok. */
3550 tb_gen_code(env, pc, cs_base, flags, cflags);
3551 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3552 the first in the TB) then we end up generating a whole new TB and
3553 repeating the fault, which is horribly inefficient.
3554 Better would be to execute just this insn uncached, or generate a
3555 second new TB. */
3556 cpu_resume_from_signal(env, NULL);
3559 void dump_exec_info(FILE *f,
3560 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3562 int i, target_code_size, max_target_code_size;
3563 int direct_jmp_count, direct_jmp2_count, cross_page;
3564 TranslationBlock *tb;
3566 target_code_size = 0;
3567 max_target_code_size = 0;
3568 cross_page = 0;
3569 direct_jmp_count = 0;
3570 direct_jmp2_count = 0;
3571 for(i = 0; i < nb_tbs; i++) {
3572 tb = &tbs[i];
3573 target_code_size += tb->size;
3574 if (tb->size > max_target_code_size)
3575 max_target_code_size = tb->size;
3576 if (tb->page_addr[1] != -1)
3577 cross_page++;
3578 if (tb->tb_next_offset[0] != 0xffff) {
3579 direct_jmp_count++;
3580 if (tb->tb_next_offset[1] != 0xffff) {
3581 direct_jmp2_count++;
3585 /* XXX: avoid using doubles ? */
3586 cpu_fprintf(f, "Translation buffer state:\n");
3587 cpu_fprintf(f, "gen code size %ld/%ld\n",
3588 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3589 cpu_fprintf(f, "TB count %d/%d\n",
3590 nb_tbs, code_gen_max_blocks);
3591 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3592 nb_tbs ? target_code_size / nb_tbs : 0,
3593 max_target_code_size);
3594 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3595 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3596 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3597 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3598 cross_page,
3599 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3600 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3601 direct_jmp_count,
3602 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3603 direct_jmp2_count,
3604 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3605 cpu_fprintf(f, "\nStatistics:\n");
3606 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3607 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3608 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3609 tcg_dump_info(f, cpu_fprintf);
3612 #if !defined(CONFIG_USER_ONLY)
3614 #define MMUSUFFIX _cmmu
3615 #define GETPC() NULL
3616 #define env cpu_single_env
3617 #define SOFTMMU_CODE_ACCESS
3619 #define SHIFT 0
3620 #include "softmmu_template.h"
3622 #define SHIFT 1
3623 #include "softmmu_template.h"
3625 #define SHIFT 2
3626 #include "softmmu_template.h"
3628 #define SHIFT 3
3629 #include "softmmu_template.h"
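/* Descriptive note (added for clarity): the four includes above instantiate
   the code-access variants of the softmmu load helpers (MMUSUFFIX _cmmu,
   SOFTMMU_CODE_ACCESS) for 1-, 2-, 4- and 8-byte accesses; they are used when
   fetching guest instructions through the TLB during translation. */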
3631 #undef env
3633 #endif