[qemu/aliguori-queue.git] / exec.c (blob 65c89b08b7f88fe82027f5d85c34b5648503c01a)
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "tcg.h"
38 #include "hw/hw.h"
39 #include "osdep.h"
40 #include "kvm.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #endif
45 //#define DEBUG_TB_INVALIDATE
46 //#define DEBUG_FLUSH
47 //#define DEBUG_TLB
48 //#define DEBUG_UNASSIGNED
50 /* make various TB consistency checks */
51 //#define DEBUG_TB_CHECK
52 //#define DEBUG_TLB_CHECK
54 //#define DEBUG_IOPORT
55 //#define DEBUG_SUBPAGE
57 #if !defined(CONFIG_USER_ONLY)
58 /* TB consistency checks only implemented for usermode emulation. */
59 #undef DEBUG_TB_CHECK
60 #endif
62 #define SMC_BITMAP_USE_THRESHOLD 10
64 #if defined(TARGET_SPARC64)
65 #define TARGET_PHYS_ADDR_SPACE_BITS 41
66 #elif defined(TARGET_SPARC)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 36
68 #elif defined(TARGET_ALPHA)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 42
70 #define TARGET_VIRT_ADDR_SPACE_BITS 42
71 #elif defined(TARGET_PPC64)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 36
77 #else
78 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
79 #define TARGET_PHYS_ADDR_SPACE_BITS 32
80 #endif
82 static TranslationBlock *tbs;
83 int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 uint8_t *code_gen_ptr;
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 uint8_t *phys_ram_dirty;
115 static int in_migration;
117 typedef struct RAMBlock {
118 uint8_t *host;
119 ram_addr_t offset;
120 ram_addr_t length;
121 struct RAMBlock *next;
122 } RAMBlock;
124 static RAMBlock *ram_blocks;
125 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126 then we can no longer assume contiguous ram offsets, and external uses
127 of this variable will break. */
128 ram_addr_t last_ram_offset;
129 #endif
131 CPUState *first_cpu;
132 /* current CPU in the current thread. It is only valid inside
133 cpu_exec() */
134 CPUState *cpu_single_env;
135 /* 0 = Do not count executed instructions.
136 1 = Precise instruction counting.
137 2 = Adaptive rate instruction counting. */
138 int use_icount = 0;
139 /* Current instruction counter. While executing translated code this may
140 include some instructions that have not yet been executed. */
141 int64_t qemu_icount;
143 typedef struct PageDesc {
144 /* list of TBs intersecting this ram page */
145 TranslationBlock *first_tb;
146 /* in order to optimize self modifying code, we count the number
147 of lookups we do to a given page to use a bitmap */
148 unsigned int code_write_count;
149 uint8_t *code_bitmap;
150 #if defined(CONFIG_USER_ONLY)
151 unsigned long flags;
152 #endif
153 } PageDesc;
155 typedef struct PhysPageDesc {
156 /* offset in host memory of the page + io_index in the low bits */
157 ram_addr_t phys_offset;
158 ram_addr_t region_offset;
159 } PhysPageDesc;
161 #define L2_BITS 10
162 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163 /* XXX: this is a temporary hack for alpha target.
164 * In the future, this is to be replaced by a multi-level table
165 * to actually be able to handle the complete 64 bits address space. */
167 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
168 #else
169 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170 #endif
172 #define L1_SIZE (1 << L1_BITS)
173 #define L2_SIZE (1 << L2_BITS)
175 unsigned long qemu_real_host_page_size;
176 unsigned long qemu_host_page_bits;
177 unsigned long qemu_host_page_size;
178 unsigned long qemu_host_page_mask;
180 /* XXX: for system emulation, it could just be an array */
181 static PageDesc *l1_map[L1_SIZE];
182 static PhysPageDesc **l1_phys_map;
184 #if !defined(CONFIG_USER_ONLY)
185 static void io_mem_init(void);
187 /* io memory support */
188 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
189 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
190 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
191 static char io_mem_used[IO_MEM_NB_ENTRIES];
192 static int io_mem_watch;
193 #endif
195 /* log support */
196 static const char *logfilename = "/tmp/qemu.log";
197 FILE *logfile;
198 int loglevel;
199 static int log_append = 0;
201 /* statistics */
202 static int tlb_flush_count;
203 static int tb_flush_count;
204 static int tb_phys_invalidate_count;
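/* Subpage support: a subpage allows a single target page to be split
   between several I/O memory regions.  The handler tables below are
   indexed by the offset of the access within the page (SUBPAGE_IDX)
   and by the access size. */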
206 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
207 typedef struct subpage_t {
208 target_phys_addr_t base;
209 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
210 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
211 void *opaque[TARGET_PAGE_SIZE][2][4];
212 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
213 } subpage_t;
215 #ifdef _WIN32
216 static void map_exec(void *addr, long size)
218 DWORD old_protect;
219 VirtualProtect(addr, size,
220 PAGE_EXECUTE_READWRITE, &old_protect);
223 #else
224 static void map_exec(void *addr, long size)
226 unsigned long start, end, page_size;
228 page_size = getpagesize();
229 start = (unsigned long)addr;
230 start &= ~(page_size - 1);
232 end = (unsigned long)addr + size;
233 end += page_size - 1;
234 end &= ~(page_size - 1);
236 mprotect((void *)start, end - start,
237 PROT_READ | PROT_WRITE | PROT_EXEC);
239 #endif
241 static void page_init(void)
243 /* NOTE: we can always suppose that qemu_host_page_size >=
244 TARGET_PAGE_SIZE */
245 #ifdef _WIN32
247 SYSTEM_INFO system_info;
249 GetSystemInfo(&system_info);
250 qemu_real_host_page_size = system_info.dwPageSize;
252 #else
253 qemu_real_host_page_size = getpagesize();
254 #endif
255 if (qemu_host_page_size == 0)
256 qemu_host_page_size = qemu_real_host_page_size;
257 if (qemu_host_page_size < TARGET_PAGE_SIZE)
258 qemu_host_page_size = TARGET_PAGE_SIZE;
259 qemu_host_page_bits = 0;
260 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
261 qemu_host_page_bits++;
262 qemu_host_page_mask = ~(qemu_host_page_size - 1);
263 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
264 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
266 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
268 long long startaddr, endaddr;
269 FILE *f;
270 int n;
272 mmap_lock();
273 last_brk = (unsigned long)sbrk(0);
274 f = fopen("/proc/self/maps", "r");
275 if (f) {
276 do {
277 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
278 if (n == 2) {
279 startaddr = MIN(startaddr,
280 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
281 endaddr = MIN(endaddr,
282 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
283 page_set_flags(startaddr & TARGET_PAGE_MASK,
284 TARGET_PAGE_ALIGN(endaddr),
285 PAGE_RESERVED);
287 } while (!feof(f));
288 fclose(f);
290 mmap_unlock();
292 #endif
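/* The PageDesc table is a simple two-level structure: the target page
   index is split into a high L1_BITS part that indexes l1_map[] and a
   low L2_BITS part that indexes the PageDesc chunk hanging off that
   slot.  page_l1_map() only locates the l1_map slot; page_find_alloc()
   allocates the second-level chunk on demand. */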
295 static inline PageDesc **page_l1_map(target_ulong index)
297 #if TARGET_LONG_BITS > 32
298 /* Host memory outside guest VM. For 32-bit targets we have already
299 excluded high addresses. */
300 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
301 return NULL;
302 #endif
303 return &l1_map[index >> L2_BITS];
306 static inline PageDesc *page_find_alloc(target_ulong index)
308 PageDesc **lp, *p;
309 lp = page_l1_map(index);
310 if (!lp)
311 return NULL;
313 p = *lp;
314 if (!p) {
315 /* allocate if not found */
316 #if defined(CONFIG_USER_ONLY)
317 size_t len = sizeof(PageDesc) * L2_SIZE;
318 /* Don't use qemu_malloc because it may recurse. */
319 p = mmap(0, len, PROT_READ | PROT_WRITE,
320 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
321 *lp = p;
322 if (h2g_valid(p)) {
323 unsigned long addr = h2g(p);
324 page_set_flags(addr & TARGET_PAGE_MASK,
325 TARGET_PAGE_ALIGN(addr + len),
326 PAGE_RESERVED);
328 #else
329 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
330 *lp = p;
331 #endif
333 return p + (index & (L2_SIZE - 1));
336 static inline PageDesc *page_find(target_ulong index)
338 PageDesc **lp, *p;
339 lp = page_l1_map(index);
340 if (!lp)
341 return NULL;
343 p = *lp;
344 if (!p)
345 return 0;
346 return p + (index & (L2_SIZE - 1));
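/* Physical pages are described by a separate table rooted at
   l1_phys_map.  For physical address spaces wider than 32 bits one
   extra indirection level is used.  Entries are created on demand and
   start out as IO_MEM_UNASSIGNED, with region_offset preset to the
   page's own physical address. */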
349 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
351 void **lp, **p;
352 PhysPageDesc *pd;
354 p = (void **)l1_phys_map;
355 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
357 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
358 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
359 #endif
360 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
361 p = *lp;
362 if (!p) {
363 /* allocate if not found */
364 if (!alloc)
365 return NULL;
366 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
367 memset(p, 0, sizeof(void *) * L1_SIZE);
368 *lp = p;
370 #endif
371 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
372 pd = *lp;
373 if (!pd) {
374 int i;
375 /* allocate if not found */
376 if (!alloc)
377 return NULL;
378 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
379 *lp = pd;
380 for (i = 0; i < L2_SIZE; i++) {
381 pd[i].phys_offset = IO_MEM_UNASSIGNED;
382 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
385 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
388 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
390 return phys_page_find_alloc(index, 0);
393 #if !defined(CONFIG_USER_ONLY)
394 static void tlb_protect_code(ram_addr_t ram_addr);
395 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
396 target_ulong vaddr);
397 #define mmap_lock() do { } while(0)
398 #define mmap_unlock() do { } while(0)
399 #endif
401 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
403 #if defined(CONFIG_USER_ONLY)
404 /* Currently it is not recommended to allocate big chunks of data in
405 user mode. This will change when a dedicated libc is used. */
406 #define USE_STATIC_CODE_GEN_BUFFER
407 #endif
409 #ifdef USE_STATIC_CODE_GEN_BUFFER
410 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
411 #endif
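/* Size and place the translation cache.  With USE_STATIC_CODE_GEN_BUFFER
   (user mode) a static buffer is used; otherwise the size defaults to
   ram_size / 4 (at least MIN_CODE_GEN_BUFFER_SIZE, capped on some hosts)
   and the buffer is mapped at a low address where possible (MAP_32BIT
   on x86-64 Linux, fixed low addresses on sparc64 and ARM) so that
   generated code can reach the prologue and helpers with direct calls
   and branches. */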
413 static void code_gen_alloc(unsigned long tb_size)
415 #ifdef USE_STATIC_CODE_GEN_BUFFER
416 code_gen_buffer = static_code_gen_buffer;
417 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
418 map_exec(code_gen_buffer, code_gen_buffer_size);
419 #else
420 code_gen_buffer_size = tb_size;
421 if (code_gen_buffer_size == 0) {
422 #if defined(CONFIG_USER_ONLY)
423 /* in user mode, phys_ram_size is not meaningful */
424 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
425 #else
426 /* XXX: needs adjustments */
427 code_gen_buffer_size = (unsigned long)(ram_size / 4);
428 #endif
430 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
431 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
432 /* The code gen buffer location may have constraints depending on
433 the host cpu and OS */
434 #if defined(__linux__)
436 int flags;
437 void *start = NULL;
439 flags = MAP_PRIVATE | MAP_ANONYMOUS;
440 #if defined(__x86_64__)
441 flags |= MAP_32BIT;
442 /* Cannot map more than that */
443 if (code_gen_buffer_size > (800 * 1024 * 1024))
444 code_gen_buffer_size = (800 * 1024 * 1024);
445 #elif defined(__sparc_v9__)
446 // Map the buffer below 2G, so we can use direct calls and branches
447 flags |= MAP_FIXED;
448 start = (void *) 0x60000000UL;
449 if (code_gen_buffer_size > (512 * 1024 * 1024))
450 code_gen_buffer_size = (512 * 1024 * 1024);
451 #elif defined(__arm__)
452 /* Map the buffer below 32M, so we can use direct calls and branches */
453 flags |= MAP_FIXED;
454 start = (void *) 0x01000000UL;
455 if (code_gen_buffer_size > 16 * 1024 * 1024)
456 code_gen_buffer_size = 16 * 1024 * 1024;
457 #endif
458 code_gen_buffer = mmap(start, code_gen_buffer_size,
459 PROT_WRITE | PROT_READ | PROT_EXEC,
460 flags, -1, 0);
461 if (code_gen_buffer == MAP_FAILED) {
462 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
463 exit(1);
466 #elif defined(__FreeBSD__) || defined(__DragonFly__)
468 int flags;
469 void *addr = NULL;
470 flags = MAP_PRIVATE | MAP_ANONYMOUS;
471 #if defined(__x86_64__)
472 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
473 * 0x40000000 is free */
474 flags |= MAP_FIXED;
475 addr = (void *)0x40000000;
476 /* Cannot map more than that */
477 if (code_gen_buffer_size > (800 * 1024 * 1024))
478 code_gen_buffer_size = (800 * 1024 * 1024);
479 #endif
480 code_gen_buffer = mmap(addr, code_gen_buffer_size,
481 PROT_WRITE | PROT_READ | PROT_EXEC,
482 flags, -1, 0);
483 if (code_gen_buffer == MAP_FAILED) {
484 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485 exit(1);
488 #else
489 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
490 map_exec(code_gen_buffer, code_gen_buffer_size);
491 #endif
492 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
493 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
494 code_gen_buffer_max_size = code_gen_buffer_size -
495 code_gen_max_block_size();
496 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
497 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
500 /* Must be called before using the QEMU cpus. 'tb_size' is the size
501 (in bytes) allocated to the translation buffer. Zero means default
502 size. */
503 void cpu_exec_init_all(unsigned long tb_size)
505 cpu_gen_init();
506 code_gen_alloc(tb_size);
507 code_gen_ptr = code_gen_buffer;
508 page_init();
509 #if !defined(CONFIG_USER_ONLY)
510 io_mem_init();
511 #endif
514 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
516 #define CPU_COMMON_SAVE_VERSION 1
518 static void cpu_common_save(QEMUFile *f, void *opaque)
520 CPUState *env = opaque;
522 cpu_synchronize_state(env, 0);
524 qemu_put_be32s(f, &env->halted);
525 qemu_put_be32s(f, &env->interrupt_request);
528 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
530 CPUState *env = opaque;
532 if (version_id != CPU_COMMON_SAVE_VERSION)
533 return -EINVAL;
535 qemu_get_be32s(f, &env->halted);
536 qemu_get_be32s(f, &env->interrupt_request);
537 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
538 version_id is increased. */
539 env->interrupt_request &= ~0x01;
540 tlb_flush(env, 1);
541 cpu_synchronize_state(env, 1);
543 return 0;
545 #endif
547 CPUState *qemu_get_cpu(int cpu)
549 CPUState *env = first_cpu;
551 while (env) {
552 if (env->cpu_index == cpu)
553 break;
554 env = env->next_cpu;
557 return env;
560 void cpu_exec_init(CPUState *env)
562 CPUState **penv;
563 int cpu_index;
565 #if defined(CONFIG_USER_ONLY)
566 cpu_list_lock();
567 #endif
568 env->next_cpu = NULL;
569 penv = &first_cpu;
570 cpu_index = 0;
571 while (*penv != NULL) {
572 penv = &(*penv)->next_cpu;
573 cpu_index++;
575 env->cpu_index = cpu_index;
576 env->numa_node = 0;
577 TAILQ_INIT(&env->breakpoints);
578 TAILQ_INIT(&env->watchpoints);
579 *penv = env;
580 #if defined(CONFIG_USER_ONLY)
581 cpu_list_unlock();
582 #endif
583 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
584 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
585 cpu_common_save, cpu_common_load, env);
586 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
587 cpu_save, cpu_load, env);
588 #endif
591 static inline void invalidate_page_bitmap(PageDesc *p)
593 if (p->code_bitmap) {
594 qemu_free(p->code_bitmap);
595 p->code_bitmap = NULL;
597 p->code_write_count = 0;
600 /* set to NULL all the 'first_tb' fields in all PageDescs */
601 static void page_flush_tb(void)
603 int i, j;
604 PageDesc *p;
606 for(i = 0; i < L1_SIZE; i++) {
607 p = l1_map[i];
608 if (p) {
609 for(j = 0; j < L2_SIZE; j++) {
610 p->first_tb = NULL;
611 invalidate_page_bitmap(p);
612 p++;
618 /* flush all the translation blocks */
619 /* XXX: tb_flush is currently not thread safe */
620 void tb_flush(CPUState *env1)
622 CPUState *env;
623 #if defined(DEBUG_FLUSH)
624 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
625 (unsigned long)(code_gen_ptr - code_gen_buffer),
626 nb_tbs, nb_tbs > 0 ?
627 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
628 #endif
629 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
630 cpu_abort(env1, "Internal error: code buffer overflow\n");
632 nb_tbs = 0;
634 for(env = first_cpu; env != NULL; env = env->next_cpu) {
635 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
638 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
639 page_flush_tb();
641 code_gen_ptr = code_gen_buffer;
642 /* XXX: flush processor icache at this point if cache flush is
643 expensive */
644 tb_flush_count++;
647 #ifdef DEBUG_TB_CHECK
649 static void tb_invalidate_check(target_ulong address)
651 TranslationBlock *tb;
652 int i;
653 address &= TARGET_PAGE_MASK;
654 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
655 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
656 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
657 address >= tb->pc + tb->size)) {
658 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
659 address, (long)tb->pc, tb->size);
665 /* verify that all the pages have correct rights for code */
666 static void tb_page_check(void)
668 TranslationBlock *tb;
669 int i, flags1, flags2;
671 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
672 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
673 flags1 = page_get_flags(tb->pc);
674 flags2 = page_get_flags(tb->pc + tb->size - 1);
675 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
676 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
677 (long)tb->pc, tb->size, flags1, flags2);
683 static void tb_jmp_check(TranslationBlock *tb)
685 TranslationBlock *tb1;
686 unsigned int n1;
688 /* suppress any remaining jumps to this TB */
689 tb1 = tb->jmp_first;
690 for(;;) {
691 n1 = (long)tb1 & 3;
692 tb1 = (TranslationBlock *)((long)tb1 & ~3);
693 if (n1 == 2)
694 break;
695 tb1 = tb1->jmp_next[n1];
697 /* check end of list */
698 if (tb1 != tb) {
699 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
703 #endif
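/* TBs are linked into two kinds of intrusive lists: per-page lists
   (page_next, headed by PageDesc.first_tb) and a circular list of
   incoming jumps (jmp_first/jmp_next).  The low two bits of each stored
   pointer are a tag: 0 or 1 selects which of the TB's (up to two) pages
   or jump slots the link belongs to, and the value 2 marks the head of
   the circular jump list (tb->jmp_first = tb | 2). */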
705 /* invalidate one TB */
706 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
707 int next_offset)
709 TranslationBlock *tb1;
710 for(;;) {
711 tb1 = *ptb;
712 if (tb1 == tb) {
713 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
714 break;
716 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
720 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
722 TranslationBlock *tb1;
723 unsigned int n1;
725 for(;;) {
726 tb1 = *ptb;
727 n1 = (long)tb1 & 3;
728 tb1 = (TranslationBlock *)((long)tb1 & ~3);
729 if (tb1 == tb) {
730 *ptb = tb1->page_next[n1];
731 break;
733 ptb = &tb1->page_next[n1];
737 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
739 TranslationBlock *tb1, **ptb;
740 unsigned int n1;
742 ptb = &tb->jmp_next[n];
743 tb1 = *ptb;
744 if (tb1) {
745 /* find tb(n) in circular list */
746 for(;;) {
747 tb1 = *ptb;
748 n1 = (long)tb1 & 3;
749 tb1 = (TranslationBlock *)((long)tb1 & ~3);
750 if (n1 == n && tb1 == tb)
751 break;
752 if (n1 == 2) {
753 ptb = &tb1->jmp_first;
754 } else {
755 ptb = &tb1->jmp_next[n1];
758 /* now we can suppress tb(n) from the list */
759 *ptb = tb->jmp_next[n];
761 tb->jmp_next[n] = NULL;
765 /* reset the jump entry 'n' of a TB so that it is not chained to
766 another TB */
767 static inline void tb_reset_jump(TranslationBlock *tb, int n)
769 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
772 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
774 CPUState *env;
775 PageDesc *p;
776 unsigned int h, n1;
777 target_phys_addr_t phys_pc;
778 TranslationBlock *tb1, *tb2;
780 /* remove the TB from the hash list */
781 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
782 h = tb_phys_hash_func(phys_pc);
783 tb_remove(&tb_phys_hash[h], tb,
784 offsetof(TranslationBlock, phys_hash_next));
786 /* remove the TB from the page list */
787 if (tb->page_addr[0] != page_addr) {
788 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
789 tb_page_remove(&p->first_tb, tb);
790 invalidate_page_bitmap(p);
792 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
793 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
794 tb_page_remove(&p->first_tb, tb);
795 invalidate_page_bitmap(p);
798 tb_invalidated_flag = 1;
800 /* remove the TB from the hash list */
801 h = tb_jmp_cache_hash_func(tb->pc);
802 for(env = first_cpu; env != NULL; env = env->next_cpu) {
803 if (env->tb_jmp_cache[h] == tb)
804 env->tb_jmp_cache[h] = NULL;
807 /* suppress this TB from the two jump lists */
808 tb_jmp_remove(tb, 0);
809 tb_jmp_remove(tb, 1);
811 /* suppress any remaining jumps to this TB */
812 tb1 = tb->jmp_first;
813 for(;;) {
814 n1 = (long)tb1 & 3;
815 if (n1 == 2)
816 break;
817 tb1 = (TranslationBlock *)((long)tb1 & ~3);
818 tb2 = tb1->jmp_next[n1];
819 tb_reset_jump(tb1, n1);
820 tb1->jmp_next[n1] = NULL;
821 tb1 = tb2;
823 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
825 tb_phys_invalidate_count++;
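/* Set bits [start, start + len) in a byte-addressed bitmap.  Used by
   build_page_bitmap() to record which bytes of a page are covered by
   translated code, so tb_invalidate_phys_page_fast() can ignore writes
   that do not overlap any TB. */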
828 static inline void set_bits(uint8_t *tab, int start, int len)
830 int end, mask, end1;
832 end = start + len;
833 tab += start >> 3;
834 mask = 0xff << (start & 7);
835 if ((start & ~7) == (end & ~7)) {
836 if (start < end) {
837 mask &= ~(0xff << (end & 7));
838 *tab |= mask;
840 } else {
841 *tab++ |= mask;
842 start = (start + 8) & ~7;
843 end1 = end & ~7;
844 while (start < end1) {
845 *tab++ = 0xff;
846 start += 8;
848 if (start < end) {
849 mask = ~(0xff << (end & 7));
850 *tab |= mask;
855 static void build_page_bitmap(PageDesc *p)
857 int n, tb_start, tb_end;
858 TranslationBlock *tb;
860 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
862 tb = p->first_tb;
863 while (tb != NULL) {
864 n = (long)tb & 3;
865 tb = (TranslationBlock *)((long)tb & ~3);
866 /* NOTE: this is subtle as a TB may span two physical pages */
867 if (n == 0) {
868 /* NOTE: tb_end may be after the end of the page, but
869 it is not a problem */
870 tb_start = tb->pc & ~TARGET_PAGE_MASK;
871 tb_end = tb_start + tb->size;
872 if (tb_end > TARGET_PAGE_SIZE)
873 tb_end = TARGET_PAGE_SIZE;
874 } else {
875 tb_start = 0;
876 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
878 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
879 tb = tb->page_next[n];
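/* Translate one block for (pc, cs_base, flags): allocate a TB, flushing
   the whole translation cache and retrying if it is full, generate host
   code at code_gen_ptr, then link the TB into the physical hash table
   and into the page lists of the one or two pages it spans. */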
883 TranslationBlock *tb_gen_code(CPUState *env,
884 target_ulong pc, target_ulong cs_base,
885 int flags, int cflags)
887 TranslationBlock *tb;
888 uint8_t *tc_ptr;
889 target_ulong phys_pc, phys_page2, virt_page2;
890 int code_gen_size;
892 phys_pc = get_phys_addr_code(env, pc);
893 tb = tb_alloc(pc);
894 if (!tb) {
895 /* flush must be done */
896 tb_flush(env);
897 /* cannot fail at this point */
898 tb = tb_alloc(pc);
899 /* Don't forget to invalidate previous TB info. */
900 tb_invalidated_flag = 1;
902 tc_ptr = code_gen_ptr;
903 tb->tc_ptr = tc_ptr;
904 tb->cs_base = cs_base;
905 tb->flags = flags;
906 tb->cflags = cflags;
907 cpu_gen_code(env, tb, &code_gen_size);
908 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
910 /* check next page if needed */
911 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
912 phys_page2 = -1;
913 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
914 phys_page2 = get_phys_addr_code(env, virt_page2);
916 tb_link_phys(tb, phys_pc, phys_page2);
917 return tb;
920 /* invalidate all TBs which intersect with the target physical page
921 starting in range [start;end[. NOTE: start and end must refer to
922 the same physical page. 'is_cpu_write_access' should be true if called
923 from a real cpu write access: the virtual CPU will exit the current
924 TB if code is modified inside this TB. */
925 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
926 int is_cpu_write_access)
928 TranslationBlock *tb, *tb_next, *saved_tb;
929 CPUState *env = cpu_single_env;
930 target_ulong tb_start, tb_end;
931 PageDesc *p;
932 int n;
933 #ifdef TARGET_HAS_PRECISE_SMC
934 int current_tb_not_found = is_cpu_write_access;
935 TranslationBlock *current_tb = NULL;
936 int current_tb_modified = 0;
937 target_ulong current_pc = 0;
938 target_ulong current_cs_base = 0;
939 int current_flags = 0;
940 #endif /* TARGET_HAS_PRECISE_SMC */
942 p = page_find(start >> TARGET_PAGE_BITS);
943 if (!p)
944 return;
945 if (!p->code_bitmap &&
946 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
947 is_cpu_write_access) {
948 /* build code bitmap */
949 build_page_bitmap(p);
952 /* we remove all the TBs in the range [start, end[ */
953 /* XXX: see if in some cases it could be faster to invalidate all the code */
954 tb = p->first_tb;
955 while (tb != NULL) {
956 n = (long)tb & 3;
957 tb = (TranslationBlock *)((long)tb & ~3);
958 tb_next = tb->page_next[n];
959 /* NOTE: this is subtle as a TB may span two physical pages */
960 if (n == 0) {
961 /* NOTE: tb_end may be after the end of the page, but
962 it is not a problem */
963 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
964 tb_end = tb_start + tb->size;
965 } else {
966 tb_start = tb->page_addr[1];
967 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
969 if (!(tb_end <= start || tb_start >= end)) {
970 #ifdef TARGET_HAS_PRECISE_SMC
971 if (current_tb_not_found) {
972 current_tb_not_found = 0;
973 current_tb = NULL;
974 if (env->mem_io_pc) {
975 /* now we have a real cpu fault */
976 current_tb = tb_find_pc(env->mem_io_pc);
979 if (current_tb == tb &&
980 (current_tb->cflags & CF_COUNT_MASK) != 1) {
981 /* If we are modifying the current TB, we must stop
982 its execution. We could be more precise by checking
983 that the modification is after the current PC, but it
984 would require a specialized function to partially
985 restore the CPU state */
987 current_tb_modified = 1;
988 cpu_restore_state(current_tb, env,
989 env->mem_io_pc, NULL);
990 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
991 &current_flags);
993 #endif /* TARGET_HAS_PRECISE_SMC */
994 /* we need to do that to handle the case where a signal
995 occurs while doing tb_phys_invalidate() */
996 saved_tb = NULL;
997 if (env) {
998 saved_tb = env->current_tb;
999 env->current_tb = NULL;
1001 tb_phys_invalidate(tb, -1);
1002 if (env) {
1003 env->current_tb = saved_tb;
1004 if (env->interrupt_request && env->current_tb)
1005 cpu_interrupt(env, env->interrupt_request);
1008 tb = tb_next;
1010 #if !defined(CONFIG_USER_ONLY)
1011 /* if no code remaining, no need to continue to use slow writes */
1012 if (!p->first_tb) {
1013 invalidate_page_bitmap(p);
1014 if (is_cpu_write_access) {
1015 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1018 #endif
1019 #ifdef TARGET_HAS_PRECISE_SMC
1020 if (current_tb_modified) {
1021 /* we generate a block containing just the instruction
1022 modifying the memory. It will ensure that it cannot modify
1023 itself */
1024 env->current_tb = NULL;
1025 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1026 cpu_resume_from_signal(env, NULL);
1028 #endif
1031 /* len must be <= 8 and start must be a multiple of len */
1032 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1034 PageDesc *p;
1035 int offset, b;
1036 #if 0
1037 if (1) {
1038 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1039 cpu_single_env->mem_io_vaddr, len,
1040 cpu_single_env->eip,
1041 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1043 #endif
1044 p = page_find(start >> TARGET_PAGE_BITS);
1045 if (!p)
1046 return;
1047 if (p->code_bitmap) {
1048 offset = start & ~TARGET_PAGE_MASK;
1049 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1050 if (b & ((1 << len) - 1))
1051 goto do_invalidate;
1052 } else {
1053 do_invalidate:
1054 tb_invalidate_phys_page_range(start, start + len, 1);
1058 #if !defined(CONFIG_SOFTMMU)
1059 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1060 unsigned long pc, void *puc)
1062 TranslationBlock *tb;
1063 PageDesc *p;
1064 int n;
1065 #ifdef TARGET_HAS_PRECISE_SMC
1066 TranslationBlock *current_tb = NULL;
1067 CPUState *env = cpu_single_env;
1068 int current_tb_modified = 0;
1069 target_ulong current_pc = 0;
1070 target_ulong current_cs_base = 0;
1071 int current_flags = 0;
1072 #endif
1074 addr &= TARGET_PAGE_MASK;
1075 p = page_find(addr >> TARGET_PAGE_BITS);
1076 if (!p)
1077 return;
1078 tb = p->first_tb;
1079 #ifdef TARGET_HAS_PRECISE_SMC
1080 if (tb && pc != 0) {
1081 current_tb = tb_find_pc(pc);
1083 #endif
1084 while (tb != NULL) {
1085 n = (long)tb & 3;
1086 tb = (TranslationBlock *)((long)tb & ~3);
1087 #ifdef TARGET_HAS_PRECISE_SMC
1088 if (current_tb == tb &&
1089 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1090 /* If we are modifying the current TB, we must stop
1091 its execution. We could be more precise by checking
1092 that the modification is after the current PC, but it
1093 would require a specialized function to partially
1094 restore the CPU state */
1096 current_tb_modified = 1;
1097 cpu_restore_state(current_tb, env, pc, puc);
1098 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1099 &current_flags);
1101 #endif /* TARGET_HAS_PRECISE_SMC */
1102 tb_phys_invalidate(tb, addr);
1103 tb = tb->page_next[n];
1105 p->first_tb = NULL;
1106 #ifdef TARGET_HAS_PRECISE_SMC
1107 if (current_tb_modified) {
1108 /* we generate a block containing just the instruction
1109 modifying the memory. It will ensure that it cannot modify
1110 itself */
1111 env->current_tb = NULL;
1112 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1113 cpu_resume_from_signal(env, puc);
1115 #endif
1117 #endif
1119 /* add the tb in the target page and protect it if necessary */
1120 static inline void tb_alloc_page(TranslationBlock *tb,
1121 unsigned int n, target_ulong page_addr)
1123 PageDesc *p;
1124 TranslationBlock *last_first_tb;
1126 tb->page_addr[n] = page_addr;
1127 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1128 tb->page_next[n] = p->first_tb;
1129 last_first_tb = p->first_tb;
1130 p->first_tb = (TranslationBlock *)((long)tb | n);
1131 invalidate_page_bitmap(p);
1133 #if defined(TARGET_HAS_SMC) || 1
1135 #if defined(CONFIG_USER_ONLY)
1136 if (p->flags & PAGE_WRITE) {
1137 target_ulong addr;
1138 PageDesc *p2;
1139 int prot;
1141 /* force the host page as non writable (writes will have a
1142 page fault + mprotect overhead) */
1143 page_addr &= qemu_host_page_mask;
1144 prot = 0;
1145 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1146 addr += TARGET_PAGE_SIZE) {
1148 p2 = page_find (addr >> TARGET_PAGE_BITS);
1149 if (!p2)
1150 continue;
1151 prot |= p2->flags;
1152 p2->flags &= ~PAGE_WRITE;
1153 page_get_flags(addr);
1155 mprotect(g2h(page_addr), qemu_host_page_size,
1156 (prot & PAGE_BITS) & ~PAGE_WRITE);
1157 #ifdef DEBUG_TB_INVALIDATE
1158 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1159 page_addr);
1160 #endif
1162 #else
1163 /* if some code is already present, then the pages are already
1164 protected. So we handle the case where only the first TB is
1165 allocated in a physical page */
1166 if (!last_first_tb) {
1167 tlb_protect_code(page_addr);
1169 #endif
1171 #endif /* TARGET_HAS_SMC */
1174 /* Allocate a new translation block. Flush the translation buffer if
1175 too many translation blocks or too much generated code. */
1176 TranslationBlock *tb_alloc(target_ulong pc)
1178 TranslationBlock *tb;
1180 if (nb_tbs >= code_gen_max_blocks ||
1181 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1182 return NULL;
1183 tb = &tbs[nb_tbs++];
1184 tb->pc = pc;
1185 tb->cflags = 0;
1186 return tb;
1189 void tb_free(TranslationBlock *tb)
1191 /* In practice this is mostly used for single-use temporary TBs.
1192 Ignore the hard cases and just back up if this TB happens to
1193 be the last one generated. */
1194 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1195 code_gen_ptr = tb->tc_ptr;
1196 nb_tbs--;
1200 /* add a new TB and link it to the physical page tables. phys_page2 is
1201 (-1) to indicate that only one page contains the TB. */
1202 void tb_link_phys(TranslationBlock *tb,
1203 target_ulong phys_pc, target_ulong phys_page2)
1205 unsigned int h;
1206 TranslationBlock **ptb;
1208 /* Grab the mmap lock to stop another thread invalidating this TB
1209 before we are done. */
1210 mmap_lock();
1211 /* add in the physical hash table */
1212 h = tb_phys_hash_func(phys_pc);
1213 ptb = &tb_phys_hash[h];
1214 tb->phys_hash_next = *ptb;
1215 *ptb = tb;
1217 /* add in the page list */
1218 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1219 if (phys_page2 != -1)
1220 tb_alloc_page(tb, 1, phys_page2);
1221 else
1222 tb->page_addr[1] = -1;
1224 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1225 tb->jmp_next[0] = NULL;
1226 tb->jmp_next[1] = NULL;
1228 /* init original jump addresses */
1229 if (tb->tb_next_offset[0] != 0xffff)
1230 tb_reset_jump(tb, 0);
1231 if (tb->tb_next_offset[1] != 0xffff)
1232 tb_reset_jump(tb, 1);
1234 #ifdef DEBUG_TB_CHECK
1235 tb_page_check();
1236 #endif
1237 mmap_unlock();
1240 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1241 tb[1].tc_ptr. Return NULL if not found */
1242 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1244 int m_min, m_max, m;
1245 unsigned long v;
1246 TranslationBlock *tb;
1248 if (nb_tbs <= 0)
1249 return NULL;
1250 if (tc_ptr < (unsigned long)code_gen_buffer ||
1251 tc_ptr >= (unsigned long)code_gen_ptr)
1252 return NULL;
1253 /* binary search (cf Knuth) */
1254 m_min = 0;
1255 m_max = nb_tbs - 1;
1256 while (m_min <= m_max) {
1257 m = (m_min + m_max) >> 1;
1258 tb = &tbs[m];
1259 v = (unsigned long)tb->tc_ptr;
1260 if (v == tc_ptr)
1261 return tb;
1262 else if (tc_ptr < v) {
1263 m_max = m - 1;
1264 } else {
1265 m_min = m + 1;
1268 return &tbs[m_max];
1271 static void tb_reset_jump_recursive(TranslationBlock *tb);
1273 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1275 TranslationBlock *tb1, *tb_next, **ptb;
1276 unsigned int n1;
1278 tb1 = tb->jmp_next[n];
1279 if (tb1 != NULL) {
1280 /* find head of list */
1281 for(;;) {
1282 n1 = (long)tb1 & 3;
1283 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1284 if (n1 == 2)
1285 break;
1286 tb1 = tb1->jmp_next[n1];
1288 /* we are now sure that tb jumps to tb1 */
1289 tb_next = tb1;
1291 /* remove tb from the jmp_first list */
1292 ptb = &tb_next->jmp_first;
1293 for(;;) {
1294 tb1 = *ptb;
1295 n1 = (long)tb1 & 3;
1296 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1297 if (n1 == n && tb1 == tb)
1298 break;
1299 ptb = &tb1->jmp_next[n1];
1301 *ptb = tb->jmp_next[n];
1302 tb->jmp_next[n] = NULL;
1304 /* suppress the jump to next tb in generated code */
1305 tb_reset_jump(tb, n);
1307 /* suppress jumps in the tb on which we could have jumped */
1308 tb_reset_jump_recursive(tb_next);
1312 static void tb_reset_jump_recursive(TranslationBlock *tb)
1314 tb_reset_jump_recursive2(tb, 0);
1315 tb_reset_jump_recursive2(tb, 1);
1318 #if defined(TARGET_HAS_ICE)
1319 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1321 target_phys_addr_t addr;
1322 target_ulong pd;
1323 ram_addr_t ram_addr;
1324 PhysPageDesc *p;
1326 addr = cpu_get_phys_page_debug(env, pc);
1327 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1328 if (!p) {
1329 pd = IO_MEM_UNASSIGNED;
1330 } else {
1331 pd = p->phys_offset;
1333 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1334 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1336 #endif
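/* Watchpoints are implemented through the TLB: inserting or removing
   one flushes the TLB entry for the page, and tlb_set_page_exec() then
   routes every access to a watched page through the io_mem_watch slow
   path so that a hit can be detected. */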
1338 /* Add a watchpoint. */
1339 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1340 int flags, CPUWatchpoint **watchpoint)
1342 target_ulong len_mask = ~(len - 1);
1343 CPUWatchpoint *wp;
1345 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1346 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1347 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1348 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1349 return -EINVAL;
1351 wp = qemu_malloc(sizeof(*wp));
1353 wp->vaddr = addr;
1354 wp->len_mask = len_mask;
1355 wp->flags = flags;
1357 /* keep all GDB-injected watchpoints in front */
1358 if (flags & BP_GDB)
1359 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1360 else
1361 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1363 tlb_flush_page(env, addr);
1365 if (watchpoint)
1366 *watchpoint = wp;
1367 return 0;
1370 /* Remove a specific watchpoint. */
1371 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1372 int flags)
1374 target_ulong len_mask = ~(len - 1);
1375 CPUWatchpoint *wp;
1377 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1378 if (addr == wp->vaddr && len_mask == wp->len_mask
1379 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1380 cpu_watchpoint_remove_by_ref(env, wp);
1381 return 0;
1384 return -ENOENT;
1387 /* Remove a specific watchpoint by reference. */
1388 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1390 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1392 tlb_flush_page(env, watchpoint->vaddr);
1394 qemu_free(watchpoint);
1397 /* Remove all matching watchpoints. */
1398 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1400 CPUWatchpoint *wp, *next;
1402 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1403 if (wp->flags & mask)
1404 cpu_watchpoint_remove_by_ref(env, wp);
1408 /* Add a breakpoint. */
1409 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1410 CPUBreakpoint **breakpoint)
1412 #if defined(TARGET_HAS_ICE)
1413 CPUBreakpoint *bp;
1415 bp = qemu_malloc(sizeof(*bp));
1417 bp->pc = pc;
1418 bp->flags = flags;
1420 /* keep all GDB-injected breakpoints in front */
1421 if (flags & BP_GDB)
1422 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1423 else
1424 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1426 breakpoint_invalidate(env, pc);
1428 if (breakpoint)
1429 *breakpoint = bp;
1430 return 0;
1431 #else
1432 return -ENOSYS;
1433 #endif
1436 /* Remove a specific breakpoint. */
1437 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1439 #if defined(TARGET_HAS_ICE)
1440 CPUBreakpoint *bp;
1442 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1443 if (bp->pc == pc && bp->flags == flags) {
1444 cpu_breakpoint_remove_by_ref(env, bp);
1445 return 0;
1448 return -ENOENT;
1449 #else
1450 return -ENOSYS;
1451 #endif
1454 /* Remove a specific breakpoint by reference. */
1455 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1457 #if defined(TARGET_HAS_ICE)
1458 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1460 breakpoint_invalidate(env, breakpoint->pc);
1462 qemu_free(breakpoint);
1463 #endif
1466 /* Remove all matching breakpoints. */
1467 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1469 #if defined(TARGET_HAS_ICE)
1470 CPUBreakpoint *bp, *next;
1472 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1473 if (bp->flags & mask)
1474 cpu_breakpoint_remove_by_ref(env, bp);
1476 #endif
1479 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1480 CPU loop after each instruction */
1481 void cpu_single_step(CPUState *env, int enabled)
1483 #if defined(TARGET_HAS_ICE)
1484 if (env->singlestep_enabled != enabled) {
1485 env->singlestep_enabled = enabled;
1486 if (kvm_enabled())
1487 kvm_update_guest_debug(env, 0);
1488 else {
1489 /* must flush all the translated code to avoid inconsistencies */
1490 /* XXX: only flush what is necessary */
1491 tb_flush(env);
1494 #endif
1497 /* enable or disable low level logging */
1498 void cpu_set_log(int log_flags)
1500 loglevel = log_flags;
1501 if (loglevel && !logfile) {
1502 logfile = fopen(logfilename, log_append ? "a" : "w");
1503 if (!logfile) {
1504 perror(logfilename);
1505 _exit(1);
1507 #if !defined(CONFIG_SOFTMMU)
1508 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1510 static char logfile_buf[4096];
1511 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1513 #else
1514 setvbuf(logfile, NULL, _IOLBF, 0);
1515 #endif
1516 log_append = 1;
1518 if (!loglevel && logfile) {
1519 fclose(logfile);
1520 logfile = NULL;
1524 void cpu_set_log_filename(const char *filename)
1526 logfilename = strdup(filename);
1527 if (logfile) {
1528 fclose(logfile);
1529 logfile = NULL;
1531 cpu_set_log(loglevel);
1534 static void cpu_unlink_tb(CPUState *env)
1536 #if defined(USE_NPTL)
1537 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1538 problem and hope the cpu will stop of its own accord. For userspace
1539 emulation this often isn't actually as bad as it sounds. Often
1540 signals are used primarily to interrupt blocking syscalls. */
1541 #else
1542 TranslationBlock *tb;
1543 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1545 tb = env->current_tb;
1546 /* if the cpu is currently executing code, we must unlink it and
1547 all the potentially executing TB */
1548 if (tb && !testandset(&interrupt_lock)) {
1549 env->current_tb = NULL;
1550 tb_reset_jump_recursive(tb);
1551 resetlock(&interrupt_lock);
1553 #endif
1556 /* mask must never be zero, except for A20 change call */
1557 void cpu_interrupt(CPUState *env, int mask)
1559 int old_mask;
1561 old_mask = env->interrupt_request;
1562 env->interrupt_request |= mask;
1564 #ifndef CONFIG_USER_ONLY
1566 /* If called from iothread context, wake the target cpu in
1567 case it's halted. */
1569 if (!qemu_cpu_self(env)) {
1570 qemu_cpu_kick(env);
1571 return;
1573 #endif
1575 if (use_icount) {
1576 env->icount_decr.u16.high = 0xffff;
1577 #ifndef CONFIG_USER_ONLY
1578 if (!can_do_io(env)
1579 && (mask & ~old_mask) != 0) {
1580 cpu_abort(env, "Raised interrupt while not in I/O function");
1582 #endif
1583 } else {
1584 cpu_unlink_tb(env);
1588 void cpu_reset_interrupt(CPUState *env, int mask)
1590 env->interrupt_request &= ~mask;
1593 void cpu_exit(CPUState *env)
1595 env->exit_request = 1;
1596 cpu_unlink_tb(env);
1599 const CPULogItem cpu_log_items[] = {
1600 { CPU_LOG_TB_OUT_ASM, "out_asm",
1601 "show generated host assembly code for each compiled TB" },
1602 { CPU_LOG_TB_IN_ASM, "in_asm",
1603 "show target assembly code for each compiled TB" },
1604 { CPU_LOG_TB_OP, "op",
1605 "show micro ops for each compiled TB" },
1606 { CPU_LOG_TB_OP_OPT, "op_opt",
1607 "show micro ops "
1608 #ifdef TARGET_I386
1609 "before eflags optimization and "
1610 #endif
1611 "after liveness analysis" },
1612 { CPU_LOG_INT, "int",
1613 "show interrupts/exceptions in short format" },
1614 { CPU_LOG_EXEC, "exec",
1615 "show trace before each executed TB (lots of logs)" },
1616 { CPU_LOG_TB_CPU, "cpu",
1617 "show CPU state before block translation" },
1618 #ifdef TARGET_I386
1619 { CPU_LOG_PCALL, "pcall",
1620 "show protected mode far calls/returns/exceptions" },
1621 { CPU_LOG_RESET, "cpu_reset",
1622 "show CPU state before CPU resets" },
1623 #endif
1624 #ifdef DEBUG_IOPORT
1625 { CPU_LOG_IOPORT, "ioport",
1626 "show all i/o ports accesses" },
1627 #endif
1628 { 0, NULL, NULL },
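/* This table backs QEMU's "-d" logging option: cpu_str_to_log_mask()
   below turns a comma-separated list of the names above (e.g.
   "in_asm,cpu", or "all") into the OR of the corresponding masks, and
   cpu_set_log() opens the log file the first time a non-zero mask is
   set. */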
1631 static int cmp1(const char *s1, int n, const char *s2)
1633 if (strlen(s2) != n)
1634 return 0;
1635 return memcmp(s1, s2, n) == 0;
1638 /* takes a comma separated list of log masks. Return 0 if error. */
1639 int cpu_str_to_log_mask(const char *str)
1641 const CPULogItem *item;
1642 int mask;
1643 const char *p, *p1;
1645 p = str;
1646 mask = 0;
1647 for(;;) {
1648 p1 = strchr(p, ',');
1649 if (!p1)
1650 p1 = p + strlen(p);
1651 if(cmp1(p,p1-p,"all")) {
1652 for(item = cpu_log_items; item->mask != 0; item++) {
1653 mask |= item->mask;
1655 } else {
1656 for(item = cpu_log_items; item->mask != 0; item++) {
1657 if (cmp1(p, p1 - p, item->name))
1658 goto found;
1660 return 0;
1662 found:
1663 mask |= item->mask;
1664 if (*p1 != ',')
1665 break;
1666 p = p1 + 1;
1668 return mask;
1671 void cpu_abort(CPUState *env, const char *fmt, ...)
1673 va_list ap;
1674 va_list ap2;
1676 va_start(ap, fmt);
1677 va_copy(ap2, ap);
1678 fprintf(stderr, "qemu: fatal: ");
1679 vfprintf(stderr, fmt, ap);
1680 fprintf(stderr, "\n");
1681 #ifdef TARGET_I386
1682 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1683 #else
1684 cpu_dump_state(env, stderr, fprintf, 0);
1685 #endif
1686 if (qemu_log_enabled()) {
1687 qemu_log("qemu: fatal: ");
1688 qemu_log_vprintf(fmt, ap2);
1689 qemu_log("\n");
1690 #ifdef TARGET_I386
1691 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1692 #else
1693 log_cpu_state(env, 0);
1694 #endif
1695 qemu_log_flush();
1696 qemu_log_close();
1698 va_end(ap2);
1699 va_end(ap);
1700 abort();
1703 CPUState *cpu_copy(CPUState *env)
1705 CPUState *new_env = cpu_init(env->cpu_model_str);
1706 CPUState *next_cpu = new_env->next_cpu;
1707 int cpu_index = new_env->cpu_index;
1708 #if defined(TARGET_HAS_ICE)
1709 CPUBreakpoint *bp;
1710 CPUWatchpoint *wp;
1711 #endif
1713 memcpy(new_env, env, sizeof(CPUState));
1715 /* Preserve chaining and index. */
1716 new_env->next_cpu = next_cpu;
1717 new_env->cpu_index = cpu_index;
1719 /* Clone all break/watchpoints.
1720 Note: Once we support ptrace with hw-debug register access, make sure
1721 BP_CPU break/watchpoints are handled correctly on clone. */
1722 TAILQ_INIT(&env->breakpoints);
1723 TAILQ_INIT(&env->watchpoints);
1724 #if defined(TARGET_HAS_ICE)
1725 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1726 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1728 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1729 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1730 wp->flags, NULL);
1732 #endif
1734 return new_env;
1737 #if !defined(CONFIG_USER_ONLY)
1739 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1741 unsigned int i;
1743 /* Discard jump cache entries for any tb which might potentially
1744 overlap the flushed page. */
1745 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1746 memset (&env->tb_jmp_cache[i], 0,
1747 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1749 i = tb_jmp_cache_hash_page(addr);
1750 memset (&env->tb_jmp_cache[i], 0,
1751 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1754 static CPUTLBEntry s_cputlb_empty_entry = {
1755 .addr_read = -1,
1756 .addr_write = -1,
1757 .addr_code = -1,
1758 .addend = -1,
1761 /* NOTE: if flush_global is true, also flush global entries (not
1762 implemented yet) */
1763 void tlb_flush(CPUState *env, int flush_global)
1765 int i;
1767 #if defined(DEBUG_TLB)
1768 printf("tlb_flush:\n");
1769 #endif
1770 /* must reset current TB so that interrupts cannot modify the
1771 links while we are modifying them */
1772 env->current_tb = NULL;
1774 for(i = 0; i < CPU_TLB_SIZE; i++) {
1775 int mmu_idx;
1776 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1777 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1781 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1783 #ifdef CONFIG_KQEMU
1784 if (env->kqemu_enabled) {
1785 kqemu_flush(env, flush_global);
1787 #endif
1788 tlb_flush_count++;
1791 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1793 if (addr == (tlb_entry->addr_read &
1794 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1795 addr == (tlb_entry->addr_write &
1796 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1797 addr == (tlb_entry->addr_code &
1798 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1799 *tlb_entry = s_cputlb_empty_entry;
1803 void tlb_flush_page(CPUState *env, target_ulong addr)
1805 int i;
1806 int mmu_idx;
1808 #if defined(DEBUG_TLB)
1809 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1810 #endif
1811 /* must reset current TB so that interrupts cannot modify the
1812 links while we are modifying them */
1813 env->current_tb = NULL;
1815 addr &= TARGET_PAGE_MASK;
1816 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1817 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1818 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1820 tlb_flush_jmp_cache(env, addr);
1822 #ifdef CONFIG_KQEMU
1823 if (env->kqemu_enabled) {
1824 kqemu_flush_page(env, addr);
1826 #endif
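/* Dirty memory tracking: phys_ram_dirty holds one byte of dirty flags
   per target page of RAM.  tlb_protect_code() clears CODE_DIRTY_FLAG
   for a page that now contains translated code, so that stores to it
   take the not-dirty slow path and can invalidate overlapping TBs;
   tlb_unprotect_code_phys() sets the flag again once the page holds no
   TBs. */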
1829 /* update the TLBs so that writes to code in the virtual page 'addr'
1830 can be detected */
1831 static void tlb_protect_code(ram_addr_t ram_addr)
1833 cpu_physical_memory_reset_dirty(ram_addr,
1834 ram_addr + TARGET_PAGE_SIZE,
1835 CODE_DIRTY_FLAG);
1838 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1839 tested for self modifying code */
1840 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1841 target_ulong vaddr)
1843 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1846 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1847 unsigned long start, unsigned long length)
1849 unsigned long addr;
1850 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1851 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1852 if ((addr - start) < length) {
1853 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1858 /* Note: start and end must be within the same ram block. */
1859 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1860 int dirty_flags)
1862 CPUState *env;
1863 unsigned long length, start1;
1864 int i, mask, len;
1865 uint8_t *p;
1867 start &= TARGET_PAGE_MASK;
1868 end = TARGET_PAGE_ALIGN(end);
1870 length = end - start;
1871 if (length == 0)
1872 return;
1873 len = length >> TARGET_PAGE_BITS;
1874 #ifdef CONFIG_KQEMU
1875 /* XXX: should not depend on cpu context */
1876 env = first_cpu;
1877 if (env->kqemu_enabled) {
1878 ram_addr_t addr;
1879 addr = start;
1880 for(i = 0; i < len; i++) {
1881 kqemu_set_notdirty(env, addr);
1882 addr += TARGET_PAGE_SIZE;
1885 #endif
1886 mask = ~dirty_flags;
1887 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1888 for(i = 0; i < len; i++)
1889 p[i] &= mask;
1891 /* we modify the TLB cache so that the dirty bit will be set again
1892 when accessing the range */
1893 start1 = (unsigned long)qemu_get_ram_ptr(start);
1894 Check that we don't span multiple blocks - this breaks the
1895 address comparisons below. */
1896 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1897 != (end - 1) - start) {
1898 abort();
1901 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1902 int mmu_idx;
1903 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1904 for(i = 0; i < CPU_TLB_SIZE; i++)
1905 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1906 start1, length);
1911 int cpu_physical_memory_set_dirty_tracking(int enable)
1913 in_migration = enable;
1914 if (kvm_enabled()) {
1915 return kvm_set_migration_log(enable);
1917 return 0;
1920 int cpu_physical_memory_get_dirty_tracking(void)
1922 return in_migration;
1925 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1926 target_phys_addr_t end_addr)
1928 int ret = 0;
1930 if (kvm_enabled())
1931 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1932 return ret;
1935 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1937 ram_addr_t ram_addr;
1938 void *p;
1940 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1941 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1942 + tlb_entry->addend);
1943 ram_addr = qemu_ram_addr_from_host(p);
1944 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1945 tlb_entry->addr_write |= TLB_NOTDIRTY;
1950 /* update the TLB according to the current state of the dirty bits */
1951 void cpu_tlb_update_dirty(CPUState *env)
1953 int i;
1954 int mmu_idx;
1955 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1956 for(i = 0; i < CPU_TLB_SIZE; i++)
1957 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1961 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1963 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1964 tlb_entry->addr_write = vaddr;
1967 /* update the TLB corresponding to virtual page vaddr
1968 so that it is no longer dirty */
1969 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1971 int i;
1972 int mmu_idx;
1974 vaddr &= TARGET_PAGE_MASK;
1975 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1976 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1977 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1980 /* add a new TLB entry. At most one entry for a given virtual address
1981 is permitted. Return 0 if OK or 2 if the page could not be mapped
1982 (can only happen in non SOFTMMU mode for I/O pages or pages
1983 conflicting with the host address space). */
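/* Each CPUTLBEntry stores, for read, write and code fetch, the page
   aligned guest virtual address with flag bits or-ed into the low bits
   (TLB_MMIO forces the I/O slow path, TLB_NOTDIRTY forces dirty
   tracking on stores), plus an addend giving host address minus guest
   virtual address for the fast path.  env->iotlb records which I/O
   handler or RAM page backs the access on the slow path. */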
1984 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1985 target_phys_addr_t paddr, int prot,
1986 int mmu_idx, int is_softmmu)
1988 PhysPageDesc *p;
1989 unsigned long pd;
1990 unsigned int index;
1991 target_ulong address;
1992 target_ulong code_address;
1993 target_phys_addr_t addend;
1994 int ret;
1995 CPUTLBEntry *te;
1996 CPUWatchpoint *wp;
1997 target_phys_addr_t iotlb;
1999 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2000 if (!p) {
2001 pd = IO_MEM_UNASSIGNED;
2002 } else {
2003 pd = p->phys_offset;
2005 #if defined(DEBUG_TLB)
2006 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2007 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2008 #endif
2010 ret = 0;
2011 address = vaddr;
2012 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2013 /* IO memory case (romd handled later) */
2014 address |= TLB_MMIO;
2016 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2017 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2018 /* Normal RAM. */
2019 iotlb = pd & TARGET_PAGE_MASK;
2020 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2021 iotlb |= IO_MEM_NOTDIRTY;
2022 else
2023 iotlb |= IO_MEM_ROM;
2024 } else {
2025 /* IO handlers are currently passed a physical address.
2026 It would be nice to pass an offset from the base address
2027 of that region. This would avoid having to special case RAM,
2028 and avoid full address decoding in every device.
2029 We can't use the high bits of pd for this because
2030 IO_MEM_ROMD uses these as a ram address. */
2031 iotlb = (pd & ~TARGET_PAGE_MASK);
2032 if (p) {
2033 iotlb += p->region_offset;
2034 } else {
2035 iotlb += paddr;
2039 code_address = address;
2040 /* Make accesses to pages with watchpoints go via the
2041 watchpoint trap routines. */
2042 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2043 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2044 iotlb = io_mem_watch + paddr;
2045 /* TODO: The memory case can be optimized by not trapping
2046 reads of pages with a write breakpoint. */
2047 address |= TLB_MMIO;
2051 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2052 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2053 te = &env->tlb_table[mmu_idx][index];
2054 te->addend = addend - vaddr;
2055 if (prot & PAGE_READ) {
2056 te->addr_read = address;
2057 } else {
2058 te->addr_read = -1;
2061 if (prot & PAGE_EXEC) {
2062 te->addr_code = code_address;
2063 } else {
2064 te->addr_code = -1;
2066 if (prot & PAGE_WRITE) {
2067 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2068 (pd & IO_MEM_ROMD)) {
2069 /* Write access calls the I/O callback. */
2070 te->addr_write = address | TLB_MMIO;
2071 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2072 !cpu_physical_memory_is_dirty(pd)) {
2073 te->addr_write = address | TLB_NOTDIRTY;
2074 } else {
2075 te->addr_write = address;
2077 } else {
2078 te->addr_write = -1;
2080 return ret;
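/*
 * Illustrative sketch (not part of the original file): a target's MMU fault
 * handler typically resolves vaddr to a physical page plus protection bits
 * and then installs the mapping with a call shaped like the one below.
 * The variable names here are hypothetical.
 *
 *   paddr &= TARGET_PAGE_MASK;
 *   vaddr &= TARGET_PAGE_MASK;
 *   ret = tlb_set_page_exec(env, vaddr, paddr,
 *                           PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *                           mmu_idx, is_softmmu);
 */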
2083 #else
2085 void tlb_flush(CPUState *env, int flush_global)
2089 void tlb_flush_page(CPUState *env, target_ulong addr)
2093 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2094 target_phys_addr_t paddr, int prot,
2095 int mmu_idx, int is_softmmu)
2097 return 0;
2101 * Walks guest process memory "regions" one by one
2102 * and calls callback function 'fn' for each region.
2104 int walk_memory_regions(void *priv,
2105 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2107 unsigned long start, end;
2108 PageDesc *p = NULL;
2109 int i, j, prot, prot1;
2110 int rc = 0;
2112 start = end = -1;
2113 prot = 0;
2115 for (i = 0; i <= L1_SIZE; i++) {
2116 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2117 for (j = 0; j < L2_SIZE; j++) {
2118 prot1 = (p == NULL) ? 0 : p[j].flags;
2120 * "region" is one continuous chunk of memory
2121 * that has same protection flags set.
2122 * that has the same protection flags set.
2123 if (prot1 != prot) {
2124 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2125 if (start != -1) {
2126 rc = (*fn)(priv, start, end, prot);
2127 /* callback can stop iteration by returning != 0 */
2128 if (rc != 0)
2129 return (rc);
2131 if (prot1 != 0)
2132 start = end;
2133 else
2134 start = -1;
2135 prot = prot1;
2137 if (p == NULL)
2138 break;
2141 return (rc);
2144 static int dump_region(void *priv, unsigned long start,
2145 unsigned long end, unsigned long prot)
2147 FILE *f = (FILE *)priv;
2149 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2150 start, end, end - start,
2151 ((prot & PAGE_READ) ? 'r' : '-'),
2152 ((prot & PAGE_WRITE) ? 'w' : '-'),
2153 ((prot & PAGE_EXEC) ? 'x' : '-'));
2155 return (0);
2158 /* dump memory mappings */
2159 void page_dump(FILE *f)
2161 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2162 "start", "end", "size", "prot");
2163 walk_memory_regions(f, dump_region);
2166 int page_get_flags(target_ulong address)
2168 PageDesc *p;
2170 p = page_find(address >> TARGET_PAGE_BITS);
2171 if (!p)
2172 return 0;
2173 return p->flags;
2176 /* modify the flags of a page and invalidate the code if
2177 necessary. The flag PAGE_WRITE_ORG is set automatically
2178 depending on PAGE_WRITE */
2179 void page_set_flags(target_ulong start, target_ulong end, int flags)
2181 PageDesc *p;
2182 target_ulong addr;
2184 /* mmap_lock should already be held. */
2185 start = start & TARGET_PAGE_MASK;
2186 end = TARGET_PAGE_ALIGN(end);
2187 if (flags & PAGE_WRITE)
2188 flags |= PAGE_WRITE_ORG;
2189 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2190 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2191 /* We may be called for host regions that are outside guest
2192 address space. */
2193 if (!p)
2194 return;
2195 /* if the write protection is set, then we invalidate the code
2196 inside */
2197 if (!(p->flags & PAGE_WRITE) &&
2198 (flags & PAGE_WRITE) &&
2199 p->first_tb) {
2200 tb_invalidate_phys_page(addr, 0, NULL);
2202 p->flags = flags;
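/*
 * Illustrative sketch (not part of the original file): the user-mode mmap
 * emulation typically marks a freshly mapped range with something like the
 * call below; the flag combination shown is only an example.
 *
 *   page_set_flags(start, start + len,
 *                  PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
 */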
2206 int page_check_range(target_ulong start, target_ulong len, int flags)
2208 PageDesc *p;
2209 target_ulong end;
2210 target_ulong addr;
2212 if (start + len < start)
2213 /* we've wrapped around */
2214 return -1;
2216 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2217 start = start & TARGET_PAGE_MASK;
2219 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2220 p = page_find(addr >> TARGET_PAGE_BITS);
2221 if( !p )
2222 return -1;
2223 if( !(p->flags & PAGE_VALID) )
2224 return -1;
2226 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2227 return -1;
2228 if (flags & PAGE_WRITE) {
2229 if (!(p->flags & PAGE_WRITE_ORG))
2230 return -1;
2231 /* unprotect the page if it was put read-only because it
2232 contains translated code */
2233 if (!(p->flags & PAGE_WRITE)) {
2234 if (!page_unprotect(addr, 0, NULL))
2235 return -1;
2237 return 0;
2240 return 0;
2243 /* called from signal handler: invalidate the code and unprotect the
2244 page. Return TRUE if the fault was successfully handled. */
2245 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2247 unsigned int page_index, prot, pindex;
2248 PageDesc *p, *p1;
2249 target_ulong host_start, host_end, addr;
2251 /* Technically this isn't safe inside a signal handler. However we
2252 know this only ever happens in a synchronous SEGV handler, so in
2253 practice it seems to be ok. */
2254 mmap_lock();
2256 host_start = address & qemu_host_page_mask;
2257 page_index = host_start >> TARGET_PAGE_BITS;
2258 p1 = page_find(page_index);
2259 if (!p1) {
2260 mmap_unlock();
2261 return 0;
2263 host_end = host_start + qemu_host_page_size;
2264 p = p1;
2265 prot = 0;
2266 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2267 prot |= p->flags;
2268 p++;
2270 /* if the page was really writable, then we change its
2271 protection back to writable */
2272 if (prot & PAGE_WRITE_ORG) {
2273 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2274 if (!(p1[pindex].flags & PAGE_WRITE)) {
2275 mprotect((void *)g2h(host_start), qemu_host_page_size,
2276 (prot & PAGE_BITS) | PAGE_WRITE);
2277 p1[pindex].flags |= PAGE_WRITE;
2278 /* and since the content will be modified, we must invalidate
2279 the corresponding translated code. */
2280 tb_invalidate_phys_page(address, pc, puc);
2281 #ifdef DEBUG_TB_CHECK
2282 tb_invalidate_check(address);
2283 #endif
2284 mmap_unlock();
2285 return 1;
2288 mmap_unlock();
2289 return 0;
2292 static inline void tlb_set_dirty(CPUState *env,
2293 unsigned long addr, target_ulong vaddr)
2296 #endif /* defined(CONFIG_USER_ONLY) */
2298 #if !defined(CONFIG_USER_ONLY)
2300 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2301 ram_addr_t memory, ram_addr_t region_offset);
2302 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2303 ram_addr_t orig_memory, ram_addr_t region_offset);
2304 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2305 need_subpage) \
2306 do { \
2307 if (addr > start_addr) \
2308 start_addr2 = 0; \
2309 else { \
2310 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2311 if (start_addr2 > 0) \
2312 need_subpage = 1; \
2315 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2316 end_addr2 = TARGET_PAGE_SIZE - 1; \
2317 else { \
2318 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2319 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2320 need_subpage = 1; \
2322 } while (0)
2324 /* register physical memory. 'size' must be a multiple of the target
2325 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2326 io memory page. The address used when calling the IO function is
2327 the offset from the start of the region, plus region_offset. Both
2328 start_addr and region_offset are rounded down to a page boundary
2329 before calculating this offset. This should not be a problem unless
2330 the low bits of start_addr and region_offset differ. */
2331 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2332 ram_addr_t size,
2333 ram_addr_t phys_offset,
2334 ram_addr_t region_offset)
2336 target_phys_addr_t addr, end_addr;
2337 PhysPageDesc *p;
2338 CPUState *env;
2339 ram_addr_t orig_size = size;
2340 void *subpage;
2342 #ifdef CONFIG_KQEMU
2343 /* XXX: should not depend on cpu context */
2344 env = first_cpu;
2345 if (env->kqemu_enabled) {
2346 kqemu_set_phys_mem(start_addr, size, phys_offset);
2348 #endif
2349 if (kvm_enabled())
2350 kvm_set_phys_mem(start_addr, size, phys_offset);
2352 if (phys_offset == IO_MEM_UNASSIGNED) {
2353 region_offset = start_addr;
2355 region_offset &= TARGET_PAGE_MASK;
2356 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2357 end_addr = start_addr + (target_phys_addr_t)size;
2358 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2359 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2360 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2361 ram_addr_t orig_memory = p->phys_offset;
2362 target_phys_addr_t start_addr2, end_addr2;
2363 int need_subpage = 0;
2365 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2366 need_subpage);
2367 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2368 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2369 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2370 &p->phys_offset, orig_memory,
2371 p->region_offset);
2372 } else {
2373 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2374 >> IO_MEM_SHIFT];
2376 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2377 region_offset);
2378 p->region_offset = 0;
2379 } else {
2380 p->phys_offset = phys_offset;
2381 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2382 (phys_offset & IO_MEM_ROMD))
2383 phys_offset += TARGET_PAGE_SIZE;
2385 } else {
2386 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2387 p->phys_offset = phys_offset;
2388 p->region_offset = region_offset;
2389 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2390 (phys_offset & IO_MEM_ROMD)) {
2391 phys_offset += TARGET_PAGE_SIZE;
2392 } else {
2393 target_phys_addr_t start_addr2, end_addr2;
2394 int need_subpage = 0;
2396 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2397 end_addr2, need_subpage);
2399 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2400 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2401 &p->phys_offset, IO_MEM_UNASSIGNED,
2402 addr & TARGET_PAGE_MASK);
2403 subpage_register(subpage, start_addr2, end_addr2,
2404 phys_offset, region_offset);
2405 p->region_offset = 0;
2409 region_offset += TARGET_PAGE_SIZE;
2412 /* since each CPU stores ram addresses in its TLB cache, we must
2413 reset the modified entries */
2414 /* XXX: slow ! */
2415 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2416 tlb_flush(env, 1);
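/*
 * Illustrative sketch (not part of the original file): board code usually
 * allocates guest RAM with qemu_ram_alloc() and then maps it at a guest
 * physical address with this function. The base address and size below are
 * made-up example values.
 *
 *   ram_addr_t ram_off = qemu_ram_alloc(0x00800000);             (8 MiB)
 *   cpu_register_physical_memory_offset(0x00000000, 0x00800000,
 *                                       ram_off | IO_MEM_RAM, 0);
 */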
2420 /* XXX: temporary until new memory mapping API */
2421 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2423 PhysPageDesc *p;
2425 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2426 if (!p)
2427 return IO_MEM_UNASSIGNED;
2428 return p->phys_offset;
2431 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2433 if (kvm_enabled())
2434 kvm_coalesce_mmio_region(addr, size);
2437 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2439 if (kvm_enabled())
2440 kvm_uncoalesce_mmio_region(addr, size);
2443 #ifdef CONFIG_KQEMU
2444 /* XXX: better than nothing */
2445 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2447 ram_addr_t addr;
2448 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2449 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2450 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2451 abort();
2453 addr = last_ram_offset;
2454 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2455 return addr;
2457 #endif
2459 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2461 RAMBlock *new_block;
2463 #ifdef CONFIG_KQEMU
2464 if (kqemu_phys_ram_base) {
2465 return kqemu_ram_alloc(size);
2467 #endif
2469 size = TARGET_PAGE_ALIGN(size);
2470 new_block = qemu_malloc(sizeof(*new_block));
2472 new_block->host = qemu_vmalloc(size);
2473 new_block->offset = last_ram_offset;
2474 new_block->length = size;
2476 new_block->next = ram_blocks;
2477 ram_blocks = new_block;
2479 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2480 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2481 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2482 0xff, size >> TARGET_PAGE_BITS);
2484 last_ram_offset += size;
2486 if (kvm_enabled())
2487 kvm_setup_guest_memory(new_block->host, size);
2489 return new_block->offset;
2492 void qemu_ram_free(ram_addr_t addr)
2494 /* TODO: implement this. */
2497 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2498 With the exception of the softmmu code in this file, this should
2499 only be used for local memory (e.g. video ram) that the device owns,
2500 and knows it isn't going to access beyond the end of the block.
2502 It should not be used for general purpose DMA.
2503 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2505 void *qemu_get_ram_ptr(ram_addr_t addr)
2507 RAMBlock *prev;
2508 RAMBlock **prevp;
2509 RAMBlock *block;
2511 #ifdef CONFIG_KQEMU
2512 if (kqemu_phys_ram_base) {
2513 return kqemu_phys_ram_base + addr;
2515 #endif
2517 prev = NULL;
2518 prevp = &ram_blocks;
2519 block = ram_blocks;
2520 while (block && (block->offset > addr
2521 || block->offset + block->length <= addr)) {
2522 if (prev)
2523 prevp = &prev->next;
2524 prev = block;
2525 block = block->next;
2527 if (!block) {
2528 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2529 abort();
2531 /* Move this entry to the start of the list. */
2532 if (prev) {
2533 prev->next = block->next;
2534 block->next = *prevp;
2535 *prevp = block;
2537 return block->host + (addr - block->offset);
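/*
 * Illustrative sketch (not part of the original file): a display device that
 * owns its video RAM might keep a host pointer to it like this, in line with
 * the usage restrictions described above. The size is a made-up example.
 *
 *   ram_addr_t vram_off = qemu_ram_alloc(0x00100000);
 *   uint8_t *vram_ptr   = qemu_get_ram_ptr(vram_off);
 */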
2540 /* Some of the softmmu routines need to translate from a host pointer
2541 (typically a TLB entry) back to a ram offset. */
2542 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2544 RAMBlock *prev;
2545 RAMBlock **prevp;
2546 RAMBlock *block;
2547 uint8_t *host = ptr;
2549 #ifdef CONFIG_KQEMU
2550 if (kqemu_phys_ram_base) {
2551 return host - kqemu_phys_ram_base;
2553 #endif
2555 prev = NULL;
2556 prevp = &ram_blocks;
2557 block = ram_blocks;
2558 while (block && (block->host > host
2559 || block->host + block->length <= host)) {
2560 if (prev)
2561 prevp = &prev->next;
2562 prev = block;
2563 block = block->next;
2565 if (!block) {
2566 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2567 abort();
2569 return block->offset + (host - block->host);
2572 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2574 #ifdef DEBUG_UNASSIGNED
2575 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2576 #endif
2577 #if defined(TARGET_SPARC)
2578 do_unassigned_access(addr, 0, 0, 0, 1);
2579 #endif
2580 return 0;
2583 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2585 #ifdef DEBUG_UNASSIGNED
2586 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2587 #endif
2588 #if defined(TARGET_SPARC)
2589 do_unassigned_access(addr, 0, 0, 0, 2);
2590 #endif
2591 return 0;
2594 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2596 #ifdef DEBUG_UNASSIGNED
2597 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2598 #endif
2599 #if defined(TARGET_SPARC)
2600 do_unassigned_access(addr, 0, 0, 0, 4);
2601 #endif
2602 return 0;
2605 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2607 #ifdef DEBUG_UNASSIGNED
2608 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2609 #endif
2610 #if defined(TARGET_SPARC)
2611 do_unassigned_access(addr, 1, 0, 0, 1);
2612 #endif
2615 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2617 #ifdef DEBUG_UNASSIGNED
2618 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2619 #endif
2620 #if defined(TARGET_SPARC)
2621 do_unassigned_access(addr, 1, 0, 0, 2);
2622 #endif
2625 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2627 #ifdef DEBUG_UNASSIGNED
2628 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2629 #endif
2630 #if defined(TARGET_SPARC)
2631 do_unassigned_access(addr, 1, 0, 0, 4);
2632 #endif
2635 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2636 unassigned_mem_readb,
2637 unassigned_mem_readw,
2638 unassigned_mem_readl,
2641 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2642 unassigned_mem_writeb,
2643 unassigned_mem_writew,
2644 unassigned_mem_writel,
2647 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2648 uint32_t val)
2650 int dirty_flags;
2651 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2652 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2653 #if !defined(CONFIG_USER_ONLY)
2654 tb_invalidate_phys_page_fast(ram_addr, 1);
2655 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2656 #endif
2658 stb_p(qemu_get_ram_ptr(ram_addr), val);
2659 #ifdef CONFIG_KQEMU
2660 if (cpu_single_env->kqemu_enabled &&
2661 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2662 kqemu_modify_page(cpu_single_env, ram_addr);
2663 #endif
2664 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2665 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2666 /* we remove the notdirty callback only if the code has been
2667 flushed */
2668 if (dirty_flags == 0xff)
2669 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2672 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2673 uint32_t val)
2675 int dirty_flags;
2676 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2677 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2678 #if !defined(CONFIG_USER_ONLY)
2679 tb_invalidate_phys_page_fast(ram_addr, 2);
2680 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2681 #endif
2683 stw_p(qemu_get_ram_ptr(ram_addr), val);
2684 #ifdef CONFIG_KQEMU
2685 if (cpu_single_env->kqemu_enabled &&
2686 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2687 kqemu_modify_page(cpu_single_env, ram_addr);
2688 #endif
2689 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2690 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2691 /* we remove the notdirty callback only if the code has been
2692 flushed */
2693 if (dirty_flags == 0xff)
2694 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2697 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2698 uint32_t val)
2700 int dirty_flags;
2701 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2702 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2703 #if !defined(CONFIG_USER_ONLY)
2704 tb_invalidate_phys_page_fast(ram_addr, 4);
2705 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2706 #endif
2708 stl_p(qemu_get_ram_ptr(ram_addr), val);
2709 #ifdef CONFIG_KQEMU
2710 if (cpu_single_env->kqemu_enabled &&
2711 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2712 kqemu_modify_page(cpu_single_env, ram_addr);
2713 #endif
2714 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2715 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2716 /* we remove the notdirty callback only if the code has been
2717 flushed */
2718 if (dirty_flags == 0xff)
2719 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2722 static CPUReadMemoryFunc *error_mem_read[3] = {
2723 NULL, /* never used */
2724 NULL, /* never used */
2725 NULL, /* never used */
2728 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2729 notdirty_mem_writeb,
2730 notdirty_mem_writew,
2731 notdirty_mem_writel,
2734 /* Generate a debug exception if a watchpoint has been hit. */
2735 static void check_watchpoint(int offset, int len_mask, int flags)
2737 CPUState *env = cpu_single_env;
2738 target_ulong pc, cs_base;
2739 TranslationBlock *tb;
2740 target_ulong vaddr;
2741 CPUWatchpoint *wp;
2742 int cpu_flags;
2744 if (env->watchpoint_hit) {
2745 /* We re-entered the check after replacing the TB. Now raise
2746 * the debug interrupt so that it will trigger after the
2747 * current instruction. */
2748 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2749 return;
2751 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2752 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2753 if ((vaddr == (wp->vaddr & len_mask) ||
2754 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2755 wp->flags |= BP_WATCHPOINT_HIT;
2756 if (!env->watchpoint_hit) {
2757 env->watchpoint_hit = wp;
2758 tb = tb_find_pc(env->mem_io_pc);
2759 if (!tb) {
2760 cpu_abort(env, "check_watchpoint: could not find TB for "
2761 "pc=%p", (void *)env->mem_io_pc);
2763 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2764 tb_phys_invalidate(tb, -1);
2765 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2766 env->exception_index = EXCP_DEBUG;
2767 } else {
2768 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2769 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2771 cpu_resume_from_signal(env, NULL);
2773 } else {
2774 wp->flags &= ~BP_WATCHPOINT_HIT;
2779 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2780 so these check for a hit then pass through to the normal out-of-line
2781 phys routines. */
2782 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2784 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2785 return ldub_phys(addr);
2788 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2790 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2791 return lduw_phys(addr);
2794 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2796 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2797 return ldl_phys(addr);
2800 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2801 uint32_t val)
2803 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2804 stb_phys(addr, val);
2807 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2808 uint32_t val)
2810 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2811 stw_phys(addr, val);
2814 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2815 uint32_t val)
2817 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2818 stl_phys(addr, val);
2821 static CPUReadMemoryFunc *watch_mem_read[3] = {
2822 watch_mem_readb,
2823 watch_mem_readw,
2824 watch_mem_readl,
2827 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2828 watch_mem_writeb,
2829 watch_mem_writew,
2830 watch_mem_writel,
2833 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2834 unsigned int len)
2836 uint32_t ret;
2837 unsigned int idx;
2839 idx = SUBPAGE_IDX(addr);
2840 #if defined(DEBUG_SUBPAGE)
2841 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2842 mmio, len, addr, idx);
2843 #endif
2844 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2845 addr + mmio->region_offset[idx][0][len]);
2847 return ret;
2850 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2851 uint32_t value, unsigned int len)
2853 unsigned int idx;
2855 idx = SUBPAGE_IDX(addr);
2856 #if defined(DEBUG_SUBPAGE)
2857 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2858 mmio, len, addr, idx, value);
2859 #endif
2860 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2861 addr + mmio->region_offset[idx][1][len],
2862 value);
2865 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2867 #if defined(DEBUG_SUBPAGE)
2868 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2869 #endif
2871 return subpage_readlen(opaque, addr, 0);
2874 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2875 uint32_t value)
2877 #if defined(DEBUG_SUBPAGE)
2878 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2879 #endif
2880 subpage_writelen(opaque, addr, value, 0);
2883 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2885 #if defined(DEBUG_SUBPAGE)
2886 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2887 #endif
2889 return subpage_readlen(opaque, addr, 1);
2892 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2893 uint32_t value)
2895 #if defined(DEBUG_SUBPAGE)
2896 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2897 #endif
2898 subpage_writelen(opaque, addr, value, 1);
2901 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2903 #if defined(DEBUG_SUBPAGE)
2904 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2905 #endif
2907 return subpage_readlen(opaque, addr, 2);
2910 static void subpage_writel (void *opaque,
2911 target_phys_addr_t addr, uint32_t value)
2913 #if defined(DEBUG_SUBPAGE)
2914 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2915 #endif
2916 subpage_writelen(opaque, addr, value, 2);
2919 static CPUReadMemoryFunc *subpage_read[] = {
2920 &subpage_readb,
2921 &subpage_readw,
2922 &subpage_readl,
2925 static CPUWriteMemoryFunc *subpage_write[] = {
2926 &subpage_writeb,
2927 &subpage_writew,
2928 &subpage_writel,
2931 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2932 ram_addr_t memory, ram_addr_t region_offset)
2934 int idx, eidx;
2935 unsigned int i;
2937 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2938 return -1;
2939 idx = SUBPAGE_IDX(start);
2940 eidx = SUBPAGE_IDX(end);
2941 #if defined(DEBUG_SUBPAGE)
2942 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2943 mmio, start, end, idx, eidx, memory);
2944 #endif
2945 memory >>= IO_MEM_SHIFT;
2946 for (; idx <= eidx; idx++) {
2947 for (i = 0; i < 4; i++) {
2948 if (io_mem_read[memory][i]) {
2949 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2950 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2951 mmio->region_offset[idx][0][i] = region_offset;
2953 if (io_mem_write[memory][i]) {
2954 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2955 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2956 mmio->region_offset[idx][1][i] = region_offset;
2961 return 0;
2964 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2965 ram_addr_t orig_memory, ram_addr_t region_offset)
2967 subpage_t *mmio;
2968 int subpage_memory;
2970 mmio = qemu_mallocz(sizeof(subpage_t));
2972 mmio->base = base;
2973 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2974 #if defined(DEBUG_SUBPAGE)
2975 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2976 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2977 #endif
2978 *phys = subpage_memory | IO_MEM_SUBPAGE;
2979 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2980 region_offset);
2982 return mmio;
2985 static int get_free_io_mem_idx(void)
2987 int i;
2989 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2990 if (!io_mem_used[i]) {
2991 io_mem_used[i] = 1;
2992 return i;
2995 return -1;
2998 /* mem_read and mem_write are arrays of functions containing the
2999 function to access byte (index 0), word (index 1) and dword (index
3000 2). Functions can be omitted with a NULL function pointer.
3001 If io_index is non-zero, the corresponding io zone is
3002 modified. If it is zero, a new io zone is allocated. The return
3003 value can be used with cpu_register_physical_memory(). (-1) is
3004 returned on error. */
3005 static int cpu_register_io_memory_fixed(int io_index,
3006 CPUReadMemoryFunc **mem_read,
3007 CPUWriteMemoryFunc **mem_write,
3008 void *opaque)
3010 int i, subwidth = 0;
3012 if (io_index <= 0) {
3013 io_index = get_free_io_mem_idx();
3014 if (io_index == -1)
3015 return io_index;
3016 } else {
3017 io_index >>= IO_MEM_SHIFT;
3018 if (io_index >= IO_MEM_NB_ENTRIES)
3019 return -1;
3022 for(i = 0;i < 3; i++) {
3023 if (!mem_read[i] || !mem_write[i])
3024 subwidth = IO_MEM_SUBWIDTH;
3025 io_mem_read[io_index][i] = mem_read[i];
3026 io_mem_write[io_index][i] = mem_write[i];
3028 io_mem_opaque[io_index] = opaque;
3029 return (io_index << IO_MEM_SHIFT) | subwidth;
3032 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3033 CPUWriteMemoryFunc **mem_write,
3034 void *opaque)
3036 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
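/*
 * Illustrative sketch (not part of the original file): a device model
 * normally fills the two handler tables with its byte/word/long accessors,
 * registers them, and then maps the returned token at a guest physical
 * address. All names and addresses below are hypothetical.
 *
 *   static CPUReadMemoryFunc *mydev_read[3] = {
 *       mydev_readb, mydev_readw, mydev_readl,
 *   };
 *   static CPUWriteMemoryFunc *mydev_write[3] = {
 *       mydev_writeb, mydev_writew, mydev_writel,
 *   };
 *   int io = cpu_register_io_memory(mydev_read, mydev_write, mydev_state);
 *   cpu_register_physical_memory_offset(0x10000000, 0x1000, io, 0);
 */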
3039 void cpu_unregister_io_memory(int io_table_address)
3041 int i;
3042 int io_index = io_table_address >> IO_MEM_SHIFT;
3044 for (i=0;i < 3; i++) {
3045 io_mem_read[io_index][i] = unassigned_mem_read[i];
3046 io_mem_write[io_index][i] = unassigned_mem_write[i];
3048 io_mem_opaque[io_index] = NULL;
3049 io_mem_used[io_index] = 0;
3052 static void io_mem_init(void)
3054 int i;
3056 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3057 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3058 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3059 for (i=0; i<5; i++)
3060 io_mem_used[i] = 1;
3062 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3063 watch_mem_write, NULL);
3064 #ifdef CONFIG_KQEMU
3065 if (kqemu_phys_ram_base) {
3066 /* alloc dirty bits array */
3067 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3068 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3070 #endif
3073 #endif /* !defined(CONFIG_USER_ONLY) */
3075 /* physical memory access (slow version, mainly for debug) */
3076 #if defined(CONFIG_USER_ONLY)
3077 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3078 int len, int is_write)
3080 int l, flags;
3081 target_ulong page;
3082 void * p;
3084 while (len > 0) {
3085 page = addr & TARGET_PAGE_MASK;
3086 l = (page + TARGET_PAGE_SIZE) - addr;
3087 if (l > len)
3088 l = len;
3089 flags = page_get_flags(page);
3090 if (!(flags & PAGE_VALID))
3091 return;
3092 if (is_write) {
3093 if (!(flags & PAGE_WRITE))
3094 return;
3095 /* XXX: this code should not depend on lock_user */
3096 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3097 /* FIXME - should this return an error rather than just fail? */
3098 return;
3099 memcpy(p, buf, l);
3100 unlock_user(p, addr, l);
3101 } else {
3102 if (!(flags & PAGE_READ))
3103 return;
3104 /* XXX: this code should not depend on lock_user */
3105 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3106 /* FIXME - should this return an error rather than just fail? */
3107 return;
3108 memcpy(buf, p, l);
3109 unlock_user(p, addr, 0);
3111 len -= l;
3112 buf += l;
3113 addr += l;
3117 #else
3118 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3119 int len, int is_write)
3121 int l, io_index;
3122 uint8_t *ptr;
3123 uint32_t val;
3124 target_phys_addr_t page;
3125 unsigned long pd;
3126 PhysPageDesc *p;
3128 while (len > 0) {
3129 page = addr & TARGET_PAGE_MASK;
3130 l = (page + TARGET_PAGE_SIZE) - addr;
3131 if (l > len)
3132 l = len;
3133 p = phys_page_find(page >> TARGET_PAGE_BITS);
3134 if (!p) {
3135 pd = IO_MEM_UNASSIGNED;
3136 } else {
3137 pd = p->phys_offset;
3140 if (is_write) {
3141 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3142 target_phys_addr_t addr1 = addr;
3143 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3144 if (p)
3145 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3146 /* XXX: could force cpu_single_env to NULL to avoid
3147 potential bugs */
3148 if (l >= 4 && ((addr1 & 3) == 0)) {
3149 /* 32 bit write access */
3150 val = ldl_p(buf);
3151 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3152 l = 4;
3153 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3154 /* 16 bit write access */
3155 val = lduw_p(buf);
3156 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3157 l = 2;
3158 } else {
3159 /* 8 bit write access */
3160 val = ldub_p(buf);
3161 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3162 l = 1;
3164 } else {
3165 unsigned long addr1;
3166 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3167 /* RAM case */
3168 ptr = qemu_get_ram_ptr(addr1);
3169 memcpy(ptr, buf, l);
3170 if (!cpu_physical_memory_is_dirty(addr1)) {
3171 /* invalidate code */
3172 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3173 /* set dirty bit */
3174 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3175 (0xff & ~CODE_DIRTY_FLAG);
3178 } else {
3179 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3180 !(pd & IO_MEM_ROMD)) {
3181 target_phys_addr_t addr1 = addr;
3182 /* I/O case */
3183 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3184 if (p)
3185 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3186 if (l >= 4 && ((addr1 & 3) == 0)) {
3187 /* 32 bit read access */
3188 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3189 stl_p(buf, val);
3190 l = 4;
3191 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3192 /* 16 bit read access */
3193 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3194 stw_p(buf, val);
3195 l = 2;
3196 } else {
3197 /* 8 bit read access */
3198 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3199 stb_p(buf, val);
3200 l = 1;
3202 } else {
3203 /* RAM case */
3204 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3205 (addr & ~TARGET_PAGE_MASK);
3206 memcpy(buf, ptr, l);
3209 len -= l;
3210 buf += l;
3211 addr += l;
3215 /* used for ROM loading: can write to RAM and ROM */
3216 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3217 const uint8_t *buf, int len)
3219 int l;
3220 uint8_t *ptr;
3221 target_phys_addr_t page;
3222 unsigned long pd;
3223 PhysPageDesc *p;
3225 while (len > 0) {
3226 page = addr & TARGET_PAGE_MASK;
3227 l = (page + TARGET_PAGE_SIZE) - addr;
3228 if (l > len)
3229 l = len;
3230 p = phys_page_find(page >> TARGET_PAGE_BITS);
3231 if (!p) {
3232 pd = IO_MEM_UNASSIGNED;
3233 } else {
3234 pd = p->phys_offset;
3237 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3238 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3239 !(pd & IO_MEM_ROMD)) {
3240 /* do nothing */
3241 } else {
3242 unsigned long addr1;
3243 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3244 /* ROM/RAM case */
3245 ptr = qemu_get_ram_ptr(addr1);
3246 memcpy(ptr, buf, l);
3248 len -= l;
3249 buf += l;
3250 addr += l;
3254 typedef struct {
3255 void *buffer;
3256 target_phys_addr_t addr;
3257 target_phys_addr_t len;
3258 } BounceBuffer;
3260 static BounceBuffer bounce;
3262 typedef struct MapClient {
3263 void *opaque;
3264 void (*callback)(void *opaque);
3265 LIST_ENTRY(MapClient) link;
3266 } MapClient;
3268 static LIST_HEAD(map_client_list, MapClient) map_client_list
3269 = LIST_HEAD_INITIALIZER(map_client_list);
3271 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3273 MapClient *client = qemu_malloc(sizeof(*client));
3275 client->opaque = opaque;
3276 client->callback = callback;
3277 LIST_INSERT_HEAD(&map_client_list, client, link);
3278 return client;
3281 void cpu_unregister_map_client(void *_client)
3283 MapClient *client = (MapClient *)_client;
3285 LIST_REMOVE(client, link);
3286 qemu_free(client);
3289 static void cpu_notify_map_clients(void)
3291 MapClient *client;
3293 while (!LIST_EMPTY(&map_client_list)) {
3294 client = LIST_FIRST(&map_client_list);
3295 client->callback(client->opaque);
3296 cpu_unregister_map_client(client);
3300 /* Map a physical memory region into a host virtual address.
3301 * May map a subset of the requested range, given by and returned in *plen.
3302 * May return NULL if resources needed to perform the mapping are exhausted.
3303 * Use only for reads OR writes - not for read-modify-write operations.
3304 * Use cpu_register_map_client() to know when retrying the map operation is
3305 * likely to succeed.
3307 void *cpu_physical_memory_map(target_phys_addr_t addr,
3308 target_phys_addr_t *plen,
3309 int is_write)
3311 target_phys_addr_t len = *plen;
3312 target_phys_addr_t done = 0;
3313 int l;
3314 uint8_t *ret = NULL;
3315 uint8_t *ptr;
3316 target_phys_addr_t page;
3317 unsigned long pd;
3318 PhysPageDesc *p;
3319 unsigned long addr1;
3321 while (len > 0) {
3322 page = addr & TARGET_PAGE_MASK;
3323 l = (page + TARGET_PAGE_SIZE) - addr;
3324 if (l > len)
3325 l = len;
3326 p = phys_page_find(page >> TARGET_PAGE_BITS);
3327 if (!p) {
3328 pd = IO_MEM_UNASSIGNED;
3329 } else {
3330 pd = p->phys_offset;
3333 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3334 if (done || bounce.buffer) {
3335 break;
3337 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3338 bounce.addr = addr;
3339 bounce.len = l;
3340 if (!is_write) {
3341 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3343 ptr = bounce.buffer;
3344 } else {
3345 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3346 ptr = qemu_get_ram_ptr(addr1);
3348 if (!done) {
3349 ret = ptr;
3350 } else if (ret + done != ptr) {
3351 break;
3354 len -= l;
3355 addr += l;
3356 done += l;
3358 *plen = done;
3359 return ret;
3362 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3363 * Will also mark the memory as dirty if is_write == 1. access_len gives
3364 * the amount of memory that was actually read or written by the caller.
3366 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3367 int is_write, target_phys_addr_t access_len)
3369 if (buffer != bounce.buffer) {
3370 if (is_write) {
3371 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3372 while (access_len) {
3373 unsigned l;
3374 l = TARGET_PAGE_SIZE;
3375 if (l > access_len)
3376 l = access_len;
3377 if (!cpu_physical_memory_is_dirty(addr1)) {
3378 /* invalidate code */
3379 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3380 /* set dirty bit */
3381 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3382 (0xff & ~CODE_DIRTY_FLAG);
3384 addr1 += l;
3385 access_len -= l;
3388 return;
3390 if (is_write) {
3391 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3393 qemu_free(bounce.buffer);
3394 bounce.buffer = NULL;
3395 cpu_notify_map_clients();
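/*
 * Illustrative sketch (not part of the original file): a DMA-style user of
 * the map/unmap pair typically looks like this; if the region cannot be
 * mapped directly it either retries later (via cpu_register_map_client) or
 * falls back to cpu_physical_memory_rw(). Names below are hypothetical.
 *
 *   target_phys_addr_t plen = len;
 *   void *ptr = cpu_physical_memory_map(addr, &plen, 1);
 *   if (ptr) {
 *       memset(ptr, 0, plen);                  (fill the guest buffer)
 *       cpu_physical_memory_unmap(ptr, plen, 1, plen);
 *   }
 */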
3398 /* warning: addr must be aligned */
3399 uint32_t ldl_phys(target_phys_addr_t addr)
3401 int io_index;
3402 uint8_t *ptr;
3403 uint32_t val;
3404 unsigned long pd;
3405 PhysPageDesc *p;
3407 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3408 if (!p) {
3409 pd = IO_MEM_UNASSIGNED;
3410 } else {
3411 pd = p->phys_offset;
3414 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3415 !(pd & IO_MEM_ROMD)) {
3416 /* I/O case */
3417 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3418 if (p)
3419 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3420 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3421 } else {
3422 /* RAM case */
3423 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3424 (addr & ~TARGET_PAGE_MASK);
3425 val = ldl_p(ptr);
3427 return val;
3430 /* warning: addr must be aligned */
3431 uint64_t ldq_phys(target_phys_addr_t addr)
3433 int io_index;
3434 uint8_t *ptr;
3435 uint64_t val;
3436 unsigned long pd;
3437 PhysPageDesc *p;
3439 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3440 if (!p) {
3441 pd = IO_MEM_UNASSIGNED;
3442 } else {
3443 pd = p->phys_offset;
3446 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3447 !(pd & IO_MEM_ROMD)) {
3448 /* I/O case */
3449 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3450 if (p)
3451 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3452 #ifdef TARGET_WORDS_BIGENDIAN
3453 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3454 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3455 #else
3456 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3457 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3458 #endif
3459 } else {
3460 /* RAM case */
3461 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3462 (addr & ~TARGET_PAGE_MASK);
3463 val = ldq_p(ptr);
3465 return val;
3468 /* XXX: optimize */
3469 uint32_t ldub_phys(target_phys_addr_t addr)
3471 uint8_t val;
3472 cpu_physical_memory_read(addr, &val, 1);
3473 return val;
3476 /* XXX: optimize */
3477 uint32_t lduw_phys(target_phys_addr_t addr)
3479 uint16_t val;
3480 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3481 return tswap16(val);
3484 /* warning: addr must be aligned. The ram page is not marked as dirty
3485 and the code inside is not invalidated. It is useful if the dirty
3486 bits are used to track modified PTEs */
3487 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3489 int io_index;
3490 uint8_t *ptr;
3491 unsigned long pd;
3492 PhysPageDesc *p;
3494 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3495 if (!p) {
3496 pd = IO_MEM_UNASSIGNED;
3497 } else {
3498 pd = p->phys_offset;
3501 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3502 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3503 if (p)
3504 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3505 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3506 } else {
3507 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3508 ptr = qemu_get_ram_ptr(addr1);
3509 stl_p(ptr, val);
3511 if (unlikely(in_migration)) {
3512 if (!cpu_physical_memory_is_dirty(addr1)) {
3513 /* invalidate code */
3514 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3515 /* set dirty bit */
3516 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3517 (0xff & ~CODE_DIRTY_FLAG);
3523 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3525 int io_index;
3526 uint8_t *ptr;
3527 unsigned long pd;
3528 PhysPageDesc *p;
3530 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3531 if (!p) {
3532 pd = IO_MEM_UNASSIGNED;
3533 } else {
3534 pd = p->phys_offset;
3537 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3538 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3539 if (p)
3540 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3541 #ifdef TARGET_WORDS_BIGENDIAN
3542 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3543 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3544 #else
3545 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3546 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3547 #endif
3548 } else {
3549 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3550 (addr & ~TARGET_PAGE_MASK);
3551 stq_p(ptr, val);
3555 /* warning: addr must be aligned */
3556 void stl_phys(target_phys_addr_t addr, uint32_t val)
3558 int io_index;
3559 uint8_t *ptr;
3560 unsigned long pd;
3561 PhysPageDesc *p;
3563 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3564 if (!p) {
3565 pd = IO_MEM_UNASSIGNED;
3566 } else {
3567 pd = p->phys_offset;
3570 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3571 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3572 if (p)
3573 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3574 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3575 } else {
3576 unsigned long addr1;
3577 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3578 /* RAM case */
3579 ptr = qemu_get_ram_ptr(addr1);
3580 stl_p(ptr, val);
3581 if (!cpu_physical_memory_is_dirty(addr1)) {
3582 /* invalidate code */
3583 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3584 /* set dirty bit */
3585 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3586 (0xff & ~CODE_DIRTY_FLAG);
3591 /* XXX: optimize */
3592 void stb_phys(target_phys_addr_t addr, uint32_t val)
3594 uint8_t v = val;
3595 cpu_physical_memory_write(addr, &v, 1);
3598 /* XXX: optimize */
3599 void stw_phys(target_phys_addr_t addr, uint32_t val)
3601 uint16_t v = tswap16(val);
3602 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3605 /* XXX: optimize */
3606 void stq_phys(target_phys_addr_t addr, uint64_t val)
3608 val = tswap64(val);
3609 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3612 #endif
3614 /* virtual memory access for debug (includes writing to ROM) */
3615 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3616 uint8_t *buf, int len, int is_write)
3618 int l;
3619 target_phys_addr_t phys_addr;
3620 target_ulong page;
3622 while (len > 0) {
3623 page = addr & TARGET_PAGE_MASK;
3624 phys_addr = cpu_get_phys_page_debug(env, page);
3625 /* if no physical page mapped, return an error */
3626 if (phys_addr == -1)
3627 return -1;
3628 l = (page + TARGET_PAGE_SIZE) - addr;
3629 if (l > len)
3630 l = len;
3631 phys_addr += (addr & ~TARGET_PAGE_MASK);
3632 #if !defined(CONFIG_USER_ONLY)
3633 if (is_write)
3634 cpu_physical_memory_write_rom(phys_addr, buf, l);
3635 else
3636 #endif
3637 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3638 len -= l;
3639 buf += l;
3640 addr += l;
3642 return 0;
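/*
 * Illustrative sketch (not part of the original file): a debugger stub can
 * read guest memory through the current CPU's virtual address space like
 * this; the buffer and address are hypothetical.
 *
 *   uint8_t insn[4];
 *   if (cpu_memory_rw_debug(env, addr, insn, sizeof(insn), 0) < 0)
 *       return -1;                     (no page is mapped at addr)
 */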
3645 /* in deterministic execution mode, instructions doing device I/Os
3646 must be at the end of the TB */
3647 void cpu_io_recompile(CPUState *env, void *retaddr)
3649 TranslationBlock *tb;
3650 uint32_t n, cflags;
3651 target_ulong pc, cs_base;
3652 uint64_t flags;
3654 tb = tb_find_pc((unsigned long)retaddr);
3655 if (!tb) {
3656 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3657 retaddr);
3659 n = env->icount_decr.u16.low + tb->icount;
3660 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3661 /* Calculate how many instructions had been executed before the fault
3662 occurred. */
3663 n = n - env->icount_decr.u16.low;
3664 /* Generate a new TB ending on the I/O insn. */
3665 n++;
3666 /* On MIPS and SH, delay slot instructions can only be restarted if
3667 they were already the first instruction in the TB. If this is not
3668 the first instruction in a TB then re-execute the preceding
3669 branch. */
3670 #if defined(TARGET_MIPS)
3671 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3672 env->active_tc.PC -= 4;
3673 env->icount_decr.u16.low++;
3674 env->hflags &= ~MIPS_HFLAG_BMASK;
3676 #elif defined(TARGET_SH4)
3677 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3678 && n > 1) {
3679 env->pc -= 2;
3680 env->icount_decr.u16.low++;
3681 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3683 #endif
3684 /* This should never happen. */
3685 if (n > CF_COUNT_MASK)
3686 cpu_abort(env, "TB too big during recompile");
3688 cflags = n | CF_LAST_IO;
3689 pc = tb->pc;
3690 cs_base = tb->cs_base;
3691 flags = tb->flags;
3692 tb_phys_invalidate(tb, -1);
3693 /* FIXME: In theory this could raise an exception. In practice
3694 we have already translated the block once so it's probably ok. */
3695 tb_gen_code(env, pc, cs_base, flags, cflags);
3696 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3697 the first in the TB) then we end up generating a whole new TB and
3698 repeating the fault, which is horribly inefficient.
3699 Better would be to execute just this insn uncached, or generate a
3700 second new TB. */
3701 cpu_resume_from_signal(env, NULL);
3704 void dump_exec_info(FILE *f,
3705 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3707 int i, target_code_size, max_target_code_size;
3708 int direct_jmp_count, direct_jmp2_count, cross_page;
3709 TranslationBlock *tb;
3711 target_code_size = 0;
3712 max_target_code_size = 0;
3713 cross_page = 0;
3714 direct_jmp_count = 0;
3715 direct_jmp2_count = 0;
3716 for(i = 0; i < nb_tbs; i++) {
3717 tb = &tbs[i];
3718 target_code_size += tb->size;
3719 if (tb->size > max_target_code_size)
3720 max_target_code_size = tb->size;
3721 if (tb->page_addr[1] != -1)
3722 cross_page++;
3723 if (tb->tb_next_offset[0] != 0xffff) {
3724 direct_jmp_count++;
3725 if (tb->tb_next_offset[1] != 0xffff) {
3726 direct_jmp2_count++;
3730 /* XXX: avoid using doubles ? */
3731 cpu_fprintf(f, "Translation buffer state:\n");
3732 cpu_fprintf(f, "gen code size %ld/%ld\n",
3733 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3734 cpu_fprintf(f, "TB count %d/%d\n",
3735 nb_tbs, code_gen_max_blocks);
3736 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3737 nb_tbs ? target_code_size / nb_tbs : 0,
3738 max_target_code_size);
3739 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3740 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3741 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3742 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3743 cross_page,
3744 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3745 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3746 direct_jmp_count,
3747 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3748 direct_jmp2_count,
3749 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3750 cpu_fprintf(f, "\nStatistics:\n");
3751 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3752 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3753 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3754 tcg_dump_info(f, cpu_fprintf);
3757 #if !defined(CONFIG_USER_ONLY)
3759 #define MMUSUFFIX _cmmu
3760 #define GETPC() NULL
3761 #define env cpu_single_env
3762 #define SOFTMMU_CODE_ACCESS
3764 #define SHIFT 0
3765 #include "softmmu_template.h"
3767 #define SHIFT 1
3768 #include "softmmu_template.h"
3770 #define SHIFT 2
3771 #include "softmmu_template.h"
3773 #define SHIFT 3
3774 #include "softmmu_template.h"
3776 #undef env
3778 #endif