Init pci_option_rom_offset for VGA BIOS absence
[qemu-kvm/fedora.git] / exec.c
blob 29c91fb0960623f6c42b1f156a2d9c10aad97cae
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "osdep.h"
46 #include "kvm.h"
47 #if defined(CONFIG_USER_ONLY)
48 #include <qemu.h>
49 #endif
51 //#define DEBUG_TB_INVALIDATE
52 //#define DEBUG_FLUSH
53 //#define DEBUG_TLB
54 //#define DEBUG_UNASSIGNED
56 /* make various TB consistency checks */
57 //#define DEBUG_TB_CHECK
58 //#define DEBUG_TLB_CHECK
60 //#define DEBUG_IOPORT
61 //#define DEBUG_SUBPAGE
63 #if !defined(CONFIG_USER_ONLY)
64 /* TB consistency checks only implemented for usermode emulation. */
65 #undef DEBUG_TB_CHECK
66 #endif
68 #define SMC_BITMAP_USE_THRESHOLD 10
70 #if defined(TARGET_SPARC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 41
72 #elif defined(TARGET_SPARC)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 36
74 #elif defined(TARGET_ALPHA)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #define TARGET_VIRT_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_PPC64)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 #elif defined(TARGET_IA64)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
85 #else
86 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
87 #define TARGET_PHYS_ADDR_SPACE_BITS 32
88 #endif
90 static TranslationBlock *tbs;
91 int code_gen_max_blocks;
92 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
93 static int nb_tbs;
94 /* any access to the tbs or the page table must use this lock */
95 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
97 #if defined(__arm__) || defined(__sparc_v9__)
98 /* The prologue must be reachable with a direct jump. ARM and Sparc64
99 have limited branch ranges (possibly also PPC) so place it in a
100 section close to the code segment. */
101 #define code_gen_section \
102 __attribute__((__section__(".gen_code"))) \
103 __attribute__((aligned (32)))
104 #else
105 #define code_gen_section \
106 __attribute__((aligned (32)))
107 #endif
109 uint8_t code_gen_prologue[1024] code_gen_section;
110 static uint8_t *code_gen_buffer;
111 static unsigned long code_gen_buffer_size;
112 /* threshold to flush the translated code buffer */
113 static unsigned long code_gen_buffer_max_size;
114 uint8_t *code_gen_ptr;
116 #if !defined(CONFIG_USER_ONLY)
117 int phys_ram_fd;
118 uint8_t *phys_ram_dirty;
119 uint8_t *bios_mem;
120 static int in_migration;
122 typedef struct RAMBlock {
123 uint8_t *host;
124 ram_addr_t offset;
125 ram_addr_t length;
126 struct RAMBlock *next;
127 } RAMBlock;
129 static RAMBlock *ram_blocks;
130 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
131 then we can no longer assume contiguous ram offsets, and external uses
132 of this variable will break. */
133 ram_addr_t last_ram_offset;
134 #endif
136 CPUState *first_cpu;
137 /* current CPU in the current thread. It is only valid inside
138 cpu_exec() */
139 CPUState *cpu_single_env;
140 /* 0 = Do not count executed instructions.
141 1 = Precise instruction counting.
142 2 = Adaptive rate instruction counting. */
143 int use_icount = 0;
144 /* Current instruction counter. While executing translated code this may
145 include some instructions that have not yet been executed. */
146 int64_t qemu_icount;
148 typedef struct PageDesc {
149 /* list of TBs intersecting this ram page */
150 TranslationBlock *first_tb;
151 /* in order to optimize self modifying code, we count the number
152 of lookups we do to a given page to use a bitmap */
153 unsigned int code_write_count;
154 uint8_t *code_bitmap;
155 #if defined(CONFIG_USER_ONLY)
156 unsigned long flags;
157 #endif
158 } PageDesc;
160 typedef struct PhysPageDesc {
161 /* offset in host memory of the page + io_index in the low bits */
162 ram_addr_t phys_offset;
163 ram_addr_t region_offset;
164 } PhysPageDesc;
166 #define L2_BITS 10
167 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
168 /* XXX: this is a temporary hack for alpha target.
169 * In the future, this is to be replaced by a multi-level table
170 * to actually be able to handle the complete 64-bit address space. */
172 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
173 #else
174 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
175 #endif
177 #define L1_SIZE (1 << L1_BITS)
178 #define L2_SIZE (1 << L2_BITS)
180 unsigned long qemu_real_host_page_size;
181 unsigned long qemu_host_page_bits;
182 unsigned long qemu_host_page_size;
183 unsigned long qemu_host_page_mask;
185 /* XXX: for system emulation, it could just be an array */
186 static PageDesc *l1_map[L1_SIZE];
187 static PhysPageDesc **l1_phys_map;
189 #if !defined(CONFIG_USER_ONLY)
190 static void io_mem_init(void);
192 /* io memory support */
193 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
194 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
195 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
196 static char io_mem_used[IO_MEM_NB_ENTRIES];
197 static int io_mem_watch;
198 #endif
200 /* log support */
201 static const char *logfilename = "/tmp/qemu.log";
202 FILE *logfile;
203 int loglevel;
204 static int log_append = 0;
206 /* statistics */
207 static int tlb_flush_count;
208 static int tb_flush_count;
209 static int tb_phys_invalidate_count;
211 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
212 typedef struct subpage_t {
213 target_phys_addr_t base;
214 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
215 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
216 void *opaque[TARGET_PAGE_SIZE][2][4];
217 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
218 } subpage_t;
220 #ifdef _WIN32
221 static void map_exec(void *addr, long size)
223 DWORD old_protect;
224 VirtualProtect(addr, size,
225 PAGE_EXECUTE_READWRITE, &old_protect);
228 #else
229 static void map_exec(void *addr, long size)
231 unsigned long start, end, page_size;
233 page_size = getpagesize();
234 start = (unsigned long)addr;
235 start &= ~(page_size - 1);
237 end = (unsigned long)addr + size;
238 end += page_size - 1;
239 end &= ~(page_size - 1);
241 mprotect((void *)start, end - start,
242 PROT_READ | PROT_WRITE | PROT_EXEC);
244 #endif
246 static void page_init(void)
248 /* NOTE: we can always suppose that qemu_host_page_size >=
249 TARGET_PAGE_SIZE */
250 #ifdef _WIN32
252 SYSTEM_INFO system_info;
254 GetSystemInfo(&system_info);
255 qemu_real_host_page_size = system_info.dwPageSize;
257 #else
258 qemu_real_host_page_size = getpagesize();
259 #endif
260 if (qemu_host_page_size == 0)
261 qemu_host_page_size = qemu_real_host_page_size;
262 if (qemu_host_page_size < TARGET_PAGE_SIZE)
263 qemu_host_page_size = TARGET_PAGE_SIZE;
264 qemu_host_page_bits = 0;
265 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
266 qemu_host_page_bits++;
267 qemu_host_page_mask = ~(qemu_host_page_size - 1);
268 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
269 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
271 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
273 long long startaddr, endaddr;
274 FILE *f;
275 int n;
277 mmap_lock();
278 last_brk = (unsigned long)sbrk(0);
279 f = fopen("/proc/self/maps", "r");
280 if (f) {
281 do {
282 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
283 if (n == 2) {
284 startaddr = MIN(startaddr,
285 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
286 endaddr = MIN(endaddr,
287 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
288 page_set_flags(startaddr & TARGET_PAGE_MASK,
289 TARGET_PAGE_ALIGN(endaddr),
290 PAGE_RESERVED);
292 } while (!feof(f));
293 fclose(f);
295 mmap_unlock();
297 #endif
300 static inline PageDesc **page_l1_map(target_ulong index)
302 #if TARGET_LONG_BITS > 32
303 /* Host memory outside guest VM. For 32-bit targets we have already
304 excluded high addresses. */
305 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
306 return NULL;
307 #endif
308 return &l1_map[index >> L2_BITS];
311 static inline PageDesc *page_find_alloc(target_ulong index)
313 PageDesc **lp, *p;
314 lp = page_l1_map(index);
315 if (!lp)
316 return NULL;
318 p = *lp;
319 if (!p) {
320 /* allocate if not found */
321 #if defined(CONFIG_USER_ONLY)
322 size_t len = sizeof(PageDesc) * L2_SIZE;
323 /* Don't use qemu_malloc because it may recurse. */
324 p = mmap(0, len, PROT_READ | PROT_WRITE,
325 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
326 *lp = p;
327 if (h2g_valid(p)) {
328 unsigned long addr = h2g(p);
329 page_set_flags(addr & TARGET_PAGE_MASK,
330 TARGET_PAGE_ALIGN(addr + len),
331 PAGE_RESERVED);
333 #else
334 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
335 *lp = p;
336 #endif
338 return p + (index & (L2_SIZE - 1));
341 static inline PageDesc *page_find(target_ulong index)
343 PageDesc **lp, *p;
344 lp = page_l1_map(index);
345 if (!lp)
346 return NULL;
348 p = *lp;
349 if (!p)
350 return 0;
351 return p + (index & (L2_SIZE - 1));
354 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
356 void **lp, **p;
357 PhysPageDesc *pd;
359 p = (void **)l1_phys_map;
360 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
362 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
363 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
364 #endif
365 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
366 p = *lp;
367 if (!p) {
368 /* allocate if not found */
369 if (!alloc)
370 return NULL;
371 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
372 memset(p, 0, sizeof(void *) * L1_SIZE);
373 *lp = p;
375 #endif
376 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
377 pd = *lp;
378 if (!pd) {
379 int i;
380 /* allocate if not found */
381 if (!alloc)
382 return NULL;
383 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
384 *lp = pd;
385 for (i = 0; i < L2_SIZE; i++) {
386 pd[i].phys_offset = IO_MEM_UNASSIGNED;
387 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
390 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
393 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
395 return phys_page_find_alloc(index, 0);
398 #if !defined(CONFIG_USER_ONLY)
399 static void tlb_protect_code(ram_addr_t ram_addr);
400 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
401 target_ulong vaddr);
402 #define mmap_lock() do { } while(0)
403 #define mmap_unlock() do { } while(0)
404 #endif
406 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
408 #if defined(CONFIG_USER_ONLY)
409 /* Currently it is not recommended to allocate big chunks of data in
410 user mode. It will change when a dedicated libc is used. */
411 #define USE_STATIC_CODE_GEN_BUFFER
412 #endif
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
416 #endif
418 static void code_gen_alloc(unsigned long tb_size)
420 if (kvm_enabled())
421 return;
423 #ifdef USE_STATIC_CODE_GEN_BUFFER
424 code_gen_buffer = static_code_gen_buffer;
425 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
426 map_exec(code_gen_buffer, code_gen_buffer_size);
427 #else
428 code_gen_buffer_size = tb_size;
429 if (code_gen_buffer_size == 0) {
430 #if defined(CONFIG_USER_ONLY)
431 /* in user mode, phys_ram_size is not meaningful */
432 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
433 #else
434 /* XXX: needs adjustments */
435 code_gen_buffer_size = (unsigned long)(ram_size / 4);
436 #endif
438 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
439 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
440 /* The code gen buffer location may have constraints depending on
441 the host cpu and OS */
442 #if defined(__linux__)
444 int flags;
445 void *start = NULL;
447 flags = MAP_PRIVATE | MAP_ANONYMOUS;
448 #if defined(__x86_64__)
449 flags |= MAP_32BIT;
450 /* Cannot map more than that */
451 if (code_gen_buffer_size > (800 * 1024 * 1024))
452 code_gen_buffer_size = (800 * 1024 * 1024);
453 #elif defined(__sparc_v9__)
454 // Map the buffer below 2G, so we can use direct calls and branches
455 flags |= MAP_FIXED;
456 start = (void *) 0x60000000UL;
457 if (code_gen_buffer_size > (512 * 1024 * 1024))
458 code_gen_buffer_size = (512 * 1024 * 1024);
459 #elif defined(__arm__)
460 /* Map the buffer below 32M, so we can use direct calls and branches */
461 flags |= MAP_FIXED;
462 start = (void *) 0x01000000UL;
463 if (code_gen_buffer_size > 16 * 1024 * 1024)
464 code_gen_buffer_size = 16 * 1024 * 1024;
465 #endif
466 code_gen_buffer = mmap(start, code_gen_buffer_size,
467 PROT_WRITE | PROT_READ | PROT_EXEC,
468 flags, -1, 0);
469 if (code_gen_buffer == MAP_FAILED) {
470 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
471 exit(1);
474 #elif defined(__FreeBSD__) || defined(__DragonFly__)
476 int flags;
477 void *addr = NULL;
478 flags = MAP_PRIVATE | MAP_ANONYMOUS;
479 #if defined(__x86_64__)
480 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
481 * 0x40000000 is free */
482 flags |= MAP_FIXED;
483 addr = (void *)0x40000000;
484 /* Cannot map more than that */
485 if (code_gen_buffer_size > (800 * 1024 * 1024))
486 code_gen_buffer_size = (800 * 1024 * 1024);
487 #endif
488 code_gen_buffer = mmap(addr, code_gen_buffer_size,
489 PROT_WRITE | PROT_READ | PROT_EXEC,
490 flags, -1, 0);
491 if (code_gen_buffer == MAP_FAILED) {
492 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
493 exit(1);
496 #else
497 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
498 map_exec(code_gen_buffer, code_gen_buffer_size);
499 #endif
500 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
501 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
502 code_gen_buffer_max_size = code_gen_buffer_size -
503 code_gen_max_block_size();
504 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
505 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
508 /* Must be called before using the QEMU cpus. 'tb_size' is the size
509 (in bytes) allocated to the translation buffer. Zero means default
510 size. */
511 void cpu_exec_init_all(unsigned long tb_size)
513 cpu_gen_init();
514 code_gen_alloc(tb_size);
515 code_gen_ptr = code_gen_buffer;
516 page_init();
517 #if !defined(CONFIG_USER_ONLY)
518 io_mem_init();
519 #endif
522 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
524 #define CPU_COMMON_SAVE_VERSION 1
526 static void cpu_common_save(QEMUFile *f, void *opaque)
528 CPUState *env = opaque;
530 qemu_put_be32s(f, &env->halted);
531 qemu_put_be32s(f, &env->interrupt_request);
534 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
536 CPUState *env = opaque;
538 if (version_id != CPU_COMMON_SAVE_VERSION)
539 return -EINVAL;
541 qemu_get_be32s(f, &env->halted);
542 qemu_get_be32s(f, &env->interrupt_request);
543 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
544 version_id is increased. */
545 env->interrupt_request &= ~0x01;
546 tlb_flush(env, 1);
548 return 0;
550 #endif
552 void cpu_exec_init(CPUState *env)
554 CPUState **penv;
555 int cpu_index;
557 #if defined(CONFIG_USER_ONLY)
558 cpu_list_lock();
559 #endif
560 env->next_cpu = NULL;
561 penv = &first_cpu;
562 cpu_index = 0;
563 while (*penv != NULL) {
564 penv = (CPUState **)&(*penv)->next_cpu;
565 cpu_index++;
567 env->cpu_index = cpu_index;
568 env->numa_node = 0;
569 TAILQ_INIT(&env->breakpoints);
570 TAILQ_INIT(&env->watchpoints);
571 #ifdef _WIN32
572 env->thread_id = GetCurrentProcessId();
573 #else
574 env->thread_id = getpid();
575 #endif
576 *penv = env;
577 #if defined(CONFIG_USER_ONLY)
578 cpu_list_unlock();
579 #endif
580 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
581 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
582 cpu_common_save, cpu_common_load, env);
583 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
584 cpu_save, cpu_load, env);
585 #endif
588 static inline void invalidate_page_bitmap(PageDesc *p)
590 if (p->code_bitmap) {
591 qemu_free(p->code_bitmap);
592 p->code_bitmap = NULL;
594 p->code_write_count = 0;
597 /* set to NULL all the 'first_tb' fields in all PageDescs */
598 static void page_flush_tb(void)
600 int i, j;
601 PageDesc *p;
603 for(i = 0; i < L1_SIZE; i++) {
604 p = l1_map[i];
605 if (p) {
606 for(j = 0; j < L2_SIZE; j++) {
607 p->first_tb = NULL;
608 invalidate_page_bitmap(p);
609 p++;
615 /* flush all the translation blocks */
616 /* XXX: tb_flush is currently not thread safe */
617 void tb_flush(CPUState *env1)
619 CPUState *env;
620 #if defined(DEBUG_FLUSH)
621 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
622 (unsigned long)(code_gen_ptr - code_gen_buffer),
623 nb_tbs, nb_tbs > 0 ?
624 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
625 #endif
626 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
627 cpu_abort(env1, "Internal error: code buffer overflow\n");
629 nb_tbs = 0;
631 for(env = first_cpu; env != NULL; env = env->next_cpu) {
632 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
635 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
636 page_flush_tb();
638 code_gen_ptr = code_gen_buffer;
639 /* XXX: flush processor icache at this point if cache flush is
640 expensive */
641 tb_flush_count++;
644 #ifdef DEBUG_TB_CHECK
646 static void tb_invalidate_check(target_ulong address)
648 TranslationBlock *tb;
649 int i;
650 address &= TARGET_PAGE_MASK;
651 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
652 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
653 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
654 address >= tb->pc + tb->size)) {
655 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
656 address, (long)tb->pc, tb->size);
662 /* verify that all the pages have correct rights for code */
663 static void tb_page_check(void)
665 TranslationBlock *tb;
666 int i, flags1, flags2;
668 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
669 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
670 flags1 = page_get_flags(tb->pc);
671 flags2 = page_get_flags(tb->pc + tb->size - 1);
672 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
673 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
674 (long)tb->pc, tb->size, flags1, flags2);
680 static void tb_jmp_check(TranslationBlock *tb)
682 TranslationBlock *tb1;
683 unsigned int n1;
685 /* suppress any remaining jumps to this TB */
686 tb1 = tb->jmp_first;
687 for(;;) {
688 n1 = (long)tb1 & 3;
689 tb1 = (TranslationBlock *)((long)tb1 & ~3);
690 if (n1 == 2)
691 break;
692 tb1 = tb1->jmp_next[n1];
694 /* check end of list */
695 if (tb1 != tb) {
696 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
700 #endif
702 /* invalidate one TB */
703 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
704 int next_offset)
706 TranslationBlock *tb1;
707 for(;;) {
708 tb1 = *ptb;
709 if (tb1 == tb) {
710 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
711 break;
713 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
717 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
719 TranslationBlock *tb1;
720 unsigned int n1;
722 for(;;) {
723 tb1 = *ptb;
724 n1 = (long)tb1 & 3;
725 tb1 = (TranslationBlock *)((long)tb1 & ~3);
726 if (tb1 == tb) {
727 *ptb = tb1->page_next[n1];
728 break;
730 ptb = &tb1->page_next[n1];
734 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
736 TranslationBlock *tb1, **ptb;
737 unsigned int n1;
739 ptb = &tb->jmp_next[n];
740 tb1 = *ptb;
741 if (tb1) {
742 /* find tb(n) in circular list */
743 for(;;) {
744 tb1 = *ptb;
745 n1 = (long)tb1 & 3;
746 tb1 = (TranslationBlock *)((long)tb1 & ~3);
747 if (n1 == n && tb1 == tb)
748 break;
749 if (n1 == 2) {
750 ptb = &tb1->jmp_first;
751 } else {
752 ptb = &tb1->jmp_next[n1];
755 /* now we can suppress tb(n) from the list */
756 *ptb = tb->jmp_next[n];
758 tb->jmp_next[n] = NULL;
762 /* reset the jump entry 'n' of a TB so that it is not chained to
763 another TB */
764 static inline void tb_reset_jump(TranslationBlock *tb, int n)
766 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
769 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
771 CPUState *env;
772 PageDesc *p;
773 unsigned int h, n1;
774 target_phys_addr_t phys_pc;
775 TranslationBlock *tb1, *tb2;
777 /* remove the TB from the hash list */
778 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
779 h = tb_phys_hash_func(phys_pc);
780 tb_remove(&tb_phys_hash[h], tb,
781 offsetof(TranslationBlock, phys_hash_next));
783 /* remove the TB from the page list */
784 if (tb->page_addr[0] != page_addr) {
785 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
786 tb_page_remove(&p->first_tb, tb);
787 invalidate_page_bitmap(p);
789 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
790 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
791 tb_page_remove(&p->first_tb, tb);
792 invalidate_page_bitmap(p);
795 tb_invalidated_flag = 1;
797 /* remove the TB from the hash list */
798 h = tb_jmp_cache_hash_func(tb->pc);
799 for(env = first_cpu; env != NULL; env = env->next_cpu) {
800 if (env->tb_jmp_cache[h] == tb)
801 env->tb_jmp_cache[h] = NULL;
804 /* suppress this TB from the two jump lists */
805 tb_jmp_remove(tb, 0);
806 tb_jmp_remove(tb, 1);
808 /* suppress any remaining jumps to this TB */
809 tb1 = tb->jmp_first;
810 for(;;) {
811 n1 = (long)tb1 & 3;
812 if (n1 == 2)
813 break;
814 tb1 = (TranslationBlock *)((long)tb1 & ~3);
815 tb2 = tb1->jmp_next[n1];
816 tb_reset_jump(tb1, n1);
817 tb1->jmp_next[n1] = NULL;
818 tb1 = tb2;
820 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
822 tb_phys_invalidate_count++;
825 static inline void set_bits(uint8_t *tab, int start, int len)
827 int end, mask, end1;
829 end = start + len;
830 tab += start >> 3;
831 mask = 0xff << (start & 7);
832 if ((start & ~7) == (end & ~7)) {
833 if (start < end) {
834 mask &= ~(0xff << (end & 7));
835 *tab |= mask;
837 } else {
838 *tab++ |= mask;
839 start = (start + 8) & ~7;
840 end1 = end & ~7;
841 while (start < end1) {
842 *tab++ = 0xff;
843 start += 8;
845 if (start < end) {
846 mask = ~(0xff << (end & 7));
847 *tab |= mask;
852 static void build_page_bitmap(PageDesc *p)
854 int n, tb_start, tb_end;
855 TranslationBlock *tb;
857 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
859 tb = p->first_tb;
860 while (tb != NULL) {
861 n = (long)tb & 3;
862 tb = (TranslationBlock *)((long)tb & ~3);
863 /* NOTE: this is subtle as a TB may span two physical pages */
864 if (n == 0) {
865 /* NOTE: tb_end may be after the end of the page, but
866 it is not a problem */
867 tb_start = tb->pc & ~TARGET_PAGE_MASK;
868 tb_end = tb_start + tb->size;
869 if (tb_end > TARGET_PAGE_SIZE)
870 tb_end = TARGET_PAGE_SIZE;
871 } else {
872 tb_start = 0;
873 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
875 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
876 tb = tb->page_next[n];
880 TranslationBlock *tb_gen_code(CPUState *env,
881 target_ulong pc, target_ulong cs_base,
882 int flags, int cflags)
884 TranslationBlock *tb;
885 uint8_t *tc_ptr;
886 target_ulong phys_pc, phys_page2, virt_page2;
887 int code_gen_size;
889 phys_pc = get_phys_addr_code(env, pc);
890 tb = tb_alloc(pc);
891 if (!tb) {
892 /* flush must be done */
893 tb_flush(env);
894 /* cannot fail at this point */
895 tb = tb_alloc(pc);
896 /* Don't forget to invalidate previous TB info. */
897 tb_invalidated_flag = 1;
899 tc_ptr = code_gen_ptr;
900 tb->tc_ptr = tc_ptr;
901 tb->cs_base = cs_base;
902 tb->flags = flags;
903 tb->cflags = cflags;
904 cpu_gen_code(env, tb, &code_gen_size);
905 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
907 /* check next page if needed */
908 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
909 phys_page2 = -1;
910 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
911 phys_page2 = get_phys_addr_code(env, virt_page2);
913 tb_link_phys(tb, phys_pc, phys_page2);
914 return tb;
917 /* invalidate all TBs which intersect with the target physical page
918 starting in range [start;end[. NOTE: start and end must refer to
919 the same physical page. 'is_cpu_write_access' should be true if called
920 from a real cpu write access: the virtual CPU will exit the current
921 TB if code is modified inside this TB. */
922 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
923 int is_cpu_write_access)
925 TranslationBlock *tb, *tb_next, *saved_tb;
926 CPUState *env = cpu_single_env;
927 target_ulong tb_start, tb_end;
928 PageDesc *p;
929 int n;
930 #ifdef TARGET_HAS_PRECISE_SMC
931 int current_tb_not_found = is_cpu_write_access;
932 TranslationBlock *current_tb = NULL;
933 int current_tb_modified = 0;
934 target_ulong current_pc = 0;
935 target_ulong current_cs_base = 0;
936 int current_flags = 0;
937 #endif /* TARGET_HAS_PRECISE_SMC */
939 p = page_find(start >> TARGET_PAGE_BITS);
940 if (!p)
941 return;
942 if (!p->code_bitmap &&
943 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
944 is_cpu_write_access) {
945 /* build code bitmap */
946 build_page_bitmap(p);
949 /* we remove all the TBs in the range [start, end[ */
950 /* XXX: see if in some cases it could be faster to invalidate all the code */
951 tb = p->first_tb;
952 while (tb != NULL) {
953 n = (long)tb & 3;
954 tb = (TranslationBlock *)((long)tb & ~3);
955 tb_next = tb->page_next[n];
956 /* NOTE: this is subtle as a TB may span two physical pages */
957 if (n == 0) {
958 /* NOTE: tb_end may be after the end of the page, but
959 it is not a problem */
960 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
961 tb_end = tb_start + tb->size;
962 } else {
963 tb_start = tb->page_addr[1];
964 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
966 if (!(tb_end <= start || tb_start >= end)) {
967 #ifdef TARGET_HAS_PRECISE_SMC
968 if (current_tb_not_found) {
969 current_tb_not_found = 0;
970 current_tb = NULL;
971 if (env->mem_io_pc) {
972 /* now we have a real cpu fault */
973 current_tb = tb_find_pc(env->mem_io_pc);
976 if (current_tb == tb &&
977 (current_tb->cflags & CF_COUNT_MASK) != 1) {
978 /* If we are modifying the current TB, we must stop
979 its execution. We could be more precise by checking
980 that the modification is after the current PC, but it
981 would require a specialized function to partially
982 restore the CPU state */
984 current_tb_modified = 1;
985 cpu_restore_state(current_tb, env,
986 env->mem_io_pc, NULL);
987 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
988 &current_flags);
990 #endif /* TARGET_HAS_PRECISE_SMC */
991 /* we need to do that to handle the case where a signal
992 occurs while doing tb_phys_invalidate() */
993 saved_tb = NULL;
994 if (env) {
995 saved_tb = env->current_tb;
996 env->current_tb = NULL;
998 tb_phys_invalidate(tb, -1);
999 if (env) {
1000 env->current_tb = saved_tb;
1001 if (env->interrupt_request && env->current_tb)
1002 cpu_interrupt(env, env->interrupt_request);
1005 tb = tb_next;
1007 #if !defined(CONFIG_USER_ONLY)
1008 /* if no code remaining, no need to continue to use slow writes */
1009 if (!p->first_tb) {
1010 invalidate_page_bitmap(p);
1011 if (is_cpu_write_access) {
1012 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1015 #endif
1016 #ifdef TARGET_HAS_PRECISE_SMC
1017 if (current_tb_modified) {
1018 /* we generate a block containing just the instruction
1019 modifying the memory. It will ensure that it cannot modify
1020 itself */
1021 env->current_tb = NULL;
1022 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1023 cpu_resume_from_signal(env, NULL);
1025 #endif
1028 /* len must be <= 8 and start must be a multiple of len */
1029 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1031 PageDesc *p;
1032 int offset, b;
1033 #if 0
1034 if (1) {
1035 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1036 cpu_single_env->mem_io_vaddr, len,
1037 cpu_single_env->eip,
1038 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1040 #endif
1041 p = page_find(start >> TARGET_PAGE_BITS);
1042 if (!p)
1043 return;
1044 if (p->code_bitmap) {
1045 offset = start & ~TARGET_PAGE_MASK;
1046 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1047 if (b & ((1 << len) - 1))
1048 goto do_invalidate;
1049 } else {
1050 do_invalidate:
1051 tb_invalidate_phys_page_range(start, start + len, 1);
1055 #if !defined(CONFIG_SOFTMMU)
1056 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1057 unsigned long pc, void *puc)
1059 TranslationBlock *tb;
1060 PageDesc *p;
1061 int n;
1062 #ifdef TARGET_HAS_PRECISE_SMC
1063 TranslationBlock *current_tb = NULL;
1064 CPUState *env = cpu_single_env;
1065 int current_tb_modified = 0;
1066 target_ulong current_pc = 0;
1067 target_ulong current_cs_base = 0;
1068 int current_flags = 0;
1069 #endif
1071 addr &= TARGET_PAGE_MASK;
1072 p = page_find(addr >> TARGET_PAGE_BITS);
1073 if (!p)
1074 return;
1075 tb = p->first_tb;
1076 #ifdef TARGET_HAS_PRECISE_SMC
1077 if (tb && pc != 0) {
1078 current_tb = tb_find_pc(pc);
1080 #endif
1081 while (tb != NULL) {
1082 n = (long)tb & 3;
1083 tb = (TranslationBlock *)((long)tb & ~3);
1084 #ifdef TARGET_HAS_PRECISE_SMC
1085 if (current_tb == tb &&
1086 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1087 /* If we are modifying the current TB, we must stop
1088 its execution. We could be more precise by checking
1089 that the modification is after the current PC, but it
1090 would require a specialized function to partially
1091 restore the CPU state */
1093 current_tb_modified = 1;
1094 cpu_restore_state(current_tb, env, pc, puc);
1095 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1096 &current_flags);
1098 #endif /* TARGET_HAS_PRECISE_SMC */
1099 tb_phys_invalidate(tb, addr);
1100 tb = tb->page_next[n];
1102 p->first_tb = NULL;
1103 #ifdef TARGET_HAS_PRECISE_SMC
1104 if (current_tb_modified) {
1105 /* we generate a block containing just the instruction
1106 modifying the memory. It will ensure that it cannot modify
1107 itself */
1108 env->current_tb = NULL;
1109 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1110 cpu_resume_from_signal(env, puc);
1112 #endif
1114 #endif
1116 /* add the tb in the target page and protect it if necessary */
1117 static inline void tb_alloc_page(TranslationBlock *tb,
1118 unsigned int n, target_ulong page_addr)
1120 PageDesc *p;
1121 TranslationBlock *last_first_tb;
1123 tb->page_addr[n] = page_addr;
1124 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1125 tb->page_next[n] = p->first_tb;
1126 last_first_tb = p->first_tb;
1127 p->first_tb = (TranslationBlock *)((long)tb | n);
1128 invalidate_page_bitmap(p);
1130 #if defined(TARGET_HAS_SMC) || 1
1132 #if defined(CONFIG_USER_ONLY)
1133 if (p->flags & PAGE_WRITE) {
1134 target_ulong addr;
1135 PageDesc *p2;
1136 int prot;
1138 /* force the host page as non writable (writes will have a
1139 page fault + mprotect overhead) */
1140 page_addr &= qemu_host_page_mask;
1141 prot = 0;
1142 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1143 addr += TARGET_PAGE_SIZE) {
1145 p2 = page_find (addr >> TARGET_PAGE_BITS);
1146 if (!p2)
1147 continue;
1148 prot |= p2->flags;
1149 p2->flags &= ~PAGE_WRITE;
1150 page_get_flags(addr);
1152 mprotect(g2h(page_addr), qemu_host_page_size,
1153 (prot & PAGE_BITS) & ~PAGE_WRITE);
1154 #ifdef DEBUG_TB_INVALIDATE
1155 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1156 page_addr);
1157 #endif
1159 #else
1160 /* if some code is already present, then the pages are already
1161 protected. So we handle the case where only the first TB is
1162 allocated in a physical page */
1163 if (!last_first_tb) {
1164 tlb_protect_code(page_addr);
1166 #endif
1168 #endif /* TARGET_HAS_SMC */
1171 /* Allocate a new translation block. Flush the translation buffer if
1172 too many translation blocks or too much generated code. */
1173 TranslationBlock *tb_alloc(target_ulong pc)
1175 TranslationBlock *tb;
1177 if (nb_tbs >= code_gen_max_blocks ||
1178 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1179 return NULL;
1180 tb = &tbs[nb_tbs++];
1181 tb->pc = pc;
1182 tb->cflags = 0;
1183 return tb;
1186 void tb_free(TranslationBlock *tb)
1188 /* In practice this is mostly used for single-use temporary TBs.
1189 Ignore the hard cases and just back up if this TB happens to
1190 be the last one generated. */
1191 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1192 code_gen_ptr = tb->tc_ptr;
1193 nb_tbs--;
1197 /* add a new TB and link it to the physical page tables. phys_page2 is
1198 (-1) to indicate that only one page contains the TB. */
1199 void tb_link_phys(TranslationBlock *tb,
1200 target_ulong phys_pc, target_ulong phys_page2)
1202 unsigned int h;
1203 TranslationBlock **ptb;
1205 /* Grab the mmap lock to stop another thread invalidating this TB
1206 before we are done. */
1207 mmap_lock();
1208 /* add in the physical hash table */
1209 h = tb_phys_hash_func(phys_pc);
1210 ptb = &tb_phys_hash[h];
1211 tb->phys_hash_next = *ptb;
1212 *ptb = tb;
1214 /* add in the page list */
1215 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1216 if (phys_page2 != -1)
1217 tb_alloc_page(tb, 1, phys_page2);
1218 else
1219 tb->page_addr[1] = -1;
1221 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1222 tb->jmp_next[0] = NULL;
1223 tb->jmp_next[1] = NULL;
1225 /* init original jump addresses */
1226 if (tb->tb_next_offset[0] != 0xffff)
1227 tb_reset_jump(tb, 0);
1228 if (tb->tb_next_offset[1] != 0xffff)
1229 tb_reset_jump(tb, 1);
1231 #ifdef DEBUG_TB_CHECK
1232 tb_page_check();
1233 #endif
1234 mmap_unlock();
1237 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1238 tb[1].tc_ptr. Return NULL if not found */
1239 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1241 int m_min, m_max, m;
1242 unsigned long v;
1243 TranslationBlock *tb;
1245 if (nb_tbs <= 0)
1246 return NULL;
1247 if (tc_ptr < (unsigned long)code_gen_buffer ||
1248 tc_ptr >= (unsigned long)code_gen_ptr)
1249 return NULL;
1250 /* binary search (cf Knuth) */
1251 m_min = 0;
1252 m_max = nb_tbs - 1;
1253 while (m_min <= m_max) {
1254 m = (m_min + m_max) >> 1;
1255 tb = &tbs[m];
1256 v = (unsigned long)tb->tc_ptr;
1257 if (v == tc_ptr)
1258 return tb;
1259 else if (tc_ptr < v) {
1260 m_max = m - 1;
1261 } else {
1262 m_min = m + 1;
1265 return &tbs[m_max];
1268 static void tb_reset_jump_recursive(TranslationBlock *tb);
1270 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1272 TranslationBlock *tb1, *tb_next, **ptb;
1273 unsigned int n1;
1275 tb1 = tb->jmp_next[n];
1276 if (tb1 != NULL) {
1277 /* find head of list */
1278 for(;;) {
1279 n1 = (long)tb1 & 3;
1280 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1281 if (n1 == 2)
1282 break;
1283 tb1 = tb1->jmp_next[n1];
1285 /* we are now sure that tb jumps to tb1 */
1286 tb_next = tb1;
1288 /* remove tb from the jmp_first list */
1289 ptb = &tb_next->jmp_first;
1290 for(;;) {
1291 tb1 = *ptb;
1292 n1 = (long)tb1 & 3;
1293 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1294 if (n1 == n && tb1 == tb)
1295 break;
1296 ptb = &tb1->jmp_next[n1];
1298 *ptb = tb->jmp_next[n];
1299 tb->jmp_next[n] = NULL;
1301 /* suppress the jump to next tb in generated code */
1302 tb_reset_jump(tb, n);
1304 /* suppress jumps in the tb on which we could have jumped */
1305 tb_reset_jump_recursive(tb_next);
1309 static void tb_reset_jump_recursive(TranslationBlock *tb)
1311 tb_reset_jump_recursive2(tb, 0);
1312 tb_reset_jump_recursive2(tb, 1);
1315 #if defined(TARGET_HAS_ICE)
1316 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1318 target_phys_addr_t addr;
1319 target_ulong pd;
1320 ram_addr_t ram_addr;
1321 PhysPageDesc *p;
1323 addr = cpu_get_phys_page_debug(env, pc);
1324 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1325 if (!p) {
1326 pd = IO_MEM_UNASSIGNED;
1327 } else {
1328 pd = p->phys_offset;
1330 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1331 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1333 #endif
1335 /* Add a watchpoint. */
1336 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1337 int flags, CPUWatchpoint **watchpoint)
1339 target_ulong len_mask = ~(len - 1);
1340 CPUWatchpoint *wp;
1342 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1343 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1344 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1345 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1346 return -EINVAL;
1348 wp = qemu_malloc(sizeof(*wp));
1350 wp->vaddr = addr;
1351 wp->len_mask = len_mask;
1352 wp->flags = flags;
1354 /* keep all GDB-injected watchpoints in front */
1355 if (flags & BP_GDB)
1356 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1357 else
1358 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1360 tlb_flush_page(env, addr);
1362 if (watchpoint)
1363 *watchpoint = wp;
1364 return 0;
1367 /* Remove a specific watchpoint. */
1368 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1369 int flags)
1371 target_ulong len_mask = ~(len - 1);
1372 CPUWatchpoint *wp;
1374 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1375 if (addr == wp->vaddr && len_mask == wp->len_mask
1376 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1377 cpu_watchpoint_remove_by_ref(env, wp);
1378 return 0;
1381 return -ENOENT;
1384 /* Remove a specific watchpoint by reference. */
1385 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1387 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1389 tlb_flush_page(env, watchpoint->vaddr);
1391 qemu_free(watchpoint);
1394 /* Remove all matching watchpoints. */
1395 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1397 CPUWatchpoint *wp, *next;
1399 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1400 if (wp->flags & mask)
1401 cpu_watchpoint_remove_by_ref(env, wp);
1405 /* Add a breakpoint. */
1406 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1407 CPUBreakpoint **breakpoint)
1409 #if defined(TARGET_HAS_ICE)
1410 CPUBreakpoint *bp;
1412 bp = qemu_malloc(sizeof(*bp));
1414 bp->pc = pc;
1415 bp->flags = flags;
1417 /* keep all GDB-injected breakpoints in front */
1418 if (flags & BP_GDB)
1419 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1420 else
1421 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1423 breakpoint_invalidate(env, pc);
1425 if (breakpoint)
1426 *breakpoint = bp;
1427 return 0;
1428 #else
1429 return -ENOSYS;
1430 #endif
1433 /* Remove a specific breakpoint. */
1434 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1436 #if defined(TARGET_HAS_ICE)
1437 CPUBreakpoint *bp;
1439 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1440 if (bp->pc == pc && bp->flags == flags) {
1441 cpu_breakpoint_remove_by_ref(env, bp);
1442 return 0;
1445 return -ENOENT;
1446 #else
1447 return -ENOSYS;
1448 #endif
1451 /* Remove a specific breakpoint by reference. */
1452 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1454 #if defined(TARGET_HAS_ICE)
1455 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1457 breakpoint_invalidate(env, breakpoint->pc);
1459 qemu_free(breakpoint);
1460 #endif
1463 /* Remove all matching breakpoints. */
1464 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1466 #if defined(TARGET_HAS_ICE)
1467 CPUBreakpoint *bp, *next;
1469 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1470 if (bp->flags & mask)
1471 cpu_breakpoint_remove_by_ref(env, bp);
1473 #endif
1476 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1477 CPU loop after each instruction */
1478 void cpu_single_step(CPUState *env, int enabled)
1480 #if defined(TARGET_HAS_ICE)
1481 if (env->singlestep_enabled != enabled) {
1482 env->singlestep_enabled = enabled;
1483 if (kvm_enabled())
1484 kvm_update_guest_debug(env, 0);
1485 else {
1486 /* must flush all the translated code to avoid inconsistencies */
1487 /* XXX: only flush what is necessary */
1488 tb_flush(env);
1491 #endif
1494 /* enable or disable low-level logging */
1495 void cpu_set_log(int log_flags)
1497 loglevel = log_flags;
1498 if (loglevel && !logfile) {
1499 logfile = fopen(logfilename, log_append ? "a" : "w");
1500 if (!logfile) {
1501 perror(logfilename);
1502 _exit(1);
1504 #if !defined(CONFIG_SOFTMMU)
1505 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1507 static char logfile_buf[4096];
1508 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1510 #else
1511 setvbuf(logfile, NULL, _IOLBF, 0);
1512 #endif
1513 log_append = 1;
1515 if (!loglevel && logfile) {
1516 fclose(logfile);
1517 logfile = NULL;
1521 void cpu_set_log_filename(const char *filename)
1523 logfilename = strdup(filename);
1524 if (logfile) {
1525 fclose(logfile);
1526 logfile = NULL;
1528 cpu_set_log(loglevel);
1531 static void cpu_unlink_tb(CPUState *env)
1533 #if defined(USE_NPTL)
1534 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1535 problem and hope the cpu will stop of its own accord. For userspace
1536 emulation this often isn't actually as bad as it sounds. Often
1537 signals are used primarily to interrupt blocking syscalls. */
1538 #else
1539 TranslationBlock *tb;
1540 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1542 tb = env->current_tb;
1543 /* if the cpu is currently executing code, we must unlink it and
1544 all the potentially executing TB */
1545 if (tb && !testandset(&interrupt_lock)) {
1546 env->current_tb = NULL;
1547 tb_reset_jump_recursive(tb);
1548 resetlock(&interrupt_lock);
1550 #endif
1553 /* mask must never be zero, except for A20 change call */
1554 void cpu_interrupt(CPUState *env, int mask)
1556 int old_mask;
1558 old_mask = env->interrupt_request;
1559 env->interrupt_request |= mask;
1560 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1561 kvm_update_interrupt_request(env);
1563 #ifndef CONFIG_USER_ONLY
1565 /* If called from iothread context, wake the target cpu in
1566 * case it's halted. */
1568 if (!qemu_cpu_self(env)) {
1569 qemu_cpu_kick(env);
1570 return;
1572 #endif
1574 if (use_icount) {
1575 env->icount_decr.u16.high = 0xffff;
1576 #ifndef CONFIG_USER_ONLY
1577 if (!can_do_io(env)
1578 && (mask & ~old_mask) != 0) {
1579 cpu_abort(env, "Raised interrupt while not in I/O function");
1581 #endif
1582 } else {
1583 cpu_unlink_tb(env);
1587 void cpu_reset_interrupt(CPUState *env, int mask)
1589 env->interrupt_request &= ~mask;
1592 void cpu_exit(CPUState *env)
1594 env->exit_request = 1;
1595 cpu_unlink_tb(env);
1598 const CPULogItem cpu_log_items[] = {
1599 { CPU_LOG_TB_OUT_ASM, "out_asm",
1600 "show generated host assembly code for each compiled TB" },
1601 { CPU_LOG_TB_IN_ASM, "in_asm",
1602 "show target assembly code for each compiled TB" },
1603 { CPU_LOG_TB_OP, "op",
1604 "show micro ops for each compiled TB" },
1605 { CPU_LOG_TB_OP_OPT, "op_opt",
1606 "show micro ops "
1607 #ifdef TARGET_I386
1608 "before eflags optimization and "
1609 #endif
1610 "after liveness analysis" },
1611 { CPU_LOG_INT, "int",
1612 "show interrupts/exceptions in short format" },
1613 { CPU_LOG_EXEC, "exec",
1614 "show trace before each executed TB (lots of logs)" },
1615 { CPU_LOG_TB_CPU, "cpu",
1616 "show CPU state before block translation" },
1617 #ifdef TARGET_I386
1618 { CPU_LOG_PCALL, "pcall",
1619 "show protected mode far calls/returns/exceptions" },
1620 { CPU_LOG_RESET, "cpu_reset",
1621 "show CPU state before CPU resets" },
1622 #endif
1623 #ifdef DEBUG_IOPORT
1624 { CPU_LOG_IOPORT, "ioport",
1625 "show all i/o ports accesses" },
1626 #endif
1627 { 0, NULL, NULL },
1630 static int cmp1(const char *s1, int n, const char *s2)
1632 if (strlen(s2) != n)
1633 return 0;
1634 return memcmp(s1, s2, n) == 0;
1637 /* takes a comma-separated list of log masks. Returns 0 on error. */
1638 int cpu_str_to_log_mask(const char *str)
1640 const CPULogItem *item;
1641 int mask;
1642 const char *p, *p1;
1644 p = str;
1645 mask = 0;
1646 for(;;) {
1647 p1 = strchr(p, ',');
1648 if (!p1)
1649 p1 = p + strlen(p);
1650 if(cmp1(p,p1-p,"all")) {
1651 for(item = cpu_log_items; item->mask != 0; item++) {
1652 mask |= item->mask;
1654 } else {
1655 for(item = cpu_log_items; item->mask != 0; item++) {
1656 if (cmp1(p, p1 - p, item->name))
1657 goto found;
1659 return 0;
1661 found:
1662 mask |= item->mask;
1663 if (*p1 != ',')
1664 break;
1665 p = p1 + 1;
1667 return mask;
1670 void cpu_abort(CPUState *env, const char *fmt, ...)
1672 va_list ap;
1673 va_list ap2;
1675 va_start(ap, fmt);
1676 va_copy(ap2, ap);
1677 fprintf(stderr, "qemu: fatal: ");
1678 vfprintf(stderr, fmt, ap);
1679 fprintf(stderr, "\n");
1680 #ifdef TARGET_I386
1681 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1682 #else
1683 cpu_dump_state(env, stderr, fprintf, 0);
1684 #endif
1685 if (qemu_log_enabled()) {
1686 qemu_log("qemu: fatal: ");
1687 qemu_log_vprintf(fmt, ap2);
1688 qemu_log("\n");
1689 #ifdef TARGET_I386
1690 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1691 #else
1692 log_cpu_state(env, 0);
1693 #endif
1694 qemu_log_flush();
1695 qemu_log_close();
1697 va_end(ap2);
1698 va_end(ap);
1699 abort();
1702 CPUState *cpu_copy(CPUState *env)
1704 CPUState *new_env = cpu_init(env->cpu_model_str);
1705 CPUState *next_cpu = new_env->next_cpu;
1706 int cpu_index = new_env->cpu_index;
1707 #if defined(TARGET_HAS_ICE)
1708 CPUBreakpoint *bp;
1709 CPUWatchpoint *wp;
1710 #endif
1712 memcpy(new_env, env, sizeof(CPUState));
1714 /* Preserve chaining and index. */
1715 new_env->next_cpu = next_cpu;
1716 new_env->cpu_index = cpu_index;
1718 /* Clone all break/watchpoints.
1719 Note: Once we support ptrace with hw-debug register access, make sure
1720 BP_CPU break/watchpoints are handled correctly on clone. */
1721 TAILQ_INIT(&env->breakpoints);
1722 TAILQ_INIT(&env->watchpoints);
1723 #if defined(TARGET_HAS_ICE)
1724 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1725 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1727 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1728 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1729 wp->flags, NULL);
1731 #endif
1733 return new_env;
1736 #if !defined(CONFIG_USER_ONLY)
1738 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1740 unsigned int i;
1742 /* Discard jump cache entries for any tb which might potentially
1743 overlap the flushed page. */
1744 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1745 memset (&env->tb_jmp_cache[i], 0,
1746 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1748 i = tb_jmp_cache_hash_page(addr);
1749 memset (&env->tb_jmp_cache[i], 0,
1750 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1753 /* NOTE: if flush_global is true, also flush global entries (not
1754 implemented yet) */
1755 void tlb_flush(CPUState *env, int flush_global)
1757 int i;
1759 #if defined(DEBUG_TLB)
1760 printf("tlb_flush:\n");
1761 #endif
1762 /* must reset current TB so that interrupts cannot modify the
1763 links while we are modifying them */
1764 env->current_tb = NULL;
1766 for(i = 0; i < CPU_TLB_SIZE; i++) {
1767 env->tlb_table[0][i].addr_read = -1;
1768 env->tlb_table[0][i].addr_write = -1;
1769 env->tlb_table[0][i].addr_code = -1;
1770 env->tlb_table[1][i].addr_read = -1;
1771 env->tlb_table[1][i].addr_write = -1;
1772 env->tlb_table[1][i].addr_code = -1;
1773 #if (NB_MMU_MODES >= 3)
1774 env->tlb_table[2][i].addr_read = -1;
1775 env->tlb_table[2][i].addr_write = -1;
1776 env->tlb_table[2][i].addr_code = -1;
1777 #endif
1778 #if (NB_MMU_MODES >= 4)
1779 env->tlb_table[3][i].addr_read = -1;
1780 env->tlb_table[3][i].addr_write = -1;
1781 env->tlb_table[3][i].addr_code = -1;
1782 #endif
1783 #if (NB_MMU_MODES >= 5)
1784 env->tlb_table[4][i].addr_read = -1;
1785 env->tlb_table[4][i].addr_write = -1;
1786 env->tlb_table[4][i].addr_code = -1;
1787 #endif
1791 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1793 #ifdef CONFIG_KQEMU
1794 if (env->kqemu_enabled) {
1795 kqemu_flush(env, flush_global);
1797 #endif
1798 tlb_flush_count++;
1801 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1803 if (addr == (tlb_entry->addr_read &
1804 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1805 addr == (tlb_entry->addr_write &
1806 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1807 addr == (tlb_entry->addr_code &
1808 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1809 tlb_entry->addr_read = -1;
1810 tlb_entry->addr_write = -1;
1811 tlb_entry->addr_code = -1;
1815 void tlb_flush_page(CPUState *env, target_ulong addr)
1817 int i;
1819 #if defined(DEBUG_TLB)
1820 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1821 #endif
1822 /* must reset current TB so that interrupts cannot modify the
1823 links while we are modifying them */
1824 env->current_tb = NULL;
1826 addr &= TARGET_PAGE_MASK;
1827 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1828 tlb_flush_entry(&env->tlb_table[0][i], addr);
1829 tlb_flush_entry(&env->tlb_table[1][i], addr);
1830 #if (NB_MMU_MODES >= 3)
1831 tlb_flush_entry(&env->tlb_table[2][i], addr);
1832 #endif
1833 #if (NB_MMU_MODES >= 4)
1834 tlb_flush_entry(&env->tlb_table[3][i], addr);
1835 #endif
1836 #if (NB_MMU_MODES >= 5)
1837 tlb_flush_entry(&env->tlb_table[4][i], addr);
1838 #endif
1840 tlb_flush_jmp_cache(env, addr);
1842 #ifdef CONFIG_KQEMU
1843 if (env->kqemu_enabled) {
1844 kqemu_flush_page(env, addr);
1846 #endif
1849 /* update the TLBs so that writes to code in the virtual page 'addr'
1850 can be detected */
1851 static void tlb_protect_code(ram_addr_t ram_addr)
1853 cpu_physical_memory_reset_dirty(ram_addr,
1854 ram_addr + TARGET_PAGE_SIZE,
1855 CODE_DIRTY_FLAG);
1858 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1859 tested for self modifying code */
1860 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1861 target_ulong vaddr)
1863 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1866 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1867 unsigned long start, unsigned long length)
1869 unsigned long addr;
1870 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1871 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1872 if ((addr - start) < length) {
1873 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1878 /* Note: start and end must be within the same ram block. */
1879 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1880 int dirty_flags)
1882 CPUState *env;
1883 unsigned long length, start1;
1884 int i, mask, len;
1885 uint8_t *p;
1887 start &= TARGET_PAGE_MASK;
1888 end = TARGET_PAGE_ALIGN(end);
1890 length = end - start;
1891 if (length == 0)
1892 return;
1893 len = length >> TARGET_PAGE_BITS;
1894 #ifdef CONFIG_KQEMU
1895 /* XXX: should not depend on cpu context */
1896 env = first_cpu;
1897 if (env->kqemu_enabled) {
1898 ram_addr_t addr;
1899 addr = start;
1900 for(i = 0; i < len; i++) {
1901 kqemu_set_notdirty(env, addr);
1902 addr += TARGET_PAGE_SIZE;
1905 #endif
1906 mask = ~dirty_flags;
1907 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1908 for(i = 0; i < len; i++)
1909 p[i] &= mask;
1911 /* we modify the TLB cache so that the dirty bit will be set again
1912 when accessing the range */
1913 start1 = (unsigned long)qemu_get_ram_ptr(start);
1914 /* Check that we don't span multiple blocks - this breaks the
1915 address comparisons below. */
1916 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1917 != (end - 1) - start) {
1918 abort();
1921 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1922 for(i = 0; i < CPU_TLB_SIZE; i++)
1923 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1924 for(i = 0; i < CPU_TLB_SIZE; i++)
1925 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1926 #if (NB_MMU_MODES >= 3)
1927 for(i = 0; i < CPU_TLB_SIZE; i++)
1928 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1929 #endif
1930 #if (NB_MMU_MODES >= 4)
1931 for(i = 0; i < CPU_TLB_SIZE; i++)
1932 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1933 #endif
1934 #if (NB_MMU_MODES >= 5)
1935 for(i = 0; i < CPU_TLB_SIZE; i++)
1936 tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1937 #endif
1941 int cpu_physical_memory_set_dirty_tracking(int enable)
1943 int r=0;
1945 if (kvm_enabled())
1946 r = kvm_physical_memory_set_dirty_tracking(enable);
1947 in_migration = enable;
1948 return r;
1951 int cpu_physical_memory_get_dirty_tracking(void)
1953 return in_migration;
1956 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1958 if (kvm_enabled())
1959 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1962 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1964 ram_addr_t ram_addr;
1965 void *p;
1967 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1968 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1969 + tlb_entry->addend);
1970 ram_addr = qemu_ram_addr_from_host(p);
1971 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1972 tlb_entry->addr_write |= TLB_NOTDIRTY;
1977 /* update the TLB according to the current state of the dirty bits */
1978 void cpu_tlb_update_dirty(CPUState *env)
1980 int i;
1981 for(i = 0; i < CPU_TLB_SIZE; i++)
1982 tlb_update_dirty(&env->tlb_table[0][i]);
1983 for(i = 0; i < CPU_TLB_SIZE; i++)
1984 tlb_update_dirty(&env->tlb_table[1][i]);
1985 #if (NB_MMU_MODES >= 3)
1986 for(i = 0; i < CPU_TLB_SIZE; i++)
1987 tlb_update_dirty(&env->tlb_table[2][i]);
1988 #endif
1989 #if (NB_MMU_MODES >= 4)
1990 for(i = 0; i < CPU_TLB_SIZE; i++)
1991 tlb_update_dirty(&env->tlb_table[3][i]);
1992 #endif
1993 #if (NB_MMU_MODES >= 5)
1994 for(i = 0; i < CPU_TLB_SIZE; i++)
1995 tlb_update_dirty(&env->tlb_table[4][i]);
1996 #endif
1999 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2001 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2002 tlb_entry->addr_write = vaddr;
2005 /* update the TLB corresponding to virtual page vaddr
2006 so that it is no longer dirty */
2007 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2009 int i;
2011 vaddr &= TARGET_PAGE_MASK;
2012 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2013 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2014 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2015 #if (NB_MMU_MODES >= 3)
2016 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2017 #endif
2018 #if (NB_MMU_MODES >= 4)
2019 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2020 #endif
2021 #if (NB_MMU_MODES >= 5)
2022 tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
2023 #endif
2026 /* add a new TLB entry. At most one entry for a given virtual address
2027 is permitted. Return 0 if OK or 2 if the page could not be mapped
2028 (can only happen in non SOFTMMU mode for I/O pages or pages
2029 conflicting with the host address space). */
2030 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2031 target_phys_addr_t paddr, int prot,
2032 int mmu_idx, int is_softmmu)
2034 PhysPageDesc *p;
2035 unsigned long pd;
2036 unsigned int index;
2037 target_ulong address;
2038 target_ulong code_address;
2039 target_phys_addr_t addend;
2040 int ret;
2041 CPUTLBEntry *te;
2042 CPUWatchpoint *wp;
2043 target_phys_addr_t iotlb;
2045 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2046 if (!p) {
2047 pd = IO_MEM_UNASSIGNED;
2048 } else {
2049 pd = p->phys_offset;
2051 #if defined(DEBUG_TLB)
2052 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2053 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2054 #endif
2056 ret = 0;
2057 address = vaddr;
2058 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2059 /* IO memory case (romd handled later) */
2060 address |= TLB_MMIO;
2062 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2063 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2064 /* Normal RAM. */
2065 iotlb = pd & TARGET_PAGE_MASK;
2066 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2067 iotlb |= IO_MEM_NOTDIRTY;
2068 else
2069 iotlb |= IO_MEM_ROM;
2070 } else {
2071 /* IO handlers are currently passed a physical address.
2072 It would be nice to pass an offset from the base address
2073 of that region. This would avoid having to special case RAM,
2074 and avoid full address decoding in every device.
2075 We can't use the high bits of pd for this because
2076 IO_MEM_ROMD uses these as a ram address. */
2077 iotlb = (pd & ~TARGET_PAGE_MASK);
2078 if (p) {
2079 iotlb += p->region_offset;
2080 } else {
2081 iotlb += paddr;
2085 code_address = address;
2086 /* Make accesses to pages with watchpoints go via the
2087 watchpoint trap routines. */
2088 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2089 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2090 iotlb = io_mem_watch + paddr;
2091 /* TODO: The memory case can be optimized by not trapping
2092 reads of pages with a write breakpoint. */
2093 address |= TLB_MMIO;
2097 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2098 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2099 te = &env->tlb_table[mmu_idx][index];
2100 te->addend = addend - vaddr;
2101 if (prot & PAGE_READ) {
2102 te->addr_read = address;
2103 } else {
2104 te->addr_read = -1;
2107 if (prot & PAGE_EXEC) {
2108 te->addr_code = code_address;
2109 } else {
2110 te->addr_code = -1;
2112 if (prot & PAGE_WRITE) {
2113 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2114 (pd & IO_MEM_ROMD)) {
2115 /* Write access calls the I/O callback. */
2116 te->addr_write = address | TLB_MMIO;
2117 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2118 !cpu_physical_memory_is_dirty(pd)) {
2119 te->addr_write = address | TLB_NOTDIRTY;
2120 } else {
2121 te->addr_write = address;
2123 } else {
2124 te->addr_write = -1;
2126 return ret;
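/* Illustrative sketch (not part of the original file): a target's MMU
 * fault-handling path might call this after walking its page tables, e.g.
 *
 *     tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
 *                       paddr & TARGET_PAGE_MASK,
 *                       PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *                       mmu_idx, 1);
 *
 * where vaddr, paddr and mmu_idx come from the (hypothetical) target
 * page-table walk.
 */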
2129 #else
2131 void tlb_flush(CPUState *env, int flush_global)
2135 void tlb_flush_page(CPUState *env, target_ulong addr)
2139 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2140 target_phys_addr_t paddr, int prot,
2141 int mmu_idx, int is_softmmu)
2143 return 0;
2146 /* dump memory mappings */
2147 void page_dump(FILE *f)
2149 unsigned long start, end;
2150 int i, j, prot, prot1;
2151 PageDesc *p;
2153 fprintf(f, "%-8s %-8s %-8s %s\n",
2154 "start", "end", "size", "prot");
2155 start = -1;
2156 end = -1;
2157 prot = 0;
2158 for(i = 0; i <= L1_SIZE; i++) {
2159 if (i < L1_SIZE)
2160 p = l1_map[i];
2161 else
2162 p = NULL;
2163 for(j = 0;j < L2_SIZE; j++) {
2164 if (!p)
2165 prot1 = 0;
2166 else
2167 prot1 = p[j].flags;
2168 if (prot1 != prot) {
2169 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2170 if (start != -1) {
2171 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2172 start, end, end - start,
2173 prot & PAGE_READ ? 'r' : '-',
2174 prot & PAGE_WRITE ? 'w' : '-',
2175 prot & PAGE_EXEC ? 'x' : '-');
2177 if (prot1 != 0)
2178 start = end;
2179 else
2180 start = -1;
2181 prot = prot1;
2183 if (!p)
2184 break;
2189 int page_get_flags(target_ulong address)
2191 PageDesc *p;
2193 p = page_find(address >> TARGET_PAGE_BITS);
2194 if (!p)
2195 return 0;
2196 return p->flags;
2199 /* modify the flags of a page and invalidate the code if
2200 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2201 depending on PAGE_WRITE */
2202 void page_set_flags(target_ulong start, target_ulong end, int flags)
2204 PageDesc *p;
2205 target_ulong addr;
2207 /* mmap_lock should already be held. */
2208 start = start & TARGET_PAGE_MASK;
2209 end = TARGET_PAGE_ALIGN(end);
2210 if (flags & PAGE_WRITE)
2211 flags |= PAGE_WRITE_ORG;
2212 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2213 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2214 /* We may be called for host regions that are outside guest
2215 address space. */
2216 if (!p)
2217 return;
2218 /* if the write protection is set, then we invalidate the code
2219 inside */
2220 if (!(p->flags & PAGE_WRITE) &&
2221 (flags & PAGE_WRITE) &&
2222 p->first_tb) {
2223 tb_invalidate_phys_page(addr, 0, NULL);
2225 p->flags = flags;
2229 int page_check_range(target_ulong start, target_ulong len, int flags)
2231 PageDesc *p;
2232 target_ulong end;
2233 target_ulong addr;
2235 if (start + len < start)
2236 /* we've wrapped around */
2237 return -1;
2239 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2240 start = start & TARGET_PAGE_MASK;
2242 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2243 p = page_find(addr >> TARGET_PAGE_BITS);
2244 if( !p )
2245 return -1;
2246 if( !(p->flags & PAGE_VALID) )
2247 return -1;
2249 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2250 return -1;
2251 if (flags & PAGE_WRITE) {
2252 if (!(p->flags & PAGE_WRITE_ORG))
2253 return -1;
2254 /* unprotect the page if it was put read-only because it
2255 contains translated code */
2256 if (!(p->flags & PAGE_WRITE)) {
2257 if (!page_unprotect(addr, 0, NULL))
2258 return -1;
2260 return 0;
2263 return 0;
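/* e.g. (illustrative, not in the original source): user-mode syscall
 * emulation can validate a guest buffer before touching it:
 *
 *     if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0)
 *         return -EFAULT;   // guest_addr, len and the error path are
 *                           // hypothetical
 */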
2266 /* called from signal handler: invalidate the code and unprotect the
2267 page. Return TRUE if the fault was successfully handled. */
2268 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2270 unsigned int page_index, prot, pindex;
2271 PageDesc *p, *p1;
2272 target_ulong host_start, host_end, addr;
2274 /* Technically this isn't safe inside a signal handler. However we
2275 know this only ever happens in a synchronous SEGV handler, so in
2276 practice it seems to be ok. */
2277 mmap_lock();
2279 host_start = address & qemu_host_page_mask;
2280 page_index = host_start >> TARGET_PAGE_BITS;
2281 p1 = page_find(page_index);
2282 if (!p1) {
2283 mmap_unlock();
2284 return 0;
2286 host_end = host_start + qemu_host_page_size;
2287 p = p1;
2288 prot = 0;
2289 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2290 prot |= p->flags;
2291 p++;
2293 /* if the page was really writable, then we change its
2294 protection back to writable */
2295 if (prot & PAGE_WRITE_ORG) {
2296 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2297 if (!(p1[pindex].flags & PAGE_WRITE)) {
2298 mprotect((void *)g2h(host_start), qemu_host_page_size,
2299 (prot & PAGE_BITS) | PAGE_WRITE);
2300 p1[pindex].flags |= PAGE_WRITE;
2301 /* and since the content will be modified, we must invalidate
2302 the corresponding translated code. */
2303 tb_invalidate_phys_page(address, pc, puc);
2304 #ifdef DEBUG_TB_CHECK
2305 tb_invalidate_check(address);
2306 #endif
2307 mmap_unlock();
2308 return 1;
2311 mmap_unlock();
2312 return 0;
2315 static inline void tlb_set_dirty(CPUState *env,
2316 unsigned long addr, target_ulong vaddr)
2319 #endif /* defined(CONFIG_USER_ONLY) */
2321 #if !defined(CONFIG_USER_ONLY)
2323 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2324 ram_addr_t memory, ram_addr_t region_offset);
2325 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2326 ram_addr_t orig_memory, ram_addr_t region_offset);
2327 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2328 need_subpage) \
2329 do { \
2330 if (addr > start_addr) \
2331 start_addr2 = 0; \
2332 else { \
2333 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2334 if (start_addr2 > 0) \
2335 need_subpage = 1; \
2338 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2339 end_addr2 = TARGET_PAGE_SIZE - 1; \
2340 else { \
2341 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2342 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2343 need_subpage = 1; \
2345 } while (0)
2347 /* register physical memory. 'size' must be a multiple of the target
2348 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2349 io memory page. The address used when calling the IO function is
2350 the offset from the start of the region, plus region_offset. Both
2351 start_addr and region_offset are rounded down to a page boundary
2352 before calculating this offset. This should not be a problem unless
2353 the low bits of start_addr and region_offset differ. */
2354 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2355 ram_addr_t size,
2356 ram_addr_t phys_offset,
2357 ram_addr_t region_offset)
2359 target_phys_addr_t addr, end_addr;
2360 PhysPageDesc *p;
2361 CPUState *env;
2362 ram_addr_t orig_size = size;
2363 void *subpage;
2365 #ifdef CONFIG_KQEMU
2366 /* XXX: should not depend on cpu context */
2367 env = first_cpu;
2368 if (env->kqemu_enabled) {
2369 kqemu_set_phys_mem(start_addr, size, phys_offset);
2371 #endif
2372 if (kvm_enabled())
2373 kvm_set_phys_mem(start_addr, size, phys_offset);
2375 if (phys_offset == IO_MEM_UNASSIGNED) {
2376 region_offset = start_addr;
2378 region_offset &= TARGET_PAGE_MASK;
2379 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2380 end_addr = start_addr + (target_phys_addr_t)size;
2381 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2382 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2383 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2384 ram_addr_t orig_memory = p->phys_offset;
2385 target_phys_addr_t start_addr2, end_addr2;
2386 int need_subpage = 0;
2388 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2389 need_subpage);
2390 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2391 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2392 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2393 &p->phys_offset, orig_memory,
2394 p->region_offset);
2395 } else {
2396 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2397 >> IO_MEM_SHIFT];
2399 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2400 region_offset);
2401 p->region_offset = 0;
2402 } else {
2403 p->phys_offset = phys_offset;
2404 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2405 (phys_offset & IO_MEM_ROMD))
2406 phys_offset += TARGET_PAGE_SIZE;
2408 } else {
2409 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2410 p->phys_offset = phys_offset;
2411 p->region_offset = region_offset;
2412 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2413 (phys_offset & IO_MEM_ROMD)) {
2414 phys_offset += TARGET_PAGE_SIZE;
2415 } else {
2416 target_phys_addr_t start_addr2, end_addr2;
2417 int need_subpage = 0;
2419 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2420 end_addr2, need_subpage);
2422 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2423 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2424 &p->phys_offset, IO_MEM_UNASSIGNED,
2425 addr & TARGET_PAGE_MASK);
2426 subpage_register(subpage, start_addr2, end_addr2,
2427 phys_offset, region_offset);
2428 p->region_offset = 0;
2432 region_offset += TARGET_PAGE_SIZE;
2435 /* since each CPU stores ram addresses in its TLB cache, we must
2436 reset the modified entries */
2437 /* XXX: slow ! */
2438 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2439 tlb_flush(env, 1);
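/* Illustrative sketch (not part of the original file): mapping 1 MB of
 * freshly allocated RAM at guest physical address 0x100000, with no extra
 * region offset:
 *
 *     ram_addr_t off = qemu_ram_alloc(0x100000);
 *     cpu_register_physical_memory_offset(0x100000, 0x100000,
 *                                         off | IO_MEM_RAM, 0);
 *
 * The low bits of phys_offset select the io handler; IO_MEM_RAM marks
 * plain RAM.
 */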
2443 /* XXX: temporary until new memory mapping API */
2444 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2446 PhysPageDesc *p;
2448 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2449 if (!p)
2450 return IO_MEM_UNASSIGNED;
2451 return p->phys_offset;
2454 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2456 if (kvm_enabled())
2457 kvm_coalesce_mmio_region(addr, size);
2460 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2462 if (kvm_enabled())
2463 kvm_uncoalesce_mmio_region(addr, size);
2466 #ifdef CONFIG_KQEMU
2467 /* XXX: better than nothing */
2468 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2470 ram_addr_t addr;
2471 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2472 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2473 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2474 abort();
2476 addr = last_ram_offset;
2477 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2478 return addr;
2480 #endif
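/* Allocate 'size' bytes of host memory to back guest RAM and return the
 * ram_addr_t offset of the new block. The offset is what gets registered
 * with cpu_register_physical_memory_offset() and later translated back to
 * a host pointer with qemu_get_ram_ptr(). (Descriptive comment added for
 * clarity; not in the original file.) */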
2482 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2484 RAMBlock *new_block;
2486 #ifdef CONFIG_KQEMU
2487 if (kqemu_phys_ram_base) {
2488 return kqemu_ram_alloc(size);
2490 #endif
2492 size = TARGET_PAGE_ALIGN(size);
2493 new_block = qemu_malloc(sizeof(*new_block));
2495 new_block->host = qemu_vmalloc(size);
2496 new_block->offset = last_ram_offset;
2497 new_block->length = size;
2499 new_block->next = ram_blocks;
2500 ram_blocks = new_block;
2502 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2503 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2504 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2505 0xff, size >> TARGET_PAGE_BITS);
2507 last_ram_offset += size;
2509 if (kvm_enabled())
2510 kvm_setup_guest_memory(new_block->host, size);
2512 return new_block->offset;
2515 void qemu_ram_free(ram_addr_t addr)
2517 /* TODO: implement this. */
2520 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2521 With the exception of the softmmu code in this file, this should
2522 only be used for local memory (e.g. video ram) that the device owns,
2523 and knows it isn't going to access beyond the end of the block.
2525 It should not be used for general purpose DMA.
2526 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2528 void *qemu_get_ram_ptr(ram_addr_t addr)
2530 RAMBlock *prev;
2531 RAMBlock **prevp;
2532 RAMBlock *block;
2534 #ifdef CONFIG_KQEMU
2535 if (kqemu_phys_ram_base) {
2536 return kqemu_phys_ram_base + addr;
2538 #endif
2540 prev = NULL;
2541 prevp = &ram_blocks;
2542 block = ram_blocks;
2543 while (block && (block->offset > addr
2544 || block->offset + block->length <= addr)) {
2545 if (prev)
2546 prevp = &prev->next;
2547 prev = block;
2548 block = block->next;
2550 if (!block) {
2551 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2552 abort();
2554 /* Move this entry to the start of the list. */
2555 if (prev) {
2556 prev->next = block->next;
2557 block->next = *prevp;
2558 *prevp = block;
2560 return block->host + (addr - block->offset);
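/* e.g. (illustrative, not in the original source): a display device that
 * allocated its framebuffer with qemu_ram_alloc() can obtain a host
 * pointer to it with
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);  // vram_offset is
 *                                                     // hypothetical
 */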
2563 /* Some of the softmmu routines need to translate from a host pointer
2564 (typically a TLB entry) back to a ram offset. */
2565 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2567 RAMBlock *prev;
2568 RAMBlock **prevp;
2569 RAMBlock *block;
2570 uint8_t *host = ptr;
2572 #ifdef CONFIG_KQEMU
2573 if (kqemu_phys_ram_base) {
2574 return host - kqemu_phys_ram_base;
2576 #endif
2578 prev = NULL;
2579 prevp = &ram_blocks;
2580 block = ram_blocks;
2581 while (block && (block->host > host
2582 || block->host + block->length <= host)) {
2583 if (prev)
2584 prevp = &prev->next;
2585 prev = block;
2586 block = block->next;
2588 if (!block) {
2589 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2590 abort();
2592 return block->offset + (host - block->host);
2595 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2597 #ifdef DEBUG_UNASSIGNED
2598 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2599 #endif
2600 #if defined(TARGET_SPARC)
2601 do_unassigned_access(addr, 0, 0, 0, 1);
2602 #endif
2603 return 0;
2606 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2608 #ifdef DEBUG_UNASSIGNED
2609 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2610 #endif
2611 #if defined(TARGET_SPARC)
2612 do_unassigned_access(addr, 0, 0, 0, 2);
2613 #endif
2614 return 0;
2617 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2619 #ifdef DEBUG_UNASSIGNED
2620 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2621 #endif
2622 #if defined(TARGET_SPARC)
2623 do_unassigned_access(addr, 0, 0, 0, 4);
2624 #endif
2625 return 0;
2628 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2630 #ifdef DEBUG_UNASSIGNED
2631 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2632 #endif
2633 #if defined(TARGET_SPARC)
2634 do_unassigned_access(addr, 1, 0, 0, 1);
2635 #endif
2638 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2640 #ifdef DEBUG_UNASSIGNED
2641 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2642 #endif
2643 #if defined(TARGET_SPARC)
2644 do_unassigned_access(addr, 1, 0, 0, 2);
2645 #endif
2648 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2650 #ifdef DEBUG_UNASSIGNED
2651 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2652 #endif
2653 #if defined(TARGET_SPARC)
2654 do_unassigned_access(addr, 1, 0, 0, 4);
2655 #endif
2658 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2659 unassigned_mem_readb,
2660 unassigned_mem_readw,
2661 unassigned_mem_readl,
2664 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2665 unassigned_mem_writeb,
2666 unassigned_mem_writew,
2667 unassigned_mem_writel,
2670 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2671 uint32_t val)
2673 int dirty_flags;
2674 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2675 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2676 #if !defined(CONFIG_USER_ONLY)
2677 tb_invalidate_phys_page_fast(ram_addr, 1);
2678 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2679 #endif
2681 stb_p(qemu_get_ram_ptr(ram_addr), val);
2682 #ifdef CONFIG_KQEMU
2683 if (cpu_single_env->kqemu_enabled &&
2684 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2685 kqemu_modify_page(cpu_single_env, ram_addr);
2686 #endif
2687 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2688 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2689 /* we remove the notdirty callback only if the code has been
2690 flushed */
2691 if (dirty_flags == 0xff)
2692 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2695 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2696 uint32_t val)
2698 int dirty_flags;
2699 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2700 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2701 #if !defined(CONFIG_USER_ONLY)
2702 tb_invalidate_phys_page_fast(ram_addr, 2);
2703 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2704 #endif
2706 stw_p(qemu_get_ram_ptr(ram_addr), val);
2707 #ifdef CONFIG_KQEMU
2708 if (cpu_single_env->kqemu_enabled &&
2709 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2710 kqemu_modify_page(cpu_single_env, ram_addr);
2711 #endif
2712 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2713 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2714 /* we remove the notdirty callback only if the code has been
2715 flushed */
2716 if (dirty_flags == 0xff)
2717 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2720 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2721 uint32_t val)
2723 int dirty_flags;
2724 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2725 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2726 #if !defined(CONFIG_USER_ONLY)
2727 tb_invalidate_phys_page_fast(ram_addr, 4);
2728 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2729 #endif
2731 stl_p(qemu_get_ram_ptr(ram_addr), val);
2732 #ifdef CONFIG_KQEMU
2733 if (cpu_single_env->kqemu_enabled &&
2734 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2735 kqemu_modify_page(cpu_single_env, ram_addr);
2736 #endif
2737 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2738 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2739 /* we remove the notdirty callback only if the code has been
2740 flushed */
2741 if (dirty_flags == 0xff)
2742 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2745 static CPUReadMemoryFunc *error_mem_read[3] = {
2746 NULL, /* never used */
2747 NULL, /* never used */
2748 NULL, /* never used */
2751 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2752 notdirty_mem_writeb,
2753 notdirty_mem_writew,
2754 notdirty_mem_writel,
2757 /* Generate a debug exception if a watchpoint has been hit. */
2758 static void check_watchpoint(int offset, int len_mask, int flags)
2760 CPUState *env = cpu_single_env;
2761 target_ulong pc, cs_base;
2762 TranslationBlock *tb;
2763 target_ulong vaddr;
2764 CPUWatchpoint *wp;
2765 int cpu_flags;
2767 if (env->watchpoint_hit) {
2768 /* We re-entered the check after replacing the TB. Now raise
2769 * the debug interrupt so that it will trigger after the
2770 * current instruction. */
2771 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2772 return;
2774 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2775 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2776 if ((vaddr == (wp->vaddr & len_mask) ||
2777 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2778 wp->flags |= BP_WATCHPOINT_HIT;
2779 if (!env->watchpoint_hit) {
2780 env->watchpoint_hit = wp;
2781 tb = tb_find_pc(env->mem_io_pc);
2782 if (!tb) {
2783 cpu_abort(env, "check_watchpoint: could not find TB for "
2784 "pc=%p", (void *)env->mem_io_pc);
2786 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2787 tb_phys_invalidate(tb, -1);
2788 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2789 env->exception_index = EXCP_DEBUG;
2790 } else {
2791 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2792 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2794 cpu_resume_from_signal(env, NULL);
2796 } else {
2797 wp->flags &= ~BP_WATCHPOINT_HIT;
2802 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2803 so these check for a hit then pass through to the normal out-of-line
2804 phys routines. */
2805 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2807 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2808 return ldub_phys(addr);
2811 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2813 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2814 return lduw_phys(addr);
2817 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2819 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2820 return ldl_phys(addr);
2823 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2824 uint32_t val)
2826 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2827 stb_phys(addr, val);
2830 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2831 uint32_t val)
2833 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2834 stw_phys(addr, val);
2837 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2838 uint32_t val)
2840 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2841 stl_phys(addr, val);
2844 static CPUReadMemoryFunc *watch_mem_read[3] = {
2845 watch_mem_readb,
2846 watch_mem_readw,
2847 watch_mem_readl,
2850 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2851 watch_mem_writeb,
2852 watch_mem_writew,
2853 watch_mem_writel,
2856 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2857 unsigned int len)
2859 uint32_t ret;
2860 unsigned int idx;
2862 idx = SUBPAGE_IDX(addr);
2863 #if defined(DEBUG_SUBPAGE)
2864 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2865 mmio, len, addr, idx);
2866 #endif
2867 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2868 addr + mmio->region_offset[idx][0][len]);
2870 return ret;
2873 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2874 uint32_t value, unsigned int len)
2876 unsigned int idx;
2878 idx = SUBPAGE_IDX(addr);
2879 #if defined(DEBUG_SUBPAGE)
2880 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2881 mmio, len, addr, idx, value);
2882 #endif
2883 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2884 addr + mmio->region_offset[idx][1][len],
2885 value);
2888 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2890 #if defined(DEBUG_SUBPAGE)
2891 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2892 #endif
2894 return subpage_readlen(opaque, addr, 0);
2897 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2898 uint32_t value)
2900 #if defined(DEBUG_SUBPAGE)
2901 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2902 #endif
2903 subpage_writelen(opaque, addr, value, 0);
2906 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2908 #if defined(DEBUG_SUBPAGE)
2909 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2910 #endif
2912 return subpage_readlen(opaque, addr, 1);
2915 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2916 uint32_t value)
2918 #if defined(DEBUG_SUBPAGE)
2919 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2920 #endif
2921 subpage_writelen(opaque, addr, value, 1);
2924 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2926 #if defined(DEBUG_SUBPAGE)
2927 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2928 #endif
2930 return subpage_readlen(opaque, addr, 2);
2933 static void subpage_writel (void *opaque,
2934 target_phys_addr_t addr, uint32_t value)
2936 #if defined(DEBUG_SUBPAGE)
2937 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2938 #endif
2939 subpage_writelen(opaque, addr, value, 2);
2942 static CPUReadMemoryFunc *subpage_read[] = {
2943 &subpage_readb,
2944 &subpage_readw,
2945 &subpage_readl,
2948 static CPUWriteMemoryFunc *subpage_write[] = {
2949 &subpage_writeb,
2950 &subpage_writew,
2951 &subpage_writel,
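/* Subpages handle memory regions that are not page aligned or are smaller
 * than a target page: a single guest page is split into SUBPAGE_IDX()
 * granules, each of which can dispatch to a different io handler.
 * (Descriptive comment added for clarity; not in the original file.) */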
2954 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2955 ram_addr_t memory, ram_addr_t region_offset)
2957 int idx, eidx;
2958 unsigned int i;
2960 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2961 return -1;
2962 idx = SUBPAGE_IDX(start);
2963 eidx = SUBPAGE_IDX(end);
2964 #if defined(DEBUG_SUBPAGE)
2965 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2966 mmio, start, end, idx, eidx, memory);
2967 #endif
2968 memory >>= IO_MEM_SHIFT;
2969 for (; idx <= eidx; idx++) {
2970 for (i = 0; i < 4; i++) {
2971 if (io_mem_read[memory][i]) {
2972 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2973 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2974 mmio->region_offset[idx][0][i] = region_offset;
2976 if (io_mem_write[memory][i]) {
2977 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2978 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2979 mmio->region_offset[idx][1][i] = region_offset;
2984 return 0;
2987 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2988 ram_addr_t orig_memory, ram_addr_t region_offset)
2990 subpage_t *mmio;
2991 int subpage_memory;
2993 mmio = qemu_mallocz(sizeof(subpage_t));
2995 mmio->base = base;
2996 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2997 #if defined(DEBUG_SUBPAGE)
2998 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2999 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3000 #endif
3001 *phys = subpage_memory | IO_MEM_SUBPAGE;
3002 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3003 region_offset);
3005 return mmio;
3008 static int get_free_io_mem_idx(void)
3010 int i;
3012 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3013 if (!io_mem_used[i]) {
3014 io_mem_used[i] = 1;
3015 return i;
3018 return -1;
3021 static void io_mem_init(void)
3023 int i;
3025 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3026 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3027 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3028 for (i=0; i<5; i++)
3029 io_mem_used[i] = 1;
3031 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3032 watch_mem_write, NULL);
3033 #ifdef CONFIG_KQEMU
3034 if (kqemu_phys_ram_base) {
3035 /* alloc dirty bits array */
3036 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3037 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3039 #endif
3042 /* mem_read and mem_write are arrays of functions containing the
3043 function to access byte (index 0), word (index 1) and dword (index
3044 2). Functions can be omitted with a NULL function pointer.
3045 If io_index is non-zero, the corresponding io zone is
3046 modified. If it is zero, a new io zone is allocated. The return
3047 value can be used with cpu_register_physical_memory(). -1 is
3048 returned on error. */
3049 int cpu_register_io_memory(int io_index,
3050 CPUReadMemoryFunc **mem_read,
3051 CPUWriteMemoryFunc **mem_write,
3052 void *opaque)
3054 int i, subwidth = 0;
3056 if (io_index <= 0) {
3057 io_index = get_free_io_mem_idx();
3058 if (io_index == -1)
3059 return io_index;
3060 } else {
3061 if (io_index >= IO_MEM_NB_ENTRIES)
3062 return -1;
3065 for(i = 0;i < 3; i++) {
3066 if (!mem_read[i] || !mem_write[i])
3067 subwidth = IO_MEM_SUBWIDTH;
3068 io_mem_read[io_index][i] = mem_read[i];
3069 io_mem_write[io_index][i] = mem_write[i];
3071 io_mem_opaque[io_index] = opaque;
3072 return (io_index << IO_MEM_SHIFT) | subwidth;
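/* Illustrative usage sketch (not part of the original file): a device
 * with hypothetical byte/word/long handlers would register them and then
 * map the region into guest physical address space, e.g.
 *
 *     static CPUReadMemoryFunc *mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc *mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *     int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
 *     cpu_register_physical_memory_offset(base, 0x1000, io, 0);
 *
 * Here s and base stand for the (hypothetical) device state and MMIO base
 * address. The returned value already carries the IO_MEM_SHIFT encoding,
 * so it can be used directly as a phys_offset.
 */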
3075 void cpu_unregister_io_memory(int io_table_address)
3077 int i;
3078 int io_index = io_table_address >> IO_MEM_SHIFT;
3080 for (i=0;i < 3; i++) {
3081 io_mem_read[io_index][i] = unassigned_mem_read[i];
3082 io_mem_write[io_index][i] = unassigned_mem_write[i];
3084 io_mem_opaque[io_index] = NULL;
3085 io_mem_used[io_index] = 0;
3088 #endif /* !defined(CONFIG_USER_ONLY) */
3090 /* physical memory access (slow version, mainly for debug) */
3091 #if defined(CONFIG_USER_ONLY)
3092 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3093 int len, int is_write)
3095 int l, flags;
3096 target_ulong page;
3097 void * p;
3099 while (len > 0) {
3100 page = addr & TARGET_PAGE_MASK;
3101 l = (page + TARGET_PAGE_SIZE) - addr;
3102 if (l > len)
3103 l = len;
3104 flags = page_get_flags(page);
3105 if (!(flags & PAGE_VALID))
3106 return;
3107 if (is_write) {
3108 if (!(flags & PAGE_WRITE))
3109 return;
3110 /* XXX: this code should not depend on lock_user */
3111 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3112 /* FIXME - should this return an error rather than just fail? */
3113 return;
3114 memcpy(p, buf, l);
3115 unlock_user(p, addr, l);
3116 } else {
3117 if (!(flags & PAGE_READ))
3118 return;
3119 /* XXX: this code should not depend on lock_user */
3120 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3121 /* FIXME - should this return an error rather than just fail? */
3122 return;
3123 memcpy(buf, p, l);
3124 unlock_user(p, addr, 0);
3126 len -= l;
3127 buf += l;
3128 addr += l;
3132 #else
3133 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3134 int len, int is_write)
3136 int l, io_index;
3137 uint8_t *ptr;
3138 uint32_t val;
3139 target_phys_addr_t page;
3140 unsigned long pd;
3141 PhysPageDesc *p;
3143 while (len > 0) {
3144 page = addr & TARGET_PAGE_MASK;
3145 l = (page + TARGET_PAGE_SIZE) - addr;
3146 if (l > len)
3147 l = len;
3148 p = phys_page_find(page >> TARGET_PAGE_BITS);
3149 if (!p) {
3150 pd = IO_MEM_UNASSIGNED;
3151 } else {
3152 pd = p->phys_offset;
3155 if (is_write) {
3156 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3157 target_phys_addr_t addr1 = addr;
3158 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3159 if (p)
3160 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3161 /* XXX: could force cpu_single_env to NULL to avoid
3162 potential bugs */
3163 if (l >= 4 && ((addr1 & 3) == 0)) {
3164 /* 32 bit write access */
3165 val = ldl_p(buf);
3166 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3167 l = 4;
3168 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3169 /* 16 bit write access */
3170 val = lduw_p(buf);
3171 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3172 l = 2;
3173 } else {
3174 /* 8 bit write access */
3175 val = ldub_p(buf);
3176 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3177 l = 1;
3179 } else {
3180 unsigned long addr1;
3181 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3182 /* RAM case */
3183 ptr = qemu_get_ram_ptr(addr1);
3184 memcpy(ptr, buf, l);
3185 if (!cpu_physical_memory_is_dirty(addr1)) {
3186 /* invalidate code */
3187 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3188 /* set dirty bit */
3189 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3190 (0xff & ~CODE_DIRTY_FLAG);
3192 /* qemu doesn't execute guest code directly, but kvm does,
3193 so flush the instruction cache */
3194 if (kvm_enabled())
3195 flush_icache_range((unsigned long)ptr,
3196 ((unsigned long)ptr)+l);
3198 } else {
3199 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3200 !(pd & IO_MEM_ROMD)) {
3201 target_phys_addr_t addr1 = addr;
3202 /* I/O case */
3203 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3204 if (p)
3205 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3206 if (l >= 4 && ((addr1 & 3) == 0)) {
3207 /* 32 bit read access */
3208 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3209 stl_p(buf, val);
3210 l = 4;
3211 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3212 /* 16 bit read access */
3213 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3214 stw_p(buf, val);
3215 l = 2;
3216 } else {
3217 /* 8 bit read access */
3218 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3219 stb_p(buf, val);
3220 l = 1;
3222 } else {
3223 /* RAM case */
3224 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3225 (addr & ~TARGET_PAGE_MASK);
3226 memcpy(buf, ptr, l);
3229 len -= l;
3230 buf += l;
3231 addr += l;
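/* e.g. (illustrative, not in the original source): copying a small buffer
 * to and from guest physical memory:
 *
 *     uint8_t buf[4] = { 1, 2, 3, 4 };
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 1);   // write
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);   // read back
 *
 * gpa stands for a hypothetical guest physical address.
 */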
3235 /* used for ROM loading : can write in RAM and ROM */
3236 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3237 const uint8_t *buf, int len)
3239 int l;
3240 uint8_t *ptr;
3241 target_phys_addr_t page;
3242 unsigned long pd;
3243 PhysPageDesc *p;
3245 while (len > 0) {
3246 page = addr & TARGET_PAGE_MASK;
3247 l = (page + TARGET_PAGE_SIZE) - addr;
3248 if (l > len)
3249 l = len;
3250 p = phys_page_find(page >> TARGET_PAGE_BITS);
3251 if (!p) {
3252 pd = IO_MEM_UNASSIGNED;
3253 } else {
3254 pd = p->phys_offset;
3257 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3258 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3259 !(pd & IO_MEM_ROMD)) {
3260 /* do nothing */
3261 } else {
3262 unsigned long addr1;
3263 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3264 /* ROM/RAM case */
3265 ptr = qemu_get_ram_ptr(addr1);
3266 memcpy(ptr, buf, l);
3268 len -= l;
3269 buf += l;
3270 addr += l;
3274 typedef struct {
3275 void *buffer;
3276 target_phys_addr_t addr;
3277 target_phys_addr_t len;
3278 } BounceBuffer;
3280 static BounceBuffer bounce;
3282 typedef struct MapClient {
3283 void *opaque;
3284 void (*callback)(void *opaque);
3285 LIST_ENTRY(MapClient) link;
3286 } MapClient;
3288 static LIST_HEAD(map_client_list, MapClient) map_client_list
3289 = LIST_HEAD_INITIALIZER(map_client_list);
3291 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3293 MapClient *client = qemu_malloc(sizeof(*client));
3295 client->opaque = opaque;
3296 client->callback = callback;
3297 LIST_INSERT_HEAD(&map_client_list, client, link);
3298 return client;
3301 void cpu_unregister_map_client(void *_client)
3303 MapClient *client = (MapClient *)_client;
3305 LIST_REMOVE(client, link);
3308 static void cpu_notify_map_clients(void)
3310 MapClient *client;
3312 while (!LIST_EMPTY(&map_client_list)) {
3313 client = LIST_FIRST(&map_client_list);
3314 client->callback(client->opaque);
3315 LIST_REMOVE(client, link);
3319 /* Map a physical memory region into a host virtual address.
3320 * May map a subset of the requested range, given by and returned in *plen.
3321 * May return NULL if resources needed to perform the mapping are exhausted.
3322 * Use only for reads OR writes - not for read-modify-write operations.
3323 * Use cpu_register_map_client() to know when retrying the map operation is
3324 * likely to succeed.
3326 void *cpu_physical_memory_map(target_phys_addr_t addr,
3327 target_phys_addr_t *plen,
3328 int is_write)
3330 target_phys_addr_t len = *plen;
3331 target_phys_addr_t done = 0;
3332 int l;
3333 uint8_t *ret = NULL;
3334 uint8_t *ptr;
3335 target_phys_addr_t page;
3336 unsigned long pd;
3337 PhysPageDesc *p;
3338 unsigned long addr1;
3340 while (len > 0) {
3341 page = addr & TARGET_PAGE_MASK;
3342 l = (page + TARGET_PAGE_SIZE) - addr;
3343 if (l > len)
3344 l = len;
3345 p = phys_page_find(page >> TARGET_PAGE_BITS);
3346 if (!p) {
3347 pd = IO_MEM_UNASSIGNED;
3348 } else {
3349 pd = p->phys_offset;
3352 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3353 if (done || bounce.buffer) {
3354 break;
3356 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3357 bounce.addr = addr;
3358 bounce.len = l;
3359 if (!is_write) {
3360 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3362 ptr = bounce.buffer;
3363 } else {
3364 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3365 ptr = qemu_get_ram_ptr(addr1);
3367 if (!done) {
3368 ret = ptr;
3369 } else if (ret + done != ptr) {
3370 break;
3373 len -= l;
3374 addr += l;
3375 done += l;
3377 *plen = done;
3378 return ret;
3381 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3382 * Will also mark the memory as dirty if is_write == 1. access_len gives
3383 * the amount of memory that was actually read or written by the caller.
3385 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3386 int is_write, target_phys_addr_t access_len)
3388 if (buffer != bounce.buffer) {
3389 if (is_write) {
3390 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3391 while (access_len) {
3392 unsigned l;
3393 l = TARGET_PAGE_SIZE;
3394 if (l > access_len)
3395 l = access_len;
3396 if (!cpu_physical_memory_is_dirty(addr1)) {
3397 /* invalidate code */
3398 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3399 /* set dirty bit */
3400 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3401 (0xff & ~CODE_DIRTY_FLAG);
3403 addr1 += l;
3404 access_len -= l;
3407 return;
3409 if (is_write) {
3410 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3412 qemu_free(bounce.buffer);
3413 bounce.buffer = NULL;
3414 cpu_notify_map_clients();
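/* Illustrative DMA-style sketch (not part of the original file):
 *
 *     target_phys_addr_t maplen = size;          // size is hypothetical
 *     void *host = cpu_physical_memory_map(gpa, &maplen, 1);
 *     if (host) {
 *         // device fills up to maplen bytes, which may be less than size
 *         cpu_physical_memory_unmap(host, maplen, 1, maplen);
 *     } else {
 *         // out of bounce buffers: register a MapClient and retry later
 *     }
 */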
3417 /* warning: addr must be aligned */
3418 uint32_t ldl_phys(target_phys_addr_t addr)
3420 int io_index;
3421 uint8_t *ptr;
3422 uint32_t val;
3423 unsigned long pd;
3424 PhysPageDesc *p;
3426 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3427 if (!p) {
3428 pd = IO_MEM_UNASSIGNED;
3429 } else {
3430 pd = p->phys_offset;
3433 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3434 !(pd & IO_MEM_ROMD)) {
3435 /* I/O case */
3436 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3437 if (p)
3438 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3439 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3440 } else {
3441 /* RAM case */
3442 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3443 (addr & ~TARGET_PAGE_MASK);
3444 val = ldl_p(ptr);
3446 return val;
3449 /* warning: addr must be aligned */
3450 uint64_t ldq_phys(target_phys_addr_t addr)
3452 int io_index;
3453 uint8_t *ptr;
3454 uint64_t val;
3455 unsigned long pd;
3456 PhysPageDesc *p;
3458 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3459 if (!p) {
3460 pd = IO_MEM_UNASSIGNED;
3461 } else {
3462 pd = p->phys_offset;
3465 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3466 !(pd & IO_MEM_ROMD)) {
3467 /* I/O case */
3468 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3469 if (p)
3470 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3471 #ifdef TARGET_WORDS_BIGENDIAN
3472 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3473 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3474 #else
3475 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3476 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3477 #endif
3478 } else {
3479 /* RAM case */
3480 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3481 (addr & ~TARGET_PAGE_MASK);
3482 val = ldq_p(ptr);
3484 return val;
3487 /* XXX: optimize */
3488 uint32_t ldub_phys(target_phys_addr_t addr)
3490 uint8_t val;
3491 cpu_physical_memory_read(addr, &val, 1);
3492 return val;
3495 /* XXX: optimize */
3496 uint32_t lduw_phys(target_phys_addr_t addr)
3498 uint16_t val;
3499 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3500 return tswap16(val);
3503 /* warning: addr must be aligned. The ram page is not marked as dirty
3504 and the code inside is not invalidated. It is useful if the dirty
3505 bits are used to track modified PTEs */
3506 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3508 int io_index;
3509 uint8_t *ptr;
3510 unsigned long pd;
3511 PhysPageDesc *p;
3513 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3514 if (!p) {
3515 pd = IO_MEM_UNASSIGNED;
3516 } else {
3517 pd = p->phys_offset;
3520 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3521 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3522 if (p)
3523 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3524 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3525 } else {
3526 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3527 ptr = qemu_get_ram_ptr(addr1);
3528 stl_p(ptr, val);
3530 if (unlikely(in_migration)) {
3531 if (!cpu_physical_memory_is_dirty(addr1)) {
3532 /* invalidate code */
3533 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3534 /* set dirty bit */
3535 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3536 (0xff & ~CODE_DIRTY_FLAG);
3542 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3544 int io_index;
3545 uint8_t *ptr;
3546 unsigned long pd;
3547 PhysPageDesc *p;
3549 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3550 if (!p) {
3551 pd = IO_MEM_UNASSIGNED;
3552 } else {
3553 pd = p->phys_offset;
3556 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3557 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3558 if (p)
3559 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3560 #ifdef TARGET_WORDS_BIGENDIAN
3561 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3562 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3563 #else
3564 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3565 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3566 #endif
3567 } else {
3568 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3569 (addr & ~TARGET_PAGE_MASK);
3570 stq_p(ptr, val);
3574 /* warning: addr must be aligned */
3575 void stl_phys(target_phys_addr_t addr, uint32_t val)
3577 int io_index;
3578 uint8_t *ptr;
3579 unsigned long pd;
3580 PhysPageDesc *p;
3582 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3583 if (!p) {
3584 pd = IO_MEM_UNASSIGNED;
3585 } else {
3586 pd = p->phys_offset;
3589 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3590 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3591 if (p)
3592 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3593 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3594 } else {
3595 unsigned long addr1;
3596 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3597 /* RAM case */
3598 ptr = qemu_get_ram_ptr(addr1);
3599 stl_p(ptr, val);
3600 if (!cpu_physical_memory_is_dirty(addr1)) {
3601 /* invalidate code */
3602 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3603 /* set dirty bit */
3604 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3605 (0xff & ~CODE_DIRTY_FLAG);
3610 /* XXX: optimize */
3611 void stb_phys(target_phys_addr_t addr, uint32_t val)
3613 uint8_t v = val;
3614 cpu_physical_memory_write(addr, &v, 1);
3617 /* XXX: optimize */
3618 void stw_phys(target_phys_addr_t addr, uint32_t val)
3620 uint16_t v = tswap16(val);
3621 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3624 /* XXX: optimize */
3625 void stq_phys(target_phys_addr_t addr, uint64_t val)
3627 val = tswap64(val);
3628 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
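/* e.g. (illustrative, not in the original source): updating one 32-bit
 * word of guest physical memory:
 *
 *     uint32_t v = ldl_phys(gpa);          // gpa is hypothetical
 *     stl_phys(gpa, v | 1);
 *
 * stl_phys_notdirty() would be used instead when, as for PTE dirty bits,
 * the store must not mark the page dirty or invalidate translated code.
 */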
3631 #endif
3633 /* virtual memory access for debug (includes writing to ROM) */
3634 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3635 uint8_t *buf, int len, int is_write)
3637 int l;
3638 target_phys_addr_t phys_addr;
3639 target_ulong page;
3641 while (len > 0) {
3642 page = addr & TARGET_PAGE_MASK;
3643 phys_addr = cpu_get_phys_page_debug(env, page);
3644 /* if no physical page mapped, return an error */
3645 if (phys_addr == -1)
3646 return -1;
3647 l = (page + TARGET_PAGE_SIZE) - addr;
3648 if (l > len)
3649 l = len;
3650 phys_addr += (addr & ~TARGET_PAGE_MASK);
3651 #if !defined(CONFIG_USER_ONLY)
3652 if (is_write)
3653 cpu_physical_memory_write_rom(phys_addr, buf, l);
3654 else
3655 #endif
3656 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3657 len -= l;
3658 buf += l;
3659 addr += l;
3661 return 0;
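/* e.g. (illustrative, not in the original source): a debugger stub can
 * read guest-virtual memory without going through the TLB fast path:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0)
 *         return -1;               // no physical page mapped at vaddr
 *
 * env and vaddr stand for the caller's CPU state and the (hypothetical)
 * virtual address being inspected.
 */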
3664 /* in deterministic execution mode, instructions doing device I/Os
3665 must be at the end of the TB */
3666 void cpu_io_recompile(CPUState *env, void *retaddr)
3668 TranslationBlock *tb;
3669 uint32_t n, cflags;
3670 target_ulong pc, cs_base;
3671 uint64_t flags;
3673 tb = tb_find_pc((unsigned long)retaddr);
3674 if (!tb) {
3675 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3676 retaddr);
3678 n = env->icount_decr.u16.low + tb->icount;
3679 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3680 /* Calculate how many instructions had been executed before the fault
3681 occurred. */
3682 n = n - env->icount_decr.u16.low;
3683 /* Generate a new TB ending on the I/O insn. */
3684 n++;
3685 /* On MIPS and SH, delay slot instructions can only be restarted if
3686 they were already the first instruction in the TB. If this is not
3687 the first instruction in a TB then re-execute the preceding
3688 branch. */
3689 #if defined(TARGET_MIPS)
3690 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3691 env->active_tc.PC -= 4;
3692 env->icount_decr.u16.low++;
3693 env->hflags &= ~MIPS_HFLAG_BMASK;
3695 #elif defined(TARGET_SH4)
3696 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3697 && n > 1) {
3698 env->pc -= 2;
3699 env->icount_decr.u16.low++;
3700 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3702 #endif
3703 /* This should never happen. */
3704 if (n > CF_COUNT_MASK)
3705 cpu_abort(env, "TB too big during recompile");
3707 cflags = n | CF_LAST_IO;
3708 pc = tb->pc;
3709 cs_base = tb->cs_base;
3710 flags = tb->flags;
3711 tb_phys_invalidate(tb, -1);
3712 /* FIXME: In theory this could raise an exception. In practice
3713 we have already translated the block once so it's probably ok. */
3714 tb_gen_code(env, pc, cs_base, flags, cflags);
3715 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3716 the first in the TB) then we end up generating a whole new TB and
3717 repeating the fault, which is horribly inefficient.
3718 Better would be to execute just this insn uncached, or generate a
3719 second new TB. */
3720 cpu_resume_from_signal(env, NULL);
3723 void dump_exec_info(FILE *f,
3724 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3726 int i, target_code_size, max_target_code_size;
3727 int direct_jmp_count, direct_jmp2_count, cross_page;
3728 TranslationBlock *tb;
3730 target_code_size = 0;
3731 max_target_code_size = 0;
3732 cross_page = 0;
3733 direct_jmp_count = 0;
3734 direct_jmp2_count = 0;
3735 for(i = 0; i < nb_tbs; i++) {
3736 tb = &tbs[i];
3737 target_code_size += tb->size;
3738 if (tb->size > max_target_code_size)
3739 max_target_code_size = tb->size;
3740 if (tb->page_addr[1] != -1)
3741 cross_page++;
3742 if (tb->tb_next_offset[0] != 0xffff) {
3743 direct_jmp_count++;
3744 if (tb->tb_next_offset[1] != 0xffff) {
3745 direct_jmp2_count++;
3749 /* XXX: avoid using doubles? */
3750 cpu_fprintf(f, "Translation buffer state:\n");
3751 cpu_fprintf(f, "gen code size %ld/%ld\n",
3752 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3753 cpu_fprintf(f, "TB count %d/%d\n",
3754 nb_tbs, code_gen_max_blocks);
3755 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3756 nb_tbs ? target_code_size / nb_tbs : 0,
3757 max_target_code_size);
3758 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3759 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3760 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3761 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3762 cross_page,
3763 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3764 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3765 direct_jmp_count,
3766 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3767 direct_jmp2_count,
3768 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3769 cpu_fprintf(f, "\nStatistics:\n");
3770 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3771 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3772 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3773 tcg_dump_info(f, cpu_fprintf);
3776 #if !defined(CONFIG_USER_ONLY)
3778 #define MMUSUFFIX _cmmu
3779 #define GETPC() NULL
3780 #define env cpu_single_env
3781 #define SOFTMMU_CODE_ACCESS
3783 #define SHIFT 0
3784 #include "softmmu_template.h"
3786 #define SHIFT 1
3787 #include "softmmu_template.h"
3789 #define SHIFT 2
3790 #include "softmmu_template.h"
3792 #define SHIFT 3
3793 #include "softmmu_template.h"
3795 #undef env
3797 #endif