Fix kvm-originated warnings
[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19  */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #include "osdep.h"
47 #include "kvm.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #endif
52 //#define DEBUG_TB_INVALIDATE
53 //#define DEBUG_FLUSH
54 //#define DEBUG_TLB
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
66 #undef DEBUG_TB_CHECK
67 #endif
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #define MMAP_AREA_START 0x00000000
72 #define MMAP_AREA_END 0xa8000000
74 #if defined(TARGET_SPARC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 41
76 #elif defined(TARGET_SPARC)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #elif defined(TARGET_ALPHA)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #define TARGET_VIRT_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_PPC64)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
89 #else
90 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
91 #define TARGET_PHYS_ADDR_SPACE_BITS 32
92 #endif
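/* Note: TARGET_PHYS_ADDR_SPACE_BITS only bounds the guest physical address
   space modelled here; it sizes the physical page descriptor table
   (l1_phys_map) set up below and checked in phys_page_find_alloc(). */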
94 static TranslationBlock *tbs;
95 int code_gen_max_blocks;
96 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
97 static int nb_tbs;
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
101 #if defined(__arm__) || defined(__sparc_v9__)
102 /* The prologue must be reachable with a direct jump. ARM and Sparc64
103 have limited branch ranges (possibly also PPC) so place it in a
104    section close to the code segment. */
105 #define code_gen_section \
106 __attribute__((__section__(".gen_code"))) \
107 __attribute__((aligned (32)))
108 #else
109 #define code_gen_section \
110 __attribute__((aligned (32)))
111 #endif
113 uint8_t code_gen_prologue[1024] code_gen_section;
114 static uint8_t *code_gen_buffer;
115 static unsigned long code_gen_buffer_size;
116 /* threshold to flush the translated code buffer */
117 static unsigned long code_gen_buffer_max_size;
118 uint8_t *code_gen_ptr;
120 #if !defined(CONFIG_USER_ONLY)
121 ram_addr_t phys_ram_size;
122 int phys_ram_fd;
123 uint8_t *phys_ram_base;
124 uint8_t *phys_ram_dirty;
125 uint8_t *bios_mem;
126 static int in_migration;
127 static ram_addr_t phys_ram_alloc_offset = 0;
128 #endif
130 CPUState *first_cpu;
131 /* current CPU in the current thread. It is only valid inside
132 cpu_exec() */
133 CPUState *cpu_single_env;
134 /* 0 = Do not count executed instructions.
135 1 = Precise instruction counting.
136 2 = Adaptive rate instruction counting. */
137 int use_icount = 0;
138 /* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
140 int64_t qemu_icount;
142 typedef struct PageDesc {
143 /* list of TBs intersecting this ram page */
144 TranslationBlock *first_tb;
145     /* in order to optimize self-modifying code, we count the number
146 of lookups we do to a given page to use a bitmap */
147 unsigned int code_write_count;
148 uint8_t *code_bitmap;
149 #if defined(CONFIG_USER_ONLY)
150 unsigned long flags;
151 #endif
152 } PageDesc;
154 typedef struct PhysPageDesc {
155 /* offset in host memory of the page + io_index in the low bits */
156 ram_addr_t phys_offset;
157 ram_addr_t region_offset;
158 } PhysPageDesc;
160 #define L2_BITS 10
161 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
162 /* XXX: this is a temporary hack for alpha target.
163 * In the future, this is to be replaced by a multi-level table
164  * to actually be able to handle the complete 64-bit address space.
165  */
166 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167 #else
168 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169 #endif
171 #define L1_SIZE (1 << L1_BITS)
172 #define L2_SIZE (1 << L2_BITS)
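/* Sizing illustration (assuming the common 4 KB target page, i.e.
   TARGET_PAGE_BITS == 12): L1_BITS = 32 - 10 - 12 = 10, so both levels hold
   1024 entries and together index 2^20 page descriptors, covering the full
   32-bit (4 GB) address space one TARGET_PAGE at a time. */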
174 unsigned long qemu_real_host_page_size;
175 unsigned long qemu_host_page_bits;
176 unsigned long qemu_host_page_size;
177 unsigned long qemu_host_page_mask;
179 /* XXX: for system emulation, it could just be an array */
180 static PageDesc *l1_map[L1_SIZE];
181 static PhysPageDesc **l1_phys_map;
183 #if !defined(CONFIG_USER_ONLY)
184 static void io_mem_init(void);
186 /* io memory support */
187 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
188 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
189 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
190 char io_mem_used[IO_MEM_NB_ENTRIES];
191 static int io_mem_watch;
192 #endif
194 /* log support */
195 static const char *logfilename = "/tmp/qemu.log";
196 FILE *logfile;
197 int loglevel;
198 static int log_append = 0;
200 /* statistics */
201 static int tlb_flush_count;
202 static int tb_flush_count;
203 static int tb_phys_invalidate_count;
205 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
206 typedef struct subpage_t {
207 target_phys_addr_t base;
208 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
209 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
210 void *opaque[TARGET_PAGE_SIZE][2][4];
211 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
212 } subpage_t;
214 #ifdef _WIN32
215 static void map_exec(void *addr, long size)
217 DWORD old_protect;
218 VirtualProtect(addr, size,
219 PAGE_EXECUTE_READWRITE, &old_protect);
222 #else
223 static void map_exec(void *addr, long size)
225 unsigned long start, end, page_size;
227 page_size = getpagesize();
228 start = (unsigned long)addr;
229 start &= ~(page_size - 1);
231 end = (unsigned long)addr + size;
232 end += page_size - 1;
233 end &= ~(page_size - 1);
235 mprotect((void *)start, end - start,
236 PROT_READ | PROT_WRITE | PROT_EXEC);
238 #endif
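/* map_exec() rounds the range outward to host page boundaries before
   changing protections, since mprotect()/VirtualProtect() operate on whole
   pages.  For example, with a 4 KB host page, addr = 0x1234 and size = 0x100
   become the range [0x1000, 0x2000). */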
240 static void page_init(void)
242 /* NOTE: we can always suppose that qemu_host_page_size >=
243 TARGET_PAGE_SIZE */
244 #ifdef _WIN32
246 SYSTEM_INFO system_info;
248 GetSystemInfo(&system_info);
249 qemu_real_host_page_size = system_info.dwPageSize;
251 #else
252 qemu_real_host_page_size = getpagesize();
253 #endif
254 if (qemu_host_page_size == 0)
255 qemu_host_page_size = qemu_real_host_page_size;
256 if (qemu_host_page_size < TARGET_PAGE_SIZE)
257 qemu_host_page_size = TARGET_PAGE_SIZE;
258 qemu_host_page_bits = 0;
259 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
260 qemu_host_page_bits++;
261 qemu_host_page_mask = ~(qemu_host_page_size - 1);
262 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
263 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
265 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
267 long long startaddr, endaddr;
268 FILE *f;
269 int n;
271 mmap_lock();
272 last_brk = (unsigned long)sbrk(0);
273 f = fopen("/proc/self/maps", "r");
274 if (f) {
275 do {
276 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
277 if (n == 2) {
278 startaddr = MIN(startaddr,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280 endaddr = MIN(endaddr,
281 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282 page_set_flags(startaddr & TARGET_PAGE_MASK,
283 TARGET_PAGE_ALIGN(endaddr),
284 PAGE_RESERVED);
286 } while (!feof(f));
287 fclose(f);
289 mmap_unlock();
291 #endif
294 static inline PageDesc **page_l1_map(target_ulong index)
296 #if TARGET_LONG_BITS > 32
297 /* Host memory outside guest VM. For 32-bit targets we have already
298 excluded high addresses. */
299 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
300 return NULL;
301 #endif
302 return &l1_map[index >> L2_BITS];
305 static inline PageDesc *page_find_alloc(target_ulong index)
307 PageDesc **lp, *p;
308 lp = page_l1_map(index);
309 if (!lp)
310 return NULL;
312 p = *lp;
313 if (!p) {
314 /* allocate if not found */
315 #if defined(CONFIG_USER_ONLY)
316 size_t len = sizeof(PageDesc) * L2_SIZE;
317 /* Don't use qemu_malloc because it may recurse. */
318 p = mmap(0, len, PROT_READ | PROT_WRITE,
319 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
320 *lp = p;
321 if (h2g_valid(p)) {
322 unsigned long addr = h2g(p);
323 page_set_flags(addr & TARGET_PAGE_MASK,
324 TARGET_PAGE_ALIGN(addr + len),
325 PAGE_RESERVED);
327 #else
328 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
329 *lp = p;
330 #endif
332 return p + (index & (L2_SIZE - 1));
335 static inline PageDesc *page_find(target_ulong index)
337 PageDesc **lp, *p;
338 lp = page_l1_map(index);
339 if (!lp)
340 return NULL;
342 p = *lp;
343 if (!p)
344 return 0;
345 return p + (index & (L2_SIZE - 1));
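/* Both lookups split the linear page index in two: index >> L2_BITS picks
   the L1 slot and index & (L2_SIZE - 1) the entry inside the L2 table.
   page_find() returns NULL when the L2 table was never allocated, while
   page_find_alloc() creates it on demand. */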
348 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
350 void **lp, **p;
351 PhysPageDesc *pd;
353 p = (void **)l1_phys_map;
354 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
356 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
357 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
358 #endif
359 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
360 p = *lp;
361 if (!p) {
362 /* allocate if not found */
363 if (!alloc)
364 return NULL;
365 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
366 memset(p, 0, sizeof(void *) * L1_SIZE);
367 *lp = p;
369 #endif
370 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
371 pd = *lp;
372 if (!pd) {
373 int i;
374 /* allocate if not found */
375 if (!alloc)
376 return NULL;
377 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
378 *lp = pd;
379 for (i = 0; i < L2_SIZE; i++)
380 pd[i].phys_offset = IO_MEM_UNASSIGNED;
382 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
385 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
387 return phys_page_find_alloc(index, 0);
390 #if !defined(CONFIG_USER_ONLY)
391 static void tlb_protect_code(ram_addr_t ram_addr);
392 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393 target_ulong vaddr);
394 #define mmap_lock() do { } while(0)
395 #define mmap_unlock() do { } while(0)
396 #endif
398 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
400 #if defined(CONFIG_USER_ONLY)
401 /* Currently it is not recommended to allocate big chunks of data in
402    user mode. It will change when a dedicated libc is used */
403 #define USE_STATIC_CODE_GEN_BUFFER
404 #endif
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408 #endif
410 static void code_gen_alloc(unsigned long tb_size)
412 if (kvm_enabled())
413 return;
415 #ifdef USE_STATIC_CODE_GEN_BUFFER
416 code_gen_buffer = static_code_gen_buffer;
417 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
418 map_exec(code_gen_buffer, code_gen_buffer_size);
419 #else
420 code_gen_buffer_size = tb_size;
421 if (code_gen_buffer_size == 0) {
422 #if defined(CONFIG_USER_ONLY)
423 /* in user mode, phys_ram_size is not meaningful */
424 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
425 #else
426         /* XXX: needs adjustments */
427 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
428 #endif
430 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
431 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
432 /* The code gen buffer location may have constraints depending on
433 the host cpu and OS */
434 #if defined(__linux__)
436 int flags;
437 void *start = NULL;
439 flags = MAP_PRIVATE | MAP_ANONYMOUS;
440 #if defined(__x86_64__)
441 flags |= MAP_32BIT;
442 /* Cannot map more than that */
443 if (code_gen_buffer_size > (800 * 1024 * 1024))
444 code_gen_buffer_size = (800 * 1024 * 1024);
445 #elif defined(__sparc_v9__)
446 // Map the buffer below 2G, so we can use direct calls and branches
447 flags |= MAP_FIXED;
448 start = (void *) 0x60000000UL;
449 if (code_gen_buffer_size > (512 * 1024 * 1024))
450 code_gen_buffer_size = (512 * 1024 * 1024);
451 #elif defined(__arm__)
452 /* Map the buffer below 32M, so we can use direct calls and branches */
453 flags |= MAP_FIXED;
454 start = (void *) 0x01000000UL;
455 if (code_gen_buffer_size > 16 * 1024 * 1024)
456 code_gen_buffer_size = 16 * 1024 * 1024;
457 #endif
458 code_gen_buffer = mmap(start, code_gen_buffer_size,
459 PROT_WRITE | PROT_READ | PROT_EXEC,
460 flags, -1, 0);
461 if (code_gen_buffer == MAP_FAILED) {
462 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
463 exit(1);
466 #elif defined(__FreeBSD__)
468 int flags;
469 void *addr = NULL;
470 flags = MAP_PRIVATE | MAP_ANONYMOUS;
471 #if defined(__x86_64__)
472 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
473 * 0x40000000 is free */
474 flags |= MAP_FIXED;
475 addr = (void *)0x40000000;
476 /* Cannot map more than that */
477 if (code_gen_buffer_size > (800 * 1024 * 1024))
478 code_gen_buffer_size = (800 * 1024 * 1024);
479 #endif
480 code_gen_buffer = mmap(addr, code_gen_buffer_size,
481 PROT_WRITE | PROT_READ | PROT_EXEC,
482 flags, -1, 0);
483 if (code_gen_buffer == MAP_FAILED) {
484 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485 exit(1);
488 #else
489 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
490 map_exec(code_gen_buffer, code_gen_buffer_size);
491 #endif
492 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
493 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
494 code_gen_buffer_max_size = code_gen_buffer_size -
495 code_gen_max_block_size();
496 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
497 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
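/* The low/fixed mappings above (MAP_32BIT on x86_64, fixed addresses on
   sparc64/arm) keep the buffer within direct call/branch range of the
   prologue and of other translated blocks.  code_gen_buffer_max_size leaves
   one maximum-sized block of headroom so a translation started near the end
   of the buffer cannot run past it. */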
500 /* Must be called before using the QEMU cpus. 'tb_size' is the size
501 (in bytes) allocated to the translation buffer. Zero means default
502 size. */
503 void cpu_exec_init_all(unsigned long tb_size)
505 cpu_gen_init();
506 code_gen_alloc(tb_size);
507 code_gen_ptr = code_gen_buffer;
508 page_init();
509 #if !defined(CONFIG_USER_ONLY)
510 io_mem_init();
511 #endif
514 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
516 #define CPU_COMMON_SAVE_VERSION 1
518 static void cpu_common_save(QEMUFile *f, void *opaque)
520 CPUState *env = opaque;
522 qemu_put_be32s(f, &env->halted);
523 qemu_put_be32s(f, &env->interrupt_request);
526 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
528 CPUState *env = opaque;
530 if (version_id != CPU_COMMON_SAVE_VERSION)
531 return -EINVAL;
533 qemu_get_be32s(f, &env->halted);
534 qemu_get_be32s(f, &env->interrupt_request);
535 tlb_flush(env, 1);
537 return 0;
539 #endif
541 void cpu_exec_init(CPUState *env)
543 CPUState **penv;
544 int cpu_index;
546 env->next_cpu = NULL;
547 penv = &first_cpu;
548 cpu_index = 0;
549 while (*penv != NULL) {
550 penv = (CPUState **)&(*penv)->next_cpu;
551 cpu_index++;
553 env->cpu_index = cpu_index;
554 TAILQ_INIT(&env->breakpoints);
555 TAILQ_INIT(&env->watchpoints);
556 #ifdef _WIN32
557 env->thread_id = GetCurrentProcessId();
558 #else
559 env->thread_id = getpid();
560 #endif
561 *penv = env;
562 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
563 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
564 cpu_common_save, cpu_common_load, env);
565 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
566 cpu_save, cpu_load, env);
567 #endif
570 static inline void invalidate_page_bitmap(PageDesc *p)
572 if (p->code_bitmap) {
573 qemu_free(p->code_bitmap);
574 p->code_bitmap = NULL;
576 p->code_write_count = 0;
579 /* set to NULL all the 'first_tb' fields in all PageDescs */
580 static void page_flush_tb(void)
582 int i, j;
583 PageDesc *p;
585 for(i = 0; i < L1_SIZE; i++) {
586 p = l1_map[i];
587 if (p) {
588 for(j = 0; j < L2_SIZE; j++) {
589 p->first_tb = NULL;
590 invalidate_page_bitmap(p);
591 p++;
597 /* flush all the translation blocks */
598 /* XXX: tb_flush is currently not thread safe */
599 void tb_flush(CPUState *env1)
601 CPUState *env;
602 #if defined(DEBUG_FLUSH)
603 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
604 (unsigned long)(code_gen_ptr - code_gen_buffer),
605 nb_tbs, nb_tbs > 0 ?
606 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
607 #endif
608 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
609 cpu_abort(env1, "Internal error: code buffer overflow\n");
611 nb_tbs = 0;
613 for(env = first_cpu; env != NULL; env = env->next_cpu) {
614 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
617 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
618 page_flush_tb();
620 code_gen_ptr = code_gen_buffer;
621 /* XXX: flush processor icache at this point if cache flush is
622 expensive */
623 tb_flush_count++;
626 #ifdef DEBUG_TB_CHECK
628 static void tb_invalidate_check(target_ulong address)
630 TranslationBlock *tb;
631 int i;
632 address &= TARGET_PAGE_MASK;
633 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
634 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
635 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
636 address >= tb->pc + tb->size)) {
637 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
638 address, (long)tb->pc, tb->size);
644 /* verify that all the pages have correct rights for code */
645 static void tb_page_check(void)
647 TranslationBlock *tb;
648 int i, flags1, flags2;
650 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
651 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
652 flags1 = page_get_flags(tb->pc);
653 flags2 = page_get_flags(tb->pc + tb->size - 1);
654 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
655 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
656 (long)tb->pc, tb->size, flags1, flags2);
662 static void tb_jmp_check(TranslationBlock *tb)
664 TranslationBlock *tb1;
665 unsigned int n1;
667 /* suppress any remaining jumps to this TB */
668 tb1 = tb->jmp_first;
669 for(;;) {
670 n1 = (long)tb1 & 3;
671 tb1 = (TranslationBlock *)((long)tb1 & ~3);
672 if (n1 == 2)
673 break;
674 tb1 = tb1->jmp_next[n1];
676 /* check end of list */
677 if (tb1 != tb) {
678 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
682 #endif
684 /* invalidate one TB */
685 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
686 int next_offset)
688 TranslationBlock *tb1;
689 for(;;) {
690 tb1 = *ptb;
691 if (tb1 == tb) {
692 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
693 break;
695 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
699 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
701 TranslationBlock *tb1;
702 unsigned int n1;
704 for(;;) {
705 tb1 = *ptb;
706 n1 = (long)tb1 & 3;
707 tb1 = (TranslationBlock *)((long)tb1 & ~3);
708 if (tb1 == tb) {
709 *ptb = tb1->page_next[n1];
710 break;
712 ptb = &tb1->page_next[n1];
716 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
718 TranslationBlock *tb1, **ptb;
719 unsigned int n1;
721 ptb = &tb->jmp_next[n];
722 tb1 = *ptb;
723 if (tb1) {
724 /* find tb(n) in circular list */
725 for(;;) {
726 tb1 = *ptb;
727 n1 = (long)tb1 & 3;
728 tb1 = (TranslationBlock *)((long)tb1 & ~3);
729 if (n1 == n && tb1 == tb)
730 break;
731 if (n1 == 2) {
732 ptb = &tb1->jmp_first;
733 } else {
734 ptb = &tb1->jmp_next[n1];
737 /* now we can suppress tb(n) from the list */
738 *ptb = tb->jmp_next[n];
740 tb->jmp_next[n] = NULL;
744 /* reset the jump entry 'n' of a TB so that it is not chained to
745 another TB */
746 static inline void tb_reset_jump(TranslationBlock *tb, int n)
748 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
751 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
753 CPUState *env;
754 PageDesc *p;
755 unsigned int h, n1;
756 target_phys_addr_t phys_pc;
757 TranslationBlock *tb1, *tb2;
759 /* remove the TB from the hash list */
760 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
761 h = tb_phys_hash_func(phys_pc);
762 tb_remove(&tb_phys_hash[h], tb,
763 offsetof(TranslationBlock, phys_hash_next));
765 /* remove the TB from the page list */
766 if (tb->page_addr[0] != page_addr) {
767 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
768 tb_page_remove(&p->first_tb, tb);
769 invalidate_page_bitmap(p);
771 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
772 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
773 tb_page_remove(&p->first_tb, tb);
774 invalidate_page_bitmap(p);
777 tb_invalidated_flag = 1;
779 /* remove the TB from the hash list */
780 h = tb_jmp_cache_hash_func(tb->pc);
781 for(env = first_cpu; env != NULL; env = env->next_cpu) {
782 if (env->tb_jmp_cache[h] == tb)
783 env->tb_jmp_cache[h] = NULL;
786 /* suppress this TB from the two jump lists */
787 tb_jmp_remove(tb, 0);
788 tb_jmp_remove(tb, 1);
790 /* suppress any remaining jumps to this TB */
791 tb1 = tb->jmp_first;
792 for(;;) {
793 n1 = (long)tb1 & 3;
794 if (n1 == 2)
795 break;
796 tb1 = (TranslationBlock *)((long)tb1 & ~3);
797 tb2 = tb1->jmp_next[n1];
798 tb_reset_jump(tb1, n1);
799 tb1->jmp_next[n1] = NULL;
800 tb1 = tb2;
802 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
804 tb_phys_invalidate_count++;
807 static inline void set_bits(uint8_t *tab, int start, int len)
809 int end, mask, end1;
811 end = start + len;
812 tab += start >> 3;
813 mask = 0xff << (start & 7);
814 if ((start & ~7) == (end & ~7)) {
815 if (start < end) {
816 mask &= ~(0xff << (end & 7));
817 *tab |= mask;
819 } else {
820 *tab++ |= mask;
821 start = (start + 8) & ~7;
822 end1 = end & ~7;
823 while (start < end1) {
824 *tab++ = 0xff;
825 start += 8;
827 if (start < end) {
828 mask = ~(0xff << (end & 7));
829 *tab |= mask;
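/* Illustration: set_bits(tab, 3, 10) marks bits 3..12, i.e. it does
   tab[0] |= 0xf8 and tab[1] |= 0x1f. */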
834 static void build_page_bitmap(PageDesc *p)
836 int n, tb_start, tb_end;
837 TranslationBlock *tb;
839 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
841 tb = p->first_tb;
842 while (tb != NULL) {
843 n = (long)tb & 3;
844 tb = (TranslationBlock *)((long)tb & ~3);
845 /* NOTE: this is subtle as a TB may span two physical pages */
846 if (n == 0) {
847 /* NOTE: tb_end may be after the end of the page, but
848 it is not a problem */
849 tb_start = tb->pc & ~TARGET_PAGE_MASK;
850 tb_end = tb_start + tb->size;
851 if (tb_end > TARGET_PAGE_SIZE)
852 tb_end = TARGET_PAGE_SIZE;
853 } else {
854 tb_start = 0;
855 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
857 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
858 tb = tb->page_next[n];
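/* The bitmap holds one bit per byte of the guest page; a set bit means that
   byte is covered by at least one TB.  tb_invalidate_phys_page_fast() uses
   it to skip the expensive invalidation path for writes that do not touch
   translated code. */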
862 TranslationBlock *tb_gen_code(CPUState *env,
863 target_ulong pc, target_ulong cs_base,
864 int flags, int cflags)
866 TranslationBlock *tb;
867 uint8_t *tc_ptr;
868 target_ulong phys_pc, phys_page2, virt_page2;
869 int code_gen_size;
871 phys_pc = get_phys_addr_code(env, pc);
872 tb = tb_alloc(pc);
873 if (!tb) {
874 /* flush must be done */
875 tb_flush(env);
876 /* cannot fail at this point */
877 tb = tb_alloc(pc);
878 /* Don't forget to invalidate previous TB info. */
879 tb_invalidated_flag = 1;
881 tc_ptr = code_gen_ptr;
882 tb->tc_ptr = tc_ptr;
883 tb->cs_base = cs_base;
884 tb->flags = flags;
885 tb->cflags = cflags;
886 cpu_gen_code(env, tb, &code_gen_size);
887 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
889 /* check next page if needed */
890 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
891 phys_page2 = -1;
892 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
893 phys_page2 = get_phys_addr_code(env, virt_page2);
895 tb_link_phys(tb, phys_pc, phys_page2);
896 return tb;
899 /* invalidate all TBs which intersect with the target physical page
900 starting in range [start;end[. NOTE: start and end must refer to
901 the same physical page. 'is_cpu_write_access' should be true if called
902 from a real cpu write access: the virtual CPU will exit the current
903 TB if code is modified inside this TB. */
904 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
905 int is_cpu_write_access)
907 TranslationBlock *tb, *tb_next, *saved_tb;
908 CPUState *env = cpu_single_env;
909 target_ulong tb_start, tb_end;
910 PageDesc *p;
911 int n;
912 #ifdef TARGET_HAS_PRECISE_SMC
913 int current_tb_not_found = is_cpu_write_access;
914 TranslationBlock *current_tb = NULL;
915 int current_tb_modified = 0;
916 target_ulong current_pc = 0;
917 target_ulong current_cs_base = 0;
918 int current_flags = 0;
919 #endif /* TARGET_HAS_PRECISE_SMC */
921 p = page_find(start >> TARGET_PAGE_BITS);
922 if (!p)
923 return;
924 if (!p->code_bitmap &&
925 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
926 is_cpu_write_access) {
927 /* build code bitmap */
928 build_page_bitmap(p);
931 /* we remove all the TBs in the range [start, end[ */
932 /* XXX: see if in some cases it could be faster to invalidate all the code */
933 tb = p->first_tb;
934 while (tb != NULL) {
935 n = (long)tb & 3;
936 tb = (TranslationBlock *)((long)tb & ~3);
937 tb_next = tb->page_next[n];
938 /* NOTE: this is subtle as a TB may span two physical pages */
939 if (n == 0) {
940 /* NOTE: tb_end may be after the end of the page, but
941 it is not a problem */
942 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
943 tb_end = tb_start + tb->size;
944 } else {
945 tb_start = tb->page_addr[1];
946 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
948 if (!(tb_end <= start || tb_start >= end)) {
949 #ifdef TARGET_HAS_PRECISE_SMC
950 if (current_tb_not_found) {
951 current_tb_not_found = 0;
952 current_tb = NULL;
953 if (env->mem_io_pc) {
954 /* now we have a real cpu fault */
955 current_tb = tb_find_pc(env->mem_io_pc);
958 if (current_tb == tb &&
959 (current_tb->cflags & CF_COUNT_MASK) != 1) {
960 /* If we are modifying the current TB, we must stop
961 its execution. We could be more precise by checking
962 that the modification is after the current PC, but it
963 would require a specialized function to partially
964 restore the CPU state */
966 current_tb_modified = 1;
967 cpu_restore_state(current_tb, env,
968 env->mem_io_pc, NULL);
969 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
970 &current_flags);
972 #endif /* TARGET_HAS_PRECISE_SMC */
973 /* we need to do that to handle the case where a signal
974 occurs while doing tb_phys_invalidate() */
975 saved_tb = NULL;
976 if (env) {
977 saved_tb = env->current_tb;
978 env->current_tb = NULL;
980 tb_phys_invalidate(tb, -1);
981 if (env) {
982 env->current_tb = saved_tb;
983 if (env->interrupt_request && env->current_tb)
984 cpu_interrupt(env, env->interrupt_request);
987 tb = tb_next;
989 #if !defined(CONFIG_USER_ONLY)
990 /* if no code remaining, no need to continue to use slow writes */
991 if (!p->first_tb) {
992 invalidate_page_bitmap(p);
993 if (is_cpu_write_access) {
994 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
997 #endif
998 #ifdef TARGET_HAS_PRECISE_SMC
999 if (current_tb_modified) {
1000 /* we generate a block containing just the instruction
1001 modifying the memory. It will ensure that it cannot modify
1002 itself */
1003 env->current_tb = NULL;
1004 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1005 cpu_resume_from_signal(env, NULL);
1007 #endif
1010 /* len must be <= 8 and start must be a multiple of len */
1011 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1013 PageDesc *p;
1014 int offset, b;
1015 #if 0
1016 if (1) {
1017 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1018 cpu_single_env->mem_io_vaddr, len,
1019 cpu_single_env->eip,
1020 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1022 #endif
1023 p = page_find(start >> TARGET_PAGE_BITS);
1024 if (!p)
1025 return;
1026 if (p->code_bitmap) {
1027 offset = start & ~TARGET_PAGE_MASK;
1028 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1029 if (b & ((1 << len) - 1))
1030 goto do_invalidate;
1031 } else {
1032 do_invalidate:
1033 tb_invalidate_phys_page_range(start, start + len, 1);
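/* Example of the bitmap test above: for a 4-byte write, b holds the bitmap
   bits for the written offsets, so the slow invalidation runs only when
   (b & 0xf) != 0, i.e. when some translated code overlaps the write. */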
1037 #if !defined(CONFIG_SOFTMMU)
1038 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1039 unsigned long pc, void *puc)
1041 TranslationBlock *tb;
1042 PageDesc *p;
1043 int n;
1044 #ifdef TARGET_HAS_PRECISE_SMC
1045 TranslationBlock *current_tb = NULL;
1046 CPUState *env = cpu_single_env;
1047 int current_tb_modified = 0;
1048 target_ulong current_pc = 0;
1049 target_ulong current_cs_base = 0;
1050 int current_flags = 0;
1051 #endif
1053 addr &= TARGET_PAGE_MASK;
1054 p = page_find(addr >> TARGET_PAGE_BITS);
1055 if (!p)
1056 return;
1057 tb = p->first_tb;
1058 #ifdef TARGET_HAS_PRECISE_SMC
1059 if (tb && pc != 0) {
1060 current_tb = tb_find_pc(pc);
1062 #endif
1063 while (tb != NULL) {
1064 n = (long)tb & 3;
1065 tb = (TranslationBlock *)((long)tb & ~3);
1066 #ifdef TARGET_HAS_PRECISE_SMC
1067 if (current_tb == tb &&
1068 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1069 /* If we are modifying the current TB, we must stop
1070 its execution. We could be more precise by checking
1071 that the modification is after the current PC, but it
1072 would require a specialized function to partially
1073 restore the CPU state */
1075 current_tb_modified = 1;
1076 cpu_restore_state(current_tb, env, pc, puc);
1077 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1078 &current_flags);
1080 #endif /* TARGET_HAS_PRECISE_SMC */
1081 tb_phys_invalidate(tb, addr);
1082 tb = tb->page_next[n];
1084 p->first_tb = NULL;
1085 #ifdef TARGET_HAS_PRECISE_SMC
1086 if (current_tb_modified) {
1087 /* we generate a block containing just the instruction
1088 modifying the memory. It will ensure that it cannot modify
1089 itself */
1090 env->current_tb = NULL;
1091 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1092 cpu_resume_from_signal(env, puc);
1094 #endif
1096 #endif
1098 /* add the tb in the target page and protect it if necessary */
1099 static inline void tb_alloc_page(TranslationBlock *tb,
1100 unsigned int n, target_ulong page_addr)
1102 PageDesc *p;
1103 TranslationBlock *last_first_tb;
1105 tb->page_addr[n] = page_addr;
1106 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1107 tb->page_next[n] = p->first_tb;
1108 last_first_tb = p->first_tb;
1109 p->first_tb = (TranslationBlock *)((long)tb | n);
1110 invalidate_page_bitmap(p);
1112 #if defined(TARGET_HAS_SMC) || 1
1114 #if defined(CONFIG_USER_ONLY)
1115 if (p->flags & PAGE_WRITE) {
1116 target_ulong addr;
1117 PageDesc *p2;
1118 int prot;
1120 /* force the host page as non writable (writes will have a
1121 page fault + mprotect overhead) */
1122 page_addr &= qemu_host_page_mask;
1123 prot = 0;
1124 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1125 addr += TARGET_PAGE_SIZE) {
1127 p2 = page_find (addr >> TARGET_PAGE_BITS);
1128 if (!p2)
1129 continue;
1130 prot |= p2->flags;
1131 p2->flags &= ~PAGE_WRITE;
1132 page_get_flags(addr);
1134 mprotect(g2h(page_addr), qemu_host_page_size,
1135 (prot & PAGE_BITS) & ~PAGE_WRITE);
1136 #ifdef DEBUG_TB_INVALIDATE
1137 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1138 page_addr);
1139 #endif
1141 #else
1142 /* if some code is already present, then the pages are already
1143 protected. So we handle the case where only the first TB is
1144 allocated in a physical page */
1145 if (!last_first_tb) {
1146 tlb_protect_code(page_addr);
1148 #endif
1150 #endif /* TARGET_HAS_SMC */
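/* The pointers stored in these TB lists (first_tb/page_next and
   jmp_first/jmp_next) carry a tag in their two low bits: 0 or 1 names which
   of the TB's (at most) two pages or jump slots the link belongs to, and 2
   marks the list head (the TB itself).  This is why the list walkers mask
   with ~3 and stop when n1 == 2. */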
1153 /* Allocate a new translation block. Flush the translation buffer if
1154 too many translation blocks or too much generated code. */
1155 TranslationBlock *tb_alloc(target_ulong pc)
1157 TranslationBlock *tb;
1159 if (nb_tbs >= code_gen_max_blocks ||
1160 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1161 return NULL;
1162 tb = &tbs[nb_tbs++];
1163 tb->pc = pc;
1164 tb->cflags = 0;
1165 return tb;
1168 void tb_free(TranslationBlock *tb)
1170     /* In practice this is mostly used for single-use temporary TBs.
1171 Ignore the hard cases and just back up if this TB happens to
1172 be the last one generated. */
1173 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1174 code_gen_ptr = tb->tc_ptr;
1175 nb_tbs--;
1179 /* add a new TB and link it to the physical page tables. phys_page2 is
1180 (-1) to indicate that only one page contains the TB. */
1181 void tb_link_phys(TranslationBlock *tb,
1182 target_ulong phys_pc, target_ulong phys_page2)
1184 unsigned int h;
1185 TranslationBlock **ptb;
1187 /* Grab the mmap lock to stop another thread invalidating this TB
1188 before we are done. */
1189 mmap_lock();
1190 /* add in the physical hash table */
1191 h = tb_phys_hash_func(phys_pc);
1192 ptb = &tb_phys_hash[h];
1193 tb->phys_hash_next = *ptb;
1194 *ptb = tb;
1196 /* add in the page list */
1197 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1198 if (phys_page2 != -1)
1199 tb_alloc_page(tb, 1, phys_page2);
1200 else
1201 tb->page_addr[1] = -1;
1203 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1204 tb->jmp_next[0] = NULL;
1205 tb->jmp_next[1] = NULL;
1207 /* init original jump addresses */
1208 if (tb->tb_next_offset[0] != 0xffff)
1209 tb_reset_jump(tb, 0);
1210 if (tb->tb_next_offset[1] != 0xffff)
1211 tb_reset_jump(tb, 1);
1213 #ifdef DEBUG_TB_CHECK
1214 tb_page_check();
1215 #endif
1216 mmap_unlock();
1219 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1220 tb[1].tc_ptr. Return NULL if not found */
1221 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1223 int m_min, m_max, m;
1224 unsigned long v;
1225 TranslationBlock *tb;
1227 if (nb_tbs <= 0)
1228 return NULL;
1229 if (tc_ptr < (unsigned long)code_gen_buffer ||
1230 tc_ptr >= (unsigned long)code_gen_ptr)
1231 return NULL;
1232 /* binary search (cf Knuth) */
1233 m_min = 0;
1234 m_max = nb_tbs - 1;
1235 while (m_min <= m_max) {
1236 m = (m_min + m_max) >> 1;
1237 tb = &tbs[m];
1238 v = (unsigned long)tb->tc_ptr;
1239 if (v == tc_ptr)
1240 return tb;
1241 else if (tc_ptr < v) {
1242 m_max = m - 1;
1243 } else {
1244 m_min = m + 1;
1247 return &tbs[m_max];
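/* This works because tbs[] is filled in generation order and code_gen_ptr
   only grows between flushes, so tc_ptr values are ascending; the final
   return picks the block whose tc_ptr is the greatest one <= tc_ptr, i.e.
   the block containing the host PC. */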
1250 static void tb_reset_jump_recursive(TranslationBlock *tb);
1252 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1254 TranslationBlock *tb1, *tb_next, **ptb;
1255 unsigned int n1;
1257 tb1 = tb->jmp_next[n];
1258 if (tb1 != NULL) {
1259 /* find head of list */
1260 for(;;) {
1261 n1 = (long)tb1 & 3;
1262 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1263 if (n1 == 2)
1264 break;
1265 tb1 = tb1->jmp_next[n1];
1267         /* we are now sure that tb jumps to tb1 */
1268 tb_next = tb1;
1270 /* remove tb from the jmp_first list */
1271 ptb = &tb_next->jmp_first;
1272 for(;;) {
1273 tb1 = *ptb;
1274 n1 = (long)tb1 & 3;
1275 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1276 if (n1 == n && tb1 == tb)
1277 break;
1278 ptb = &tb1->jmp_next[n1];
1280 *ptb = tb->jmp_next[n];
1281 tb->jmp_next[n] = NULL;
1283 /* suppress the jump to next tb in generated code */
1284 tb_reset_jump(tb, n);
1286 /* suppress jumps in the tb on which we could have jumped */
1287 tb_reset_jump_recursive(tb_next);
1291 static void tb_reset_jump_recursive(TranslationBlock *tb)
1293 tb_reset_jump_recursive2(tb, 0);
1294 tb_reset_jump_recursive2(tb, 1);
1297 #if defined(TARGET_HAS_ICE)
1298 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1300 target_phys_addr_t addr;
1301 target_ulong pd;
1302 ram_addr_t ram_addr;
1303 PhysPageDesc *p;
1305 addr = cpu_get_phys_page_debug(env, pc);
1306 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1307 if (!p) {
1308 pd = IO_MEM_UNASSIGNED;
1309 } else {
1310 pd = p->phys_offset;
1312 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1313 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1315 #endif
1317 /* Add a watchpoint. */
1318 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1319 int flags, CPUWatchpoint **watchpoint)
1321 target_ulong len_mask = ~(len - 1);
1322 CPUWatchpoint *wp;
1324 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1325 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1326 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1327 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1328 return -EINVAL;
1330 wp = qemu_malloc(sizeof(*wp));
1332 wp->vaddr = addr;
1333 wp->len_mask = len_mask;
1334 wp->flags = flags;
1336 /* keep all GDB-injected watchpoints in front */
1337 if (flags & BP_GDB)
1338 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1339 else
1340 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1342 tlb_flush_page(env, addr);
1344 if (watchpoint)
1345 *watchpoint = wp;
1346 return 0;
1349 /* Remove a specific watchpoint. */
1350 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1351 int flags)
1353 target_ulong len_mask = ~(len - 1);
1354 CPUWatchpoint *wp;
1356 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1357 if (addr == wp->vaddr && len_mask == wp->len_mask
1358 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1359 cpu_watchpoint_remove_by_ref(env, wp);
1360 return 0;
1363 return -ENOENT;
1366 /* Remove a specific watchpoint by reference. */
1367 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1369 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1371 tlb_flush_page(env, watchpoint->vaddr);
1373 qemu_free(watchpoint);
1376 /* Remove all matching watchpoints. */
1377 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1379 CPUWatchpoint *wp, *next;
1381 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1382 if (wp->flags & mask)
1383 cpu_watchpoint_remove_by_ref(env, wp);
1387 /* Add a breakpoint. */
1388 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1389 CPUBreakpoint **breakpoint)
1391 #if defined(TARGET_HAS_ICE)
1392 CPUBreakpoint *bp;
1394 bp = qemu_malloc(sizeof(*bp));
1396 bp->pc = pc;
1397 bp->flags = flags;
1399 /* keep all GDB-injected breakpoints in front */
1400 if (flags & BP_GDB)
1401 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1402 else
1403 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1405 breakpoint_invalidate(env, pc);
1407 if (breakpoint)
1408 *breakpoint = bp;
1409 return 0;
1410 #else
1411 return -ENOSYS;
1412 #endif
1415 /* Remove a specific breakpoint. */
1416 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1418 #if defined(TARGET_HAS_ICE)
1419 CPUBreakpoint *bp;
1421 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1422 if (bp->pc == pc && bp->flags == flags) {
1423 cpu_breakpoint_remove_by_ref(env, bp);
1424 return 0;
1427 return -ENOENT;
1428 #else
1429 return -ENOSYS;
1430 #endif
1433 /* Remove a specific breakpoint by reference. */
1434 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1436 #if defined(TARGET_HAS_ICE)
1437 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1439 breakpoint_invalidate(env, breakpoint->pc);
1441 qemu_free(breakpoint);
1442 #endif
1445 /* Remove all matching breakpoints. */
1446 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1448 #if defined(TARGET_HAS_ICE)
1449 CPUBreakpoint *bp, *next;
1451 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1452 if (bp->flags & mask)
1453 cpu_breakpoint_remove_by_ref(env, bp);
1455 #endif
1458 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1459 CPU loop after each instruction */
1460 void cpu_single_step(CPUState *env, int enabled)
1462 #if defined(TARGET_HAS_ICE)
1463 if (env->singlestep_enabled != enabled) {
1464 env->singlestep_enabled = enabled;
1465 if (kvm_enabled())
1466 kvm_update_guest_debug(env, 0);
1467 else {
1468             /* must flush all the translated code to avoid inconsistencies */
1469 /* XXX: only flush what is necessary */
1470 tb_flush(env);
1473 #endif
1476 /* enable or disable low levels log */
1477 void cpu_set_log(int log_flags)
1479 loglevel = log_flags;
1480 if (loglevel && !logfile) {
1481 logfile = fopen(logfilename, log_append ? "a" : "w");
1482 if (!logfile) {
1483 perror(logfilename);
1484 _exit(1);
1486 #if !defined(CONFIG_SOFTMMU)
1487 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1489 static char logfile_buf[4096];
1490 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1492 #else
1493 setvbuf(logfile, NULL, _IOLBF, 0);
1494 #endif
1495 log_append = 1;
1497 if (!loglevel && logfile) {
1498 fclose(logfile);
1499 logfile = NULL;
1503 void cpu_set_log_filename(const char *filename)
1505 logfilename = strdup(filename);
1506 if (logfile) {
1507 fclose(logfile);
1508 logfile = NULL;
1510 cpu_set_log(loglevel);
1513 /* mask must never be zero, except for A20 change call */
1514 void cpu_interrupt(CPUState *env, int mask)
1516 #if !defined(USE_NPTL)
1517 TranslationBlock *tb;
1518 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1519 #endif
1520 int old_mask;
1522 old_mask = env->interrupt_request;
1523 /* FIXME: This is probably not threadsafe. A different thread could
1524 be in the middle of a read-modify-write operation. */
1525 env->interrupt_request |= mask;
1526 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1527 kvm_update_interrupt_request(env);
1528 #if defined(USE_NPTL)
1529 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1530 problem and hope the cpu will stop of its own accord. For userspace
1531 emulation this often isn't actually as bad as it sounds. Often
1532 signals are used primarily to interrupt blocking syscalls. */
1533 #else
1534 if (use_icount) {
1535 env->icount_decr.u16.high = 0xffff;
1536 #ifndef CONFIG_USER_ONLY
1537 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1538 an async event happened and we need to process it. */
1539 if (!can_do_io(env)
1540 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1541 cpu_abort(env, "Raised interrupt while not in I/O function");
1543 #endif
1544 } else {
1545 tb = env->current_tb;
1546 /* if the cpu is currently executing code, we must unlink it and
1547 all the potentially executing TB */
1548 if (tb && !testandset(&interrupt_lock)) {
1549 env->current_tb = NULL;
1550 tb_reset_jump_recursive(tb);
1551 resetlock(&interrupt_lock);
1554 #endif
1557 void cpu_reset_interrupt(CPUState *env, int mask)
1559 env->interrupt_request &= ~mask;
1562 const CPULogItem cpu_log_items[] = {
1563 { CPU_LOG_TB_OUT_ASM, "out_asm",
1564 "show generated host assembly code for each compiled TB" },
1565 { CPU_LOG_TB_IN_ASM, "in_asm",
1566 "show target assembly code for each compiled TB" },
1567 { CPU_LOG_TB_OP, "op",
1568 "show micro ops for each compiled TB" },
1569 { CPU_LOG_TB_OP_OPT, "op_opt",
1570 "show micro ops "
1571 #ifdef TARGET_I386
1572 "before eflags optimization and "
1573 #endif
1574 "after liveness analysis" },
1575 { CPU_LOG_INT, "int",
1576 "show interrupts/exceptions in short format" },
1577 { CPU_LOG_EXEC, "exec",
1578 "show trace before each executed TB (lots of logs)" },
1579 { CPU_LOG_TB_CPU, "cpu",
1580 "show CPU state before block translation" },
1581 #ifdef TARGET_I386
1582 { CPU_LOG_PCALL, "pcall",
1583 "show protected mode far calls/returns/exceptions" },
1584 { CPU_LOG_RESET, "cpu_reset",
1585 "show CPU state before CPU resets" },
1586 #endif
1587 #ifdef DEBUG_IOPORT
1588 { CPU_LOG_IOPORT, "ioport",
1589 "show all i/o ports accesses" },
1590 #endif
1591 { 0, NULL, NULL },
1594 static int cmp1(const char *s1, int n, const char *s2)
1596 if (strlen(s2) != n)
1597 return 0;
1598 return memcmp(s1, s2, n) == 0;
1601 /* takes a comma-separated list of log masks. Returns 0 on error. */
1602 int cpu_str_to_log_mask(const char *str)
1604 const CPULogItem *item;
1605 int mask;
1606 const char *p, *p1;
1608 p = str;
1609 mask = 0;
1610 for(;;) {
1611 p1 = strchr(p, ',');
1612 if (!p1)
1613 p1 = p + strlen(p);
1614 if(cmp1(p,p1-p,"all")) {
1615 for(item = cpu_log_items; item->mask != 0; item++) {
1616 mask |= item->mask;
1618 } else {
1619 for(item = cpu_log_items; item->mask != 0; item++) {
1620 if (cmp1(p, p1 - p, item->name))
1621 goto found;
1623 return 0;
1625 found:
1626 mask |= item->mask;
1627 if (*p1 != ',')
1628 break;
1629 p = p1 + 1;
1631 return mask;
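/* Usage example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" sets every mask, and any
   unknown name makes the whole call return 0. */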
1634 void cpu_abort(CPUState *env, const char *fmt, ...)
1636 va_list ap;
1637 va_list ap2;
1639 va_start(ap, fmt);
1640 va_copy(ap2, ap);
1641 fprintf(stderr, "qemu: fatal: ");
1642 vfprintf(stderr, fmt, ap);
1643 fprintf(stderr, "\n");
1644 #ifdef TARGET_I386
1645 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1646 #else
1647 cpu_dump_state(env, stderr, fprintf, 0);
1648 #endif
1649 if (qemu_log_enabled()) {
1650 qemu_log("qemu: fatal: ");
1651 qemu_log_vprintf(fmt, ap2);
1652 qemu_log("\n");
1653 #ifdef TARGET_I386
1654 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1655 #else
1656 log_cpu_state(env, 0);
1657 #endif
1658 qemu_log_flush();
1659 qemu_log_close();
1661 va_end(ap2);
1662 va_end(ap);
1663 abort();
1666 CPUState *cpu_copy(CPUState *env)
1668 CPUState *new_env = cpu_init(env->cpu_model_str);
1669 CPUState *next_cpu = new_env->next_cpu;
1670 int cpu_index = new_env->cpu_index;
1671 #if defined(TARGET_HAS_ICE)
1672 CPUBreakpoint *bp;
1673 CPUWatchpoint *wp;
1674 #endif
1676 memcpy(new_env, env, sizeof(CPUState));
1678 /* Preserve chaining and index. */
1679 new_env->next_cpu = next_cpu;
1680 new_env->cpu_index = cpu_index;
1682 /* Clone all break/watchpoints.
1683 Note: Once we support ptrace with hw-debug register access, make sure
1684 BP_CPU break/watchpoints are handled correctly on clone. */
1685     TAILQ_INIT(&new_env->breakpoints);
1686     TAILQ_INIT(&new_env->watchpoints);
1687 #if defined(TARGET_HAS_ICE)
1688 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1689 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1691 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1692 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1693 wp->flags, NULL);
1695 #endif
1697 return new_env;
1700 #if !defined(CONFIG_USER_ONLY)
1702 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1704 unsigned int i;
1706 /* Discard jump cache entries for any tb which might potentially
1707 overlap the flushed page. */
1708 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1709 memset (&env->tb_jmp_cache[i], 0,
1710 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1712 i = tb_jmp_cache_hash_page(addr);
1713 memset (&env->tb_jmp_cache[i], 0,
1714 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1717 /* NOTE: if flush_global is true, also flush global entries (not
1718 implemented yet) */
1719 void tlb_flush(CPUState *env, int flush_global)
1721 int i;
1723 #if defined(DEBUG_TLB)
1724 printf("tlb_flush:\n");
1725 #endif
1726 /* must reset current TB so that interrupts cannot modify the
1727 links while we are modifying them */
1728 env->current_tb = NULL;
1730 for(i = 0; i < CPU_TLB_SIZE; i++) {
1731 env->tlb_table[0][i].addr_read = -1;
1732 env->tlb_table[0][i].addr_write = -1;
1733 env->tlb_table[0][i].addr_code = -1;
1734 env->tlb_table[1][i].addr_read = -1;
1735 env->tlb_table[1][i].addr_write = -1;
1736 env->tlb_table[1][i].addr_code = -1;
1737 #if (NB_MMU_MODES >= 3)
1738 env->tlb_table[2][i].addr_read = -1;
1739 env->tlb_table[2][i].addr_write = -1;
1740 env->tlb_table[2][i].addr_code = -1;
1741 #if (NB_MMU_MODES == 4)
1742 env->tlb_table[3][i].addr_read = -1;
1743 env->tlb_table[3][i].addr_write = -1;
1744 env->tlb_table[3][i].addr_code = -1;
1745 #endif
1746 #endif
1749 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1751 #ifdef USE_KQEMU
1752 if (env->kqemu_enabled) {
1753 kqemu_flush(env, flush_global);
1755 #endif
1756 tlb_flush_count++;
1759 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1761 if (addr == (tlb_entry->addr_read &
1762 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1763 addr == (tlb_entry->addr_write &
1764 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1765 addr == (tlb_entry->addr_code &
1766 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1767 tlb_entry->addr_read = -1;
1768 tlb_entry->addr_write = -1;
1769 tlb_entry->addr_code = -1;
1773 void tlb_flush_page(CPUState *env, target_ulong addr)
1775 int i;
1777 #if defined(DEBUG_TLB)
1778 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1779 #endif
1780 /* must reset current TB so that interrupts cannot modify the
1781 links while we are modifying them */
1782 env->current_tb = NULL;
1784 addr &= TARGET_PAGE_MASK;
1785 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1786 tlb_flush_entry(&env->tlb_table[0][i], addr);
1787 tlb_flush_entry(&env->tlb_table[1][i], addr);
1788 #if (NB_MMU_MODES >= 3)
1789 tlb_flush_entry(&env->tlb_table[2][i], addr);
1790 #if (NB_MMU_MODES == 4)
1791 tlb_flush_entry(&env->tlb_table[3][i], addr);
1792 #endif
1793 #endif
1795 tlb_flush_jmp_cache(env, addr);
1797 #ifdef USE_KQEMU
1798 if (env->kqemu_enabled) {
1799 kqemu_flush_page(env, addr);
1801 #endif
1804 /* update the TLBs so that writes to code in the virtual page 'addr'
1805 can be detected */
1806 static void tlb_protect_code(ram_addr_t ram_addr)
1808 cpu_physical_memory_reset_dirty(ram_addr,
1809 ram_addr + TARGET_PAGE_SIZE,
1810 CODE_DIRTY_FLAG);
1813 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1814 tested for self modifying code */
1815 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1816 target_ulong vaddr)
1818 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1821 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1822 unsigned long start, unsigned long length)
1824 unsigned long addr;
1825 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1826 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1827 if ((addr - start) < length) {
1828 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1833 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1834 int dirty_flags)
1836 CPUState *env;
1837 unsigned long length, start1;
1838 int i, mask, len;
1839 uint8_t *p;
1841 start &= TARGET_PAGE_MASK;
1842 end = TARGET_PAGE_ALIGN(end);
1844 length = end - start;
1845 if (length == 0)
1846 return;
1847 len = length >> TARGET_PAGE_BITS;
1848 #ifdef USE_KQEMU
1849 /* XXX: should not depend on cpu context */
1850 env = first_cpu;
1851 if (env->kqemu_enabled) {
1852 ram_addr_t addr;
1853 addr = start;
1854 for(i = 0; i < len; i++) {
1855 kqemu_set_notdirty(env, addr);
1856 addr += TARGET_PAGE_SIZE;
1859 #endif
1860 mask = ~dirty_flags;
1861 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1862 for(i = 0; i < len; i++)
1863 p[i] &= mask;
1865 /* we modify the TLB cache so that the dirty bit will be set again
1866 when accessing the range */
1867 start1 = start + (unsigned long)phys_ram_base;
1868 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1869 for(i = 0; i < CPU_TLB_SIZE; i++)
1870 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1871 for(i = 0; i < CPU_TLB_SIZE; i++)
1872 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1873 #if (NB_MMU_MODES >= 3)
1874 for(i = 0; i < CPU_TLB_SIZE; i++)
1875 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1876 #if (NB_MMU_MODES == 4)
1877 for(i = 0; i < CPU_TLB_SIZE; i++)
1878 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1879 #endif
1880 #endif
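/* phys_ram_dirty keeps one byte of dirty flags per target page.  Clearing
   the requested flags and rewriting matching RAM TLB entries with
   TLB_NOTDIRTY forces the next guest write to such a page through the slow
   path, which can mark the page dirty again. */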
1884 int cpu_physical_memory_set_dirty_tracking(int enable)
1886 int r=0;
1888 if (kvm_enabled())
1889 r = kvm_physical_memory_set_dirty_tracking(enable);
1890 in_migration = enable;
1891 return r;
1894 int cpu_physical_memory_get_dirty_tracking(void)
1896 return in_migration;
1899 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1901 if (kvm_enabled())
1902 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1905 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1907 ram_addr_t ram_addr;
1909 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1910 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1911 tlb_entry->addend - (unsigned long)phys_ram_base;
1912 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1913 tlb_entry->addr_write |= TLB_NOTDIRTY;
1918 /* update the TLB according to the current state of the dirty bits */
1919 void cpu_tlb_update_dirty(CPUState *env)
1921 int i;
1922 for(i = 0; i < CPU_TLB_SIZE; i++)
1923 tlb_update_dirty(&env->tlb_table[0][i]);
1924 for(i = 0; i < CPU_TLB_SIZE; i++)
1925 tlb_update_dirty(&env->tlb_table[1][i]);
1926 #if (NB_MMU_MODES >= 3)
1927 for(i = 0; i < CPU_TLB_SIZE; i++)
1928 tlb_update_dirty(&env->tlb_table[2][i]);
1929 #if (NB_MMU_MODES == 4)
1930 for(i = 0; i < CPU_TLB_SIZE; i++)
1931 tlb_update_dirty(&env->tlb_table[3][i]);
1932 #endif
1933 #endif
1936 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1938 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1939 tlb_entry->addr_write = vaddr;
1942 /* update the TLB corresponding to virtual page vaddr
1943 so that it is no longer dirty */
1944 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1946 int i;
1948 vaddr &= TARGET_PAGE_MASK;
1949 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1950 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1951 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1952 #if (NB_MMU_MODES >= 3)
1953 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1954 #if (NB_MMU_MODES == 4)
1955 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1956 #endif
1957 #endif
1960 /* add a new TLB entry. At most one entry for a given virtual address
1961 is permitted. Return 0 if OK or 2 if the page could not be mapped
1962 (can only happen in non SOFTMMU mode for I/O pages or pages
1963 conflicting with the host address space). */
1964 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1965 target_phys_addr_t paddr, int prot,
1966 int mmu_idx, int is_softmmu)
1968 PhysPageDesc *p;
1969 unsigned long pd;
1970 unsigned int index;
1971 target_ulong address;
1972 target_ulong code_address;
1973 target_phys_addr_t addend;
1974 int ret;
1975 CPUTLBEntry *te;
1976 CPUWatchpoint *wp;
1977 target_phys_addr_t iotlb;
1979 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1980 if (!p) {
1981 pd = IO_MEM_UNASSIGNED;
1982 } else {
1983 pd = p->phys_offset;
1985 #if defined(DEBUG_TLB)
1986 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1987 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1988 #endif
1990 ret = 0;
1991 address = vaddr;
1992 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1993 /* IO memory case (romd handled later) */
1994 address |= TLB_MMIO;
1996 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1997 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1998 /* Normal RAM. */
1999 iotlb = pd & TARGET_PAGE_MASK;
2000 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2001 iotlb |= IO_MEM_NOTDIRTY;
2002 else
2003 iotlb |= IO_MEM_ROM;
2004 } else {
2005         /* IO handlers are currently passed a physical address.
2006 It would be nice to pass an offset from the base address
2007 of that region. This would avoid having to special case RAM,
2008 and avoid full address decoding in every device.
2009 We can't use the high bits of pd for this because
2010 IO_MEM_ROMD uses these as a ram address. */
2011 iotlb = (pd & ~TARGET_PAGE_MASK);
2012 if (p) {
2013 iotlb += p->region_offset;
2014 } else {
2015 iotlb += paddr;
2019 code_address = address;
2020 /* Make accesses to pages with watchpoints go via the
2021 watchpoint trap routines. */
2022 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2023 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2024 iotlb = io_mem_watch + paddr;
2025 /* TODO: The memory case can be optimized by not trapping
2026 reads of pages with a write breakpoint. */
2027 address |= TLB_MMIO;
2031 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2032 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2033 te = &env->tlb_table[mmu_idx][index];
2034 te->addend = addend - vaddr;
2035 if (prot & PAGE_READ) {
2036 te->addr_read = address;
2037 } else {
2038 te->addr_read = -1;
2041 if (prot & PAGE_EXEC) {
2042 te->addr_code = code_address;
2043 } else {
2044 te->addr_code = -1;
2046 if (prot & PAGE_WRITE) {
2047 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2048 (pd & IO_MEM_ROMD)) {
2049 /* Write access calls the I/O callback. */
2050 te->addr_write = address | TLB_MMIO;
2051 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2052 !cpu_physical_memory_is_dirty(pd)) {
2053 te->addr_write = address | TLB_NOTDIRTY;
2054 } else {
2055 te->addr_write = address;
2057 } else {
2058 te->addr_write = -1;
2060 return ret;
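#if 0   /* illustrative sketch only -- not compiled */
/* How the entries filled in above are consumed: a minimal version of the
   byte-load fast path that softmmu_template.h generates.  The page index
   picks a CPUTLBEntry, addr_read (which may carry TLB_MMIO/TLB_NOTDIRTY in
   its low bits) is compared against the page-aligned vaddr, and addend turns
   the guest virtual address into a host pointer into phys_ram_base.  The
   example_io_readb/example_slow_ldub helpers are hypothetical stand-ins for
   the I/O dispatch and tlb_fill() retry paths. */
static uint8_t example_ldub_mmu(CPUState *env1, target_ulong vaddr, int mmu_idx)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env1->tlb_table[mmu_idx][index];

    if ((vaddr & TARGET_PAGE_MASK) ==
        (e->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (e->addr_read & ~TARGET_PAGE_MASK) {
            /* TLB_MMIO (or watchpoint) page: dispatch through the handler
               recorded in env->iotlb[mmu_idx][index] */
            return example_io_readb(env1, mmu_idx, index, vaddr);  /* hypothetical */
        }
        /* plain RAM: addend converts the guest virtual address into a
           host pointer */
        return ldub_p((uint8_t *)(long)(vaddr + e->addend));
    }
    /* not in the TLB: the real helpers call tlb_fill(), which ends up in
       tlb_set_page_exec() above, then retry */
    return example_slow_ldub(env1, vaddr, mmu_idx);                /* hypothetical */
}
#endif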
2063 #else
2065 void tlb_flush(CPUState *env, int flush_global)
2069 void tlb_flush_page(CPUState *env, target_ulong addr)
2073 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2074 target_phys_addr_t paddr, int prot,
2075 int mmu_idx, int is_softmmu)
2077 return 0;
2080 /* dump memory mappings */
2081 void page_dump(FILE *f)
2083 unsigned long start, end;
2084 int i, j, prot, prot1;
2085 PageDesc *p;
2087 fprintf(f, "%-8s %-8s %-8s %s\n",
2088 "start", "end", "size", "prot");
2089 start = -1;
2090 end = -1;
2091 prot = 0;
2092 for(i = 0; i <= L1_SIZE; i++) {
2093 if (i < L1_SIZE)
2094 p = l1_map[i];
2095 else
2096 p = NULL;
2097 for(j = 0;j < L2_SIZE; j++) {
2098 if (!p)
2099 prot1 = 0;
2100 else
2101 prot1 = p[j].flags;
2102 if (prot1 != prot) {
2103 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2104 if (start != -1) {
2105 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2106 start, end, end - start,
2107 prot & PAGE_READ ? 'r' : '-',
2108 prot & PAGE_WRITE ? 'w' : '-',
2109 prot & PAGE_EXEC ? 'x' : '-');
2111 if (prot1 != 0)
2112 start = end;
2113 else
2114 start = -1;
2115 prot = prot1;
2117 if (!p)
2118 break;
2123 int page_get_flags(target_ulong address)
2125 PageDesc *p;
2127 p = page_find(address >> TARGET_PAGE_BITS);
2128 if (!p)
2129 return 0;
2130 return p->flags;
2133 /* modify the flags of a page and invalidate the code if
2134 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2135 depending on PAGE_WRITE */
2136 void page_set_flags(target_ulong start, target_ulong end, int flags)
2138 PageDesc *p;
2139 target_ulong addr;
2141 /* mmap_lock should already be held. */
2142 start = start & TARGET_PAGE_MASK;
2143 end = TARGET_PAGE_ALIGN(end);
2144 if (flags & PAGE_WRITE)
2145 flags |= PAGE_WRITE_ORG;
2146 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2147 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2148 /* We may be called for host regions that are outside guest
2149 address space. */
2150 if (!p)
2151 return;
2152 /* if the write protection is set, then we invalidate the code
2153 inside */
2154 if (!(p->flags & PAGE_WRITE) &&
2155 (flags & PAGE_WRITE) &&
2156 p->first_tb) {
2157 tb_invalidate_phys_page(addr, 0, NULL);
2159 p->flags = flags;
2163 int page_check_range(target_ulong start, target_ulong len, int flags)
2165 PageDesc *p;
2166 target_ulong end;
2167 target_ulong addr;
2169 if (start + len < start)
2170 /* we've wrapped around */
2171 return -1;
2173 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2174 start = start & TARGET_PAGE_MASK;
2176 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2177 p = page_find(addr >> TARGET_PAGE_BITS);
2178 if( !p )
2179 return -1;
2180 if( !(p->flags & PAGE_VALID) )
2181 return -1;
2183 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2184 return -1;
2185 if (flags & PAGE_WRITE) {
2186 if (!(p->flags & PAGE_WRITE_ORG))
2187 return -1;
2188 /* unprotect the page if it was put read-only because it
2189 contains translated code */
2190 if (!(p->flags & PAGE_WRITE)) {
2191 if (!page_unprotect(addr, 0, NULL))
2192 return -1;
2194 return 0;
2197 return 0;
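#if 0   /* illustrative sketch only -- not compiled */
/* How a user-mode target mmap/mprotect emulation might drive the page flag
   API above (the wrapper is hypothetical; linux-user/mmap.c is the real
   caller).  Making a range writable that previously held translated code
   invalidates the affected TBs via page_set_flags(), and page_check_range()
   is what later access_ok()-style checks rely on. */
static void example_track_prot(target_ulong start, target_ulong len, int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;        /* PAGE_WRITE_ORG is added automatically */
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;

    mmap_lock();
    page_set_flags(start, start + len, flags);
    mmap_unlock();

    if (page_check_range(start, len, flags & (PAGE_READ | PAGE_WRITE)) < 0) {
        /* not expected here: we just set compatible flags */
    }
}
#endif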
2200 /* called from signal handler: invalidate the code and unprotect the
2201 page. Return TRUE if the fault was successfully handled. */
2202 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2204 unsigned int page_index, prot, pindex;
2205 PageDesc *p, *p1;
2206 target_ulong host_start, host_end, addr;
2208 /* Technically this isn't safe inside a signal handler. However we
2209 know this only ever happens in a synchronous SEGV handler, so in
2210 practice it seems to be ok. */
2211 mmap_lock();
2213 host_start = address & qemu_host_page_mask;
2214 page_index = host_start >> TARGET_PAGE_BITS;
2215 p1 = page_find(page_index);
2216 if (!p1) {
2217 mmap_unlock();
2218 return 0;
2220 host_end = host_start + qemu_host_page_size;
2221 p = p1;
2222 prot = 0;
2223 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2224 prot |= p->flags;
2225 p++;
2227 /* if the page was really writable, then we change its
2228 protection back to writable */
2229 if (prot & PAGE_WRITE_ORG) {
2230 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2231 if (!(p1[pindex].flags & PAGE_WRITE)) {
2232 mprotect((void *)g2h(host_start), qemu_host_page_size,
2233 (prot & PAGE_BITS) | PAGE_WRITE);
2234 p1[pindex].flags |= PAGE_WRITE;
2235 /* and since the content will be modified, we must invalidate
2236 the corresponding translated code. */
2237 tb_invalidate_phys_page(address, pc, puc);
2238 #ifdef DEBUG_TB_CHECK
2239 tb_invalidate_check(address);
2240 #endif
2241 mmap_unlock();
2242 return 1;
2245 mmap_unlock();
2246 return 0;
2249 static inline void tlb_set_dirty(CPUState *env,
2250 unsigned long addr, target_ulong vaddr)
2253 #endif /* defined(CONFIG_USER_ONLY) */
2255 #if !defined(CONFIG_USER_ONLY)
2257 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2258 ram_addr_t memory, ram_addr_t region_offset);
2259 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2260 ram_addr_t orig_memory, ram_addr_t region_offset);
2261 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2262 need_subpage) \
2263 do { \
2264 if (addr > start_addr) \
2265 start_addr2 = 0; \
2266 else { \
2267 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2268 if (start_addr2 > 0) \
2269 need_subpage = 1; \
2272 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2273 end_addr2 = TARGET_PAGE_SIZE - 1; \
2274 else { \
2275 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2276 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2277 need_subpage = 1; \
2279 } while (0)
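/* Worked example (assuming TARGET_PAGE_SIZE == 0x1000): registering a
   0x200-byte region at start_addr = 0x10000100 visits a single page with
   addr == start_addr.  addr is not greater than start_addr, so
   start_addr2 = start_addr & ~TARGET_PAGE_MASK = 0x100 (> 0, so need_subpage
   is set); start_addr + orig_size - addr = 0x200 is less than
   TARGET_PAGE_SIZE, so end_addr2 = (start_addr + orig_size - 1) &
   ~TARGET_PAGE_MASK = 0x2ff (< 0xfff, need_subpage again).  The caller then
   routes only offsets 0x100..0x2ff of that page through subpage_register(),
   leaving the rest of the page on its previous handler (or
   IO_MEM_UNASSIGNED for a fresh page). */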
2281 /* register physical memory. 'size' must be a multiple of the target
2282 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2283 io memory page. The address used when calling the IO function is
2284 the offset from the start of the region, plus region_offset. Both
2285 start_addr and region_offset are rounded down to a page boundary
2286 before calculating this offset. This should not be a problem unless
2287 the low bits of start_addr and region_offset differ. */
2288 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2289 ram_addr_t size,
2290 ram_addr_t phys_offset,
2291 ram_addr_t region_offset)
2293 target_phys_addr_t addr, end_addr;
2294 PhysPageDesc *p;
2295 CPUState *env;
2296 ram_addr_t orig_size = size;
2297 void *subpage;
2299 #ifdef USE_KQEMU
2300 /* XXX: should not depend on cpu context */
2301 env = first_cpu;
2302 if (env->kqemu_enabled) {
2303 kqemu_set_phys_mem(start_addr, size, phys_offset);
2305 #endif
2306 if (kvm_enabled())
2307 kvm_set_phys_mem(start_addr, size, phys_offset);
2309 region_offset &= TARGET_PAGE_MASK;
2310 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2311 end_addr = start_addr + (target_phys_addr_t)size;
2312 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2313 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2314 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2315 ram_addr_t orig_memory = p->phys_offset;
2316 target_phys_addr_t start_addr2, end_addr2;
2317 int need_subpage = 0;
2319 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2320 need_subpage);
2321 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2322 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2323 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2324 &p->phys_offset, orig_memory,
2325 p->region_offset);
2326 } else {
2327 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2328 >> IO_MEM_SHIFT];
2330 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2331 region_offset);
2332 p->region_offset = 0;
2333 } else {
2334 p->phys_offset = phys_offset;
2335 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2336 (phys_offset & IO_MEM_ROMD))
2337 phys_offset += TARGET_PAGE_SIZE;
2339 } else {
2340 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2341 p->phys_offset = phys_offset;
2342 p->region_offset = region_offset;
2343 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2344 (phys_offset & IO_MEM_ROMD)) {
2345 phys_offset += TARGET_PAGE_SIZE;
2346 } else {
2347 target_phys_addr_t start_addr2, end_addr2;
2348 int need_subpage = 0;
2350 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2351 end_addr2, need_subpage);
2353 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2354 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2355 &p->phys_offset, IO_MEM_UNASSIGNED,
2357 subpage_register(subpage, start_addr2, end_addr2,
2358 phys_offset, region_offset);
2359 p->region_offset = 0;
2363 region_offset += TARGET_PAGE_SIZE;
2366 /* since each CPU stores ram addresses in its TLB cache, we must
2367 reset the modified entries */
2368 /* XXX: slow ! */
2369 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2370 tlb_flush(env, 1);
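#if 0   /* illustrative sketch only -- not compiled */
/* A sketch of typical board-level usage of the call above.  The MMIO
   dispatch tables are hypothetical, and cpu_register_physical_memory() is
   assumed to be the usual region_offset == 0 wrapper.  RAM is carved out of
   the linear pool with qemu_ram_alloc() and registered with its ram address
   as phys_offset; MMIO is registered with the io_index returned by
   cpu_register_io_memory().  region_offset (page granular) is useful when a
   device's register block is split across several physical windows; 0 here. */
static void example_board_init(void)
{
    ram_addr_t ram_off;
    int io;

    /* 8 MB of guest RAM at physical address 0 */
    ram_off = qemu_ram_alloc(0x800000);
    cpu_register_physical_memory(0x00000000, 0x800000, ram_off | IO_MEM_RAM);

    /* one page of MMIO at 0x10000000; example_mmio_read and
       example_mmio_write are hypothetical CPUReadMemoryFunc and
       CPUWriteMemoryFunc tables */
    io = cpu_register_io_memory(0, example_mmio_read, example_mmio_write, NULL);
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE, io,
                                        0 /* region_offset */);
}
#endif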
2374 /* XXX: temporary until new memory mapping API */
2375 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2377 PhysPageDesc *p;
2379 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2380 if (!p)
2381 return IO_MEM_UNASSIGNED;
2382 return p->phys_offset;
2385 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2387 if (kvm_enabled())
2388 kvm_coalesce_mmio_region(addr, size);
2391 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2393 if (kvm_enabled())
2394 kvm_uncoalesce_mmio_region(addr, size);
2397 /* XXX: better than nothing */
2398 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2400 ram_addr_t addr;
2401 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2402 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2403 (uint64_t)size, (uint64_t)phys_ram_size);
2404 abort();
2406 addr = phys_ram_alloc_offset;
2407 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2408 return addr;
2411 void qemu_ram_free(ram_addr_t addr)
2415 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2417 #ifdef DEBUG_UNASSIGNED
2418 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2419 #endif
2420 #if defined(TARGET_SPARC)
2421 do_unassigned_access(addr, 0, 0, 0, 1);
2422 #endif
2423 return 0;
2426 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2428 #ifdef DEBUG_UNASSIGNED
2429 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2430 #endif
2431 #if defined(TARGET_SPARC)
2432 do_unassigned_access(addr, 0, 0, 0, 2);
2433 #endif
2434 return 0;
2437 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2439 #ifdef DEBUG_UNASSIGNED
2440 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2441 #endif
2442 #if defined(TARGET_SPARC)
2443 do_unassigned_access(addr, 0, 0, 0, 4);
2444 #endif
2445 return 0;
2448 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2450 #ifdef DEBUG_UNASSIGNED
2451 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2452 #endif
2453 #if defined(TARGET_SPARC)
2454 do_unassigned_access(addr, 1, 0, 0, 1);
2455 #endif
2458 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2460 #ifdef DEBUG_UNASSIGNED
2461 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2462 #endif
2463 #if defined(TARGET_SPARC)
2464 do_unassigned_access(addr, 1, 0, 0, 2);
2465 #endif
2468 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2470 #ifdef DEBUG_UNASSIGNED
2471 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2472 #endif
2473 #if defined(TARGET_SPARC)
2474 do_unassigned_access(addr, 1, 0, 0, 4);
2475 #endif
2478 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2479 unassigned_mem_readb,
2480 unassigned_mem_readw,
2481 unassigned_mem_readl,
2484 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2485 unassigned_mem_writeb,
2486 unassigned_mem_writew,
2487 unassigned_mem_writel,
2490 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2491 uint32_t val)
2493 int dirty_flags;
2494 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2495 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2496 #if !defined(CONFIG_USER_ONLY)
2497 tb_invalidate_phys_page_fast(ram_addr, 1);
2498 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2499 #endif
2501 stb_p(phys_ram_base + ram_addr, val);
2502 #ifdef USE_KQEMU
2503 if (cpu_single_env->kqemu_enabled &&
2504 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2505 kqemu_modify_page(cpu_single_env, ram_addr);
2506 #endif
2507 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2508 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2509 /* we remove the notdirty callback only if the code has been
2510 flushed */
2511 if (dirty_flags == 0xff)
2512 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2515 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2516 uint32_t val)
2518 int dirty_flags;
2519 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2520 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2521 #if !defined(CONFIG_USER_ONLY)
2522 tb_invalidate_phys_page_fast(ram_addr, 2);
2523 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2524 #endif
2526 stw_p(phys_ram_base + ram_addr, val);
2527 #ifdef USE_KQEMU
2528 if (cpu_single_env->kqemu_enabled &&
2529 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2530 kqemu_modify_page(cpu_single_env, ram_addr);
2531 #endif
2532 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2533 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2534 /* we remove the notdirty callback only if the code has been
2535 flushed */
2536 if (dirty_flags == 0xff)
2537 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2540 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2541 uint32_t val)
2543 int dirty_flags;
2544 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2545 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2546 #if !defined(CONFIG_USER_ONLY)
2547 tb_invalidate_phys_page_fast(ram_addr, 4);
2548 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2549 #endif
2551 stl_p(phys_ram_base + ram_addr, val);
2552 #ifdef USE_KQEMU
2553 if (cpu_single_env->kqemu_enabled &&
2554 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2555 kqemu_modify_page(cpu_single_env, ram_addr);
2556 #endif
2557 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2558 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2559 /* we remove the notdirty callback only if the code has been
2560 flushed */
2561 if (dirty_flags == 0xff)
2562 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2565 static CPUReadMemoryFunc *error_mem_read[3] = {
2566 NULL, /* never used */
2567 NULL, /* never used */
2568 NULL, /* never used */
2571 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2572 notdirty_mem_writeb,
2573 notdirty_mem_writew,
2574 notdirty_mem_writel,
2577 /* Generate a debug exception if a watchpoint has been hit. */
2578 static void check_watchpoint(int offset, int len_mask, int flags)
2580 CPUState *env = cpu_single_env;
2581 target_ulong pc, cs_base;
2582 TranslationBlock *tb;
2583 target_ulong vaddr;
2584 CPUWatchpoint *wp;
2585 int cpu_flags;
2587 if (env->watchpoint_hit) {
2588 /* We re-entered the check after replacing the TB. Now raise
2589 * the debug interrupt so that it will trigger after the
2590 * current instruction. */
2591 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2592 return;
2594 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2595 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2596 if ((vaddr == (wp->vaddr & len_mask) ||
2597 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2598 wp->flags |= BP_WATCHPOINT_HIT;
2599 if (!env->watchpoint_hit) {
2600 env->watchpoint_hit = wp;
2601 tb = tb_find_pc(env->mem_io_pc);
2602 if (!tb) {
2603 cpu_abort(env, "check_watchpoint: could not find TB for "
2604 "pc=%p", (void *)env->mem_io_pc);
2606 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2607 tb_phys_invalidate(tb, -1);
2608 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2609 env->exception_index = EXCP_DEBUG;
2610 } else {
2611 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2612 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2614 cpu_resume_from_signal(env, NULL);
2616 } else {
2617 wp->flags &= ~BP_WATCHPOINT_HIT;
2622 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2623 so these check for a hit then pass through to the normal out-of-line
2624 phys routines. */
2625 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2627 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2628 return ldub_phys(addr);
2631 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2633 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2634 return lduw_phys(addr);
2637 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2639 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2640 return ldl_phys(addr);
2643 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2644 uint32_t val)
2646 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2647 stb_phys(addr, val);
2650 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2651 uint32_t val)
2653 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2654 stw_phys(addr, val);
2657 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2658 uint32_t val)
2660 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2661 stl_phys(addr, val);
2664 static CPUReadMemoryFunc *watch_mem_read[3] = {
2665 watch_mem_readb,
2666 watch_mem_readw,
2667 watch_mem_readl,
2670 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2671 watch_mem_writeb,
2672 watch_mem_writew,
2673 watch_mem_writel,
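#if 0   /* illustrative sketch only -- not compiled */
/* How the watch_mem_* handlers above get engaged (sketch; the caller shown
   is hypothetical, the real users are the gdbstub and target debug code).
   Inserting a watchpoint drops the page's TLB entry; the next refill in
   tlb_set_page_exec() finds it on env->watchpoints and points the entry at
   io_mem_watch, so every access funnels through check_watchpoint() before
   reaching real memory. */
static void example_watch_guest_word(CPUState *env1, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env1, guest_addr, 4,
                              BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp) < 0)
        return;
    /* ... run the guest; a write to guest_addr raises EXCP_DEBUG ... */
    cpu_watchpoint_remove_by_ref(env1, wp);
}
#endif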
2676 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2677 unsigned int len)
2679 uint32_t ret;
2680 unsigned int idx;
2682 idx = SUBPAGE_IDX(addr);
2683 #if defined(DEBUG_SUBPAGE)
2684 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2685 mmio, len, addr, idx);
2686 #endif
2687 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2688 addr + mmio->region_offset[idx][0][len]);
2690 return ret;
2693 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2694 uint32_t value, unsigned int len)
2696 unsigned int idx;
2698 idx = SUBPAGE_IDX(addr);
2699 #if defined(DEBUG_SUBPAGE)
2700 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2701 mmio, len, addr, idx, value);
2702 #endif
2703 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2704 addr + mmio->region_offset[idx][1][len],
2705 value);
2708 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2710 #if defined(DEBUG_SUBPAGE)
2711 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2712 #endif
2714 return subpage_readlen(opaque, addr, 0);
2717 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2718 uint32_t value)
2720 #if defined(DEBUG_SUBPAGE)
2721 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2722 #endif
2723 subpage_writelen(opaque, addr, value, 0);
2726 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2728 #if defined(DEBUG_SUBPAGE)
2729 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2730 #endif
2732 return subpage_readlen(opaque, addr, 1);
2735 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2736 uint32_t value)
2738 #if defined(DEBUG_SUBPAGE)
2739 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2740 #endif
2741 subpage_writelen(opaque, addr, value, 1);
2744 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2746 #if defined(DEBUG_SUBPAGE)
2747 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2748 #endif
2750 return subpage_readlen(opaque, addr, 2);
2753 static void subpage_writel (void *opaque,
2754 target_phys_addr_t addr, uint32_t value)
2756 #if defined(DEBUG_SUBPAGE)
2757 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2758 #endif
2759 subpage_writelen(opaque, addr, value, 2);
2762 static CPUReadMemoryFunc *subpage_read[] = {
2763 &subpage_readb,
2764 &subpage_readw,
2765 &subpage_readl,
2768 static CPUWriteMemoryFunc *subpage_write[] = {
2769 &subpage_writeb,
2770 &subpage_writew,
2771 &subpage_writel,
2774 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2775 ram_addr_t memory, ram_addr_t region_offset)
2777 int idx, eidx;
2778 unsigned int i;
2780 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2781 return -1;
2782 idx = SUBPAGE_IDX(start);
2783 eidx = SUBPAGE_IDX(end);
2784 #if defined(DEBUG_SUBPAGE)
2785 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2786 mmio, start, end, idx, eidx, memory);
2787 #endif
2788 memory >>= IO_MEM_SHIFT;
2789 for (; idx <= eidx; idx++) {
2790 for (i = 0; i < 4; i++) {
2791 if (io_mem_read[memory][i]) {
2792 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2793 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2794 mmio->region_offset[idx][0][i] = region_offset;
2796 if (io_mem_write[memory][i]) {
2797 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2798 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2799 mmio->region_offset[idx][1][i] = region_offset;
2804 return 0;
2807 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2808 ram_addr_t orig_memory, ram_addr_t region_offset)
2810 subpage_t *mmio;
2811 int subpage_memory;
2813 mmio = qemu_mallocz(sizeof(subpage_t));
2815 mmio->base = base;
2816 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2817 #if defined(DEBUG_SUBPAGE)
2818 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2819 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2820 #endif
2821 *phys = subpage_memory | IO_MEM_SUBPAGE;
2822 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2823 region_offset);
2825 return mmio;
2828 static int get_free_io_mem_idx(void)
2830 int i;
2832 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2833 if (!io_mem_used[i]) {
2834 io_mem_used[i] = 1;
2835 return i;
2838 return -1;
2841 static void io_mem_init(void)
2843 int i;
2845 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2846 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2847 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2848 for (i=0; i<5; i++)
2849 io_mem_used[i] = 1;
2851 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2852 watch_mem_write, NULL);
2853 /* alloc dirty bits array */
2854 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2855 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2858 /* mem_read and mem_write are arrays of functions containing the
2859 function to access byte (index 0), word (index 1) and dword (index
2860 2). Functions can be omitted with a NULL function pointer. The
2861 registered functions may be modified dynamically later.
2862 If io_index is non zero, the corresponding io zone is
2863 modified. If it is zero, a new io zone is allocated. The return
2864 value can be used with cpu_register_physical_memory(). (-1) is
2865 returned on error. */
2866 int cpu_register_io_memory(int io_index,
2867 CPUReadMemoryFunc **mem_read,
2868 CPUWriteMemoryFunc **mem_write,
2869 void *opaque)
2871 int i, subwidth = 0;
2873 if (io_index <= 0) {
2874 io_index = get_free_io_mem_idx();
2875 if (io_index == -1)
2876 return io_index;
2877 } else {
2878 if (io_index >= IO_MEM_NB_ENTRIES)
2879 return -1;
2882 for(i = 0;i < 3; i++) {
2883 if (!mem_read[i] || !mem_write[i])
2884 subwidth = IO_MEM_SUBWIDTH;
2885 io_mem_read[io_index][i] = mem_read[i];
2886 io_mem_write[io_index][i] = mem_write[i];
2888 io_mem_opaque[io_index] = opaque;
2889 return (io_index << IO_MEM_SHIFT) | subwidth;
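#if 0   /* illustrative sketch only -- not compiled */
/* Minimal device-side sketch for cpu_register_io_memory() (all names below
   are hypothetical).  The three table slots cover byte/word/dword accesses;
   leaving a slot NULL makes the returned value carry IO_MEM_SUBWIDTH, so
   registering the region sends it through the subpage machinery above.  The
   addr passed to the callbacks is the offset within the region (plus any
   region_offset). */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_map(target_phys_addr_t base, void *opaque)
{
    int io = cpu_register_io_memory(0, example_dev_read, example_dev_write,
                                    opaque);
    if (io == -1)
        return;                     /* io_mem table full */
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}
#endif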
2892 void cpu_unregister_io_memory(int io_table_address)
2894 int i;
2895 int io_index = io_table_address >> IO_MEM_SHIFT;
2897 for (i=0;i < 3; i++) {
2898 io_mem_read[io_index][i] = unassigned_mem_read[i];
2899 io_mem_write[io_index][i] = unassigned_mem_write[i];
2901 io_mem_opaque[io_index] = NULL;
2902 io_mem_used[io_index] = 0;
2905 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2907 return io_mem_write[io_index >> IO_MEM_SHIFT];
2910 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2912 return io_mem_read[io_index >> IO_MEM_SHIFT];
2915 #endif /* !defined(CONFIG_USER_ONLY) */
2917 /* physical memory access (slow version, mainly for debug) */
2918 #if defined(CONFIG_USER_ONLY)
2919 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2920 int len, int is_write)
2922 int l, flags;
2923 target_ulong page;
2924 void * p;
2926 while (len > 0) {
2927 page = addr & TARGET_PAGE_MASK;
2928 l = (page + TARGET_PAGE_SIZE) - addr;
2929 if (l > len)
2930 l = len;
2931 flags = page_get_flags(page);
2932 if (!(flags & PAGE_VALID))
2933 return;
2934 if (is_write) {
2935 if (!(flags & PAGE_WRITE))
2936 return;
2937 /* XXX: this code should not depend on lock_user */
2938 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2939 /* FIXME - should this return an error rather than just fail? */
2940 return;
2941 memcpy(p, buf, l);
2942 unlock_user(p, addr, l);
2943 } else {
2944 if (!(flags & PAGE_READ))
2945 return;
2946 /* XXX: this code should not depend on lock_user */
2947 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2948 /* FIXME - should this return an error rather than just fail? */
2949 return;
2950 memcpy(buf, p, l);
2951 unlock_user(p, addr, 0);
2953 len -= l;
2954 buf += l;
2955 addr += l;
2959 #else
2960 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2961 int len, int is_write)
2963 int l, io_index;
2964 uint8_t *ptr;
2965 uint32_t val;
2966 target_phys_addr_t page;
2967 unsigned long pd;
2968 PhysPageDesc *p;
2970 while (len > 0) {
2971 page = addr & TARGET_PAGE_MASK;
2972 l = (page + TARGET_PAGE_SIZE) - addr;
2973 if (l > len)
2974 l = len;
2975 p = phys_page_find(page >> TARGET_PAGE_BITS);
2976 if (!p) {
2977 pd = IO_MEM_UNASSIGNED;
2978 } else {
2979 pd = p->phys_offset;
2982 if (is_write) {
2983 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2984 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2985 if (p)
2986 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2987 /* XXX: could force cpu_single_env to NULL to avoid
2988 potential bugs */
2989 if (l >= 4 && ((addr & 3) == 0)) {
2990 /* 32 bit write access */
2991 val = ldl_p(buf);
2992 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2993 l = 4;
2994 } else if (l >= 2 && ((addr & 1) == 0)) {
2995 /* 16 bit write access */
2996 val = lduw_p(buf);
2997 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2998 l = 2;
2999 } else {
3000 /* 8 bit write access */
3001 val = ldub_p(buf);
3002 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3003 l = 1;
3005 } else {
3006 unsigned long addr1;
3007 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3008 /* RAM case */
3009 ptr = phys_ram_base + addr1;
3010 memcpy(ptr, buf, l);
3011 if (!cpu_physical_memory_is_dirty(addr1)) {
3012 /* invalidate code */
3013 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3014 /* set dirty bit */
3015 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3016 (0xff & ~CODE_DIRTY_FLAG);
3018 /* qemu doesn't execute guest code directly, but kvm does,
3019 therefore flush instruction caches */
3020 if (kvm_enabled())
3021 flush_icache_range((unsigned long)ptr,
3022 ((unsigned long)ptr)+l);
3024 } else {
3025 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3026 !(pd & IO_MEM_ROMD)) {
3027 /* I/O case */
3028 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3029 if (p)
3030 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3031 if (l >= 4 && ((addr & 3) == 0)) {
3032 /* 32 bit read access */
3033 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3034 stl_p(buf, val);
3035 l = 4;
3036 } else if (l >= 2 && ((addr & 1) == 0)) {
3037 /* 16 bit read access */
3038 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3039 stw_p(buf, val);
3040 l = 2;
3041 } else {
3042 /* 8 bit read access */
3043 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3044 stb_p(buf, val);
3045 l = 1;
3047 } else {
3048 /* RAM case */
3049 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3050 (addr & ~TARGET_PAGE_MASK);
3051 memcpy(buf, ptr, l);
3054 len -= l;
3055 buf += l;
3056 addr += l;
3060 /* used for ROM loading : can write in RAM and ROM */
3061 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3062 const uint8_t *buf, int len)
3064 int l;
3065 uint8_t *ptr;
3066 target_phys_addr_t page;
3067 unsigned long pd;
3068 PhysPageDesc *p;
3070 while (len > 0) {
3071 page = addr & TARGET_PAGE_MASK;
3072 l = (page + TARGET_PAGE_SIZE) - addr;
3073 if (l > len)
3074 l = len;
3075 p = phys_page_find(page >> TARGET_PAGE_BITS);
3076 if (!p) {
3077 pd = IO_MEM_UNASSIGNED;
3078 } else {
3079 pd = p->phys_offset;
3082 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3083 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3084 !(pd & IO_MEM_ROMD)) {
3085 /* do nothing */
3086 } else {
3087 unsigned long addr1;
3088 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3089 /* ROM/RAM case */
3090 ptr = phys_ram_base + addr1;
3091 memcpy(ptr, buf, l);
3093 len -= l;
3094 buf += l;
3095 addr += l;
3099 typedef struct {
3100 void *buffer;
3101 target_phys_addr_t addr;
3102 target_phys_addr_t len;
3103 } BounceBuffer;
3105 static BounceBuffer bounce;
3107 typedef struct MapClient {
3108 void *opaque;
3109 void (*callback)(void *opaque);
3110 LIST_ENTRY(MapClient) link;
3111 } MapClient;
3113 static LIST_HEAD(map_client_list, MapClient) map_client_list
3114 = LIST_HEAD_INITIALIZER(map_client_list);
3116 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3118 MapClient *client = qemu_malloc(sizeof(*client));
3120 client->opaque = opaque;
3121 client->callback = callback;
3122 LIST_INSERT_HEAD(&map_client_list, client, link);
3123 return client;
3126 void cpu_unregister_map_client(void *_client)
3128 MapClient *client = (MapClient *)_client;
3130 LIST_REMOVE(client, link);
3133 static void cpu_notify_map_clients(void)
3135 MapClient *client;
3137 while (!LIST_EMPTY(&map_client_list)) {
3138 client = LIST_FIRST(&map_client_list);
3139 client->callback(client->opaque);
3140 LIST_REMOVE(client, link);
3144 /* Map a physical memory region into a host virtual address.
3145 * May map a subset of the requested range, given by and returned in *plen.
3146 * May return NULL if resources needed to perform the mapping are exhausted.
3147 * Use only for reads OR writes - not for read-modify-write operations.
3148 * Use cpu_register_map_client() to know when retrying the map operation is
3149 * likely to succeed.
3151 void *cpu_physical_memory_map(target_phys_addr_t addr,
3152 target_phys_addr_t *plen,
3153 int is_write)
3155 target_phys_addr_t len = *plen;
3156 target_phys_addr_t done = 0;
3157 int l;
3158 uint8_t *ret = NULL;
3159 uint8_t *ptr;
3160 target_phys_addr_t page;
3161 unsigned long pd;
3162 PhysPageDesc *p;
3163 unsigned long addr1;
3165 while (len > 0) {
3166 page = addr & TARGET_PAGE_MASK;
3167 l = (page + TARGET_PAGE_SIZE) - addr;
3168 if (l > len)
3169 l = len;
3170 p = phys_page_find(page >> TARGET_PAGE_BITS);
3171 if (!p) {
3172 pd = IO_MEM_UNASSIGNED;
3173 } else {
3174 pd = p->phys_offset;
3177 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3178 if (done || bounce.buffer) {
3179 break;
3181 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3182 bounce.addr = addr;
3183 bounce.len = l;
3184 if (!is_write) {
3185 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3187 ptr = bounce.buffer;
3188 } else {
3189 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3190 ptr = phys_ram_base + addr1;
3192 if (!done) {
3193 ret = ptr;
3194 } else if (ret + done != ptr) {
3195 break;
3198 len -= l;
3199 addr += l;
3200 done += l;
3202 *plen = done;
3203 return ret;
3206 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3207 * Will also mark the memory as dirty if is_write == 1. access_len gives
3208 * the amount of memory that was actually read or written by the caller.
3210 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3211 int is_write, target_phys_addr_t access_len)
3213 if (buffer != bounce.buffer) {
3214 if (is_write) {
3215 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3216 while (access_len) {
3217 unsigned l;
3218 l = TARGET_PAGE_SIZE;
3219 if (l > access_len)
3220 l = access_len;
3221 if (!cpu_physical_memory_is_dirty(addr1)) {
3222 /* invalidate code */
3223 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3224 /* set dirty bit */
3225 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3226 (0xff & ~CODE_DIRTY_FLAG);
3228 addr1 += l;
3229 access_len -= l;
3232 return;
3234 if (is_write) {
3235 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3237 qemu_free(bounce.buffer);
3238 bounce.buffer = NULL;
3239 cpu_notify_map_clients();
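#if 0   /* illustrative sketch only -- not compiled */
/* Typical zero-copy DMA pattern for the map API above (the caller shown is
   hypothetical).  Writes go straight into guest RAM when possible; MMIO
   pages fall back to the single bounce buffer, in which case a shorter
   *plen comes back and the caller must loop, or register a map client and
   retry later if the mapping fails outright. */
static void example_dma_write(target_phys_addr_t guest_addr,
                              const uint8_t *data, target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);

        if (!host) {
            /* resources exhausted: wait for the cpu_register_map_client()
               callback before retrying */
            break;
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        guest_addr += plen;
        data += plen;
        size -= plen;
    }
}
#endif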
3242 /* warning: addr must be aligned */
3243 uint32_t ldl_phys(target_phys_addr_t addr)
3245 int io_index;
3246 uint8_t *ptr;
3247 uint32_t val;
3248 unsigned long pd;
3249 PhysPageDesc *p;
3251 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3252 if (!p) {
3253 pd = IO_MEM_UNASSIGNED;
3254 } else {
3255 pd = p->phys_offset;
3258 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3259 !(pd & IO_MEM_ROMD)) {
3260 /* I/O case */
3261 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3262 if (p)
3263 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3264 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3265 } else {
3266 /* RAM case */
3267 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3268 (addr & ~TARGET_PAGE_MASK);
3269 val = ldl_p(ptr);
3271 return val;
3274 /* warning: addr must be aligned */
3275 uint64_t ldq_phys(target_phys_addr_t addr)
3277 int io_index;
3278 uint8_t *ptr;
3279 uint64_t val;
3280 unsigned long pd;
3281 PhysPageDesc *p;
3283 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3284 if (!p) {
3285 pd = IO_MEM_UNASSIGNED;
3286 } else {
3287 pd = p->phys_offset;
3290 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3291 !(pd & IO_MEM_ROMD)) {
3292 /* I/O case */
3293 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3294 if (p)
3295 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3296 #ifdef TARGET_WORDS_BIGENDIAN
3297 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3298 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3299 #else
3300 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3301 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3302 #endif
3303 } else {
3304 /* RAM case */
3305 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3306 (addr & ~TARGET_PAGE_MASK);
3307 val = ldq_p(ptr);
3309 return val;
3312 /* XXX: optimize */
3313 uint32_t ldub_phys(target_phys_addr_t addr)
3315 uint8_t val;
3316 cpu_physical_memory_read(addr, &val, 1);
3317 return val;
3320 /* XXX: optimize */
3321 uint32_t lduw_phys(target_phys_addr_t addr)
3323 uint16_t val;
3324 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3325 return tswap16(val);
3328 #ifdef __GNUC__
3329 #define likely(x) __builtin_expect(!!(x), 1)
3330 #define unlikely(x) __builtin_expect(!!(x), 0)
3331 #else
3332 #define likely(x) x
3333 #define unlikely(x) x
3334 #endif
3336 /* warning: addr must be aligned. The ram page is not masked as dirty
3337 and the code inside is not invalidated. It is useful if the dirty
3338 bits are used to track modified PTEs */
3339 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3341 int io_index;
3342 uint8_t *ptr;
3343 unsigned long pd;
3344 PhysPageDesc *p;
3346 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3347 if (!p) {
3348 pd = IO_MEM_UNASSIGNED;
3349 } else {
3350 pd = p->phys_offset;
3353 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3354 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3355 if (p)
3356 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3357 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3358 } else {
3359 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3360 ptr = phys_ram_base + addr1;
3361 stl_p(ptr, val);
3363 if (unlikely(in_migration)) {
3364 if (!cpu_physical_memory_is_dirty(addr1)) {
3365 /* invalidate code */
3366 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3367 /* set dirty bit */
3368 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3369 (0xff & ~CODE_DIRTY_FLAG);
3375 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3377 int io_index;
3378 uint8_t *ptr;
3379 unsigned long pd;
3380 PhysPageDesc *p;
3382 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3383 if (!p) {
3384 pd = IO_MEM_UNASSIGNED;
3385 } else {
3386 pd = p->phys_offset;
3389 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3390 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3391 if (p)
3392 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3393 #ifdef TARGET_WORDS_BIGENDIAN
3394 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3395 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3396 #else
3397 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3398 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3399 #endif
3400 } else {
3401 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3402 (addr & ~TARGET_PAGE_MASK);
3403 stq_p(ptr, val);
3407 /* warning: addr must be aligned */
3408 void stl_phys(target_phys_addr_t addr, uint32_t val)
3410 int io_index;
3411 uint8_t *ptr;
3412 unsigned long pd;
3413 PhysPageDesc *p;
3415 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3416 if (!p) {
3417 pd = IO_MEM_UNASSIGNED;
3418 } else {
3419 pd = p->phys_offset;
3422 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3423 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3424 if (p)
3425 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3426 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3427 } else {
3428 unsigned long addr1;
3429 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3430 /* RAM case */
3431 ptr = phys_ram_base + addr1;
3432 stl_p(ptr, val);
3433 if (!cpu_physical_memory_is_dirty(addr1)) {
3434 /* invalidate code */
3435 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3436 /* set dirty bit */
3437 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3438 (0xff & ~CODE_DIRTY_FLAG);
3443 /* XXX: optimize */
3444 void stb_phys(target_phys_addr_t addr, uint32_t val)
3446 uint8_t v = val;
3447 cpu_physical_memory_write(addr, &v, 1);
3450 /* XXX: optimize */
3451 void stw_phys(target_phys_addr_t addr, uint32_t val)
3453 uint16_t v = tswap16(val);
3454 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3457 /* XXX: optimize */
3458 void stq_phys(target_phys_addr_t addr, uint64_t val)
3460 val = tswap64(val);
3461 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
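#if 0   /* illustrative sketch only -- not compiled */
/* Typical use of the ld*_phys/st*_phys helpers above: device or hypervisor
   glue touching guest-physical memory in guest byte order, here walking a
   hypothetical in-RAM descriptor.  The helpers marked "addr must be aligned"
   need naturally aligned addresses; anything else should go through
   cpu_physical_memory_rw(). */
static target_phys_addr_t example_complete_descriptor(target_phys_addr_t desc_addr)
{
    /* mark the hypothetical descriptor done, then follow its next link */
    stw_phys(desc_addr + 4, 0x0001);
    return ldl_phys(desc_addr + 8);              /* 4-byte aligned field */
}
#endif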
3464 #endif
3466 /* virtual memory access for debug */
3467 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3468 uint8_t *buf, int len, int is_write)
3470 int l;
3471 target_phys_addr_t phys_addr;
3472 target_ulong page;
3474 while (len > 0) {
3475 page = addr & TARGET_PAGE_MASK;
3476 phys_addr = cpu_get_phys_page_debug(env, page);
3477 /* if no physical page mapped, return an error */
3478 if (phys_addr == -1)
3479 return -1;
3480 l = (page + TARGET_PAGE_SIZE) - addr;
3481 if (l > len)
3482 l = len;
3483 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3484 buf, l, is_write);
3485 len -= l;
3486 buf += l;
3487 addr += l;
3489 return 0;
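#if 0   /* illustrative sketch only -- not compiled */
/* Typical caller of cpu_memory_rw_debug(): a debugger front end (gdbstub or
   the monitor's memory dump) reading guest *virtual* memory.  The wrapper
   below is hypothetical; the translation goes through
   cpu_get_phys_page_debug(), so it works even for pages the TLB has never
   seen. */
static int example_read_guest_virt(CPUState *env1, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env1, vaddr, buf, len, 0 /* is_write */);
}
#endif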
3492 /* in deterministic execution mode, instructions doing device I/Os
3493 must be at the end of the TB */
3494 void cpu_io_recompile(CPUState *env, void *retaddr)
3496 TranslationBlock *tb;
3497 uint32_t n, cflags;
3498 target_ulong pc, cs_base;
3499 uint64_t flags;
3501 tb = tb_find_pc((unsigned long)retaddr);
3502 if (!tb) {
3503 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3504 retaddr);
3506 n = env->icount_decr.u16.low + tb->icount;
3507 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3508 /* Calculate how many instructions had been executed before the fault
3509 occurred. */
3510 n = n - env->icount_decr.u16.low;
3511 /* Generate a new TB ending on the I/O insn. */
3512 n++;
3513 /* On MIPS and SH, delay slot instructions can only be restarted if
3514 they were already the first instruction in the TB. If this is not
3515 the first instruction in a TB then re-execute the preceding
3516 branch. */
3517 #if defined(TARGET_MIPS)
3518 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3519 env->active_tc.PC -= 4;
3520 env->icount_decr.u16.low++;
3521 env->hflags &= ~MIPS_HFLAG_BMASK;
3523 #elif defined(TARGET_SH4)
3524 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3525 && n > 1) {
3526 env->pc -= 2;
3527 env->icount_decr.u16.low++;
3528 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3530 #endif
3531 /* This should never happen. */
3532 if (n > CF_COUNT_MASK)
3533 cpu_abort(env, "TB too big during recompile");
3535 cflags = n | CF_LAST_IO;
3536 pc = tb->pc;
3537 cs_base = tb->cs_base;
3538 flags = tb->flags;
3539 tb_phys_invalidate(tb, -1);
3540 /* FIXME: In theory this could raise an exception. In practice
3541 we have already translated the block once so it's probably ok. */
3542 tb_gen_code(env, pc, cs_base, flags, cflags);
3543 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3544 the first in the TB) then we end up generating a whole new TB and
3545 repeating the fault, which is horribly inefficient.
3546 Better would be to execute just this insn uncached, or generate a
3547 second new TB. */
3548 cpu_resume_from_signal(env, NULL);
3551 void dump_exec_info(FILE *f,
3552 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3554 int i, target_code_size, max_target_code_size;
3555 int direct_jmp_count, direct_jmp2_count, cross_page;
3556 TranslationBlock *tb;
3558 target_code_size = 0;
3559 max_target_code_size = 0;
3560 cross_page = 0;
3561 direct_jmp_count = 0;
3562 direct_jmp2_count = 0;
3563 for(i = 0; i < nb_tbs; i++) {
3564 tb = &tbs[i];
3565 target_code_size += tb->size;
3566 if (tb->size > max_target_code_size)
3567 max_target_code_size = tb->size;
3568 if (tb->page_addr[1] != -1)
3569 cross_page++;
3570 if (tb->tb_next_offset[0] != 0xffff) {
3571 direct_jmp_count++;
3572 if (tb->tb_next_offset[1] != 0xffff) {
3573 direct_jmp2_count++;
3577 /* XXX: avoid using doubles ? */
3578 cpu_fprintf(f, "Translation buffer state:\n");
3579 cpu_fprintf(f, "gen code size %ld/%ld\n",
3580 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3581 cpu_fprintf(f, "TB count %d/%d\n",
3582 nb_tbs, code_gen_max_blocks);
3583 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3584 nb_tbs ? target_code_size / nb_tbs : 0,
3585 max_target_code_size);
3586 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3587 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3588 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3589 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3590 cross_page,
3591 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3592 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3593 direct_jmp_count,
3594 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3595 direct_jmp2_count,
3596 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3597 cpu_fprintf(f, "\nStatistics:\n");
3598 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3599 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3600 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3601 tcg_dump_info(f, cpu_fprintf);
3604 #if !defined(CONFIG_USER_ONLY)
3606 #define MMUSUFFIX _cmmu
3607 #define GETPC() NULL
3608 #define env cpu_single_env
3609 #define SOFTMMU_CODE_ACCESS
3611 #define SHIFT 0
3612 #include "softmmu_template.h"
3614 #define SHIFT 1
3615 #include "softmmu_template.h"
3617 #define SHIFT 2
3618 #include "softmmu_template.h"
3620 #define SHIFT 3
3621 #include "softmmu_template.h"
3623 #undef env
3625 #endif