[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #if defined(CONFIG_USER_ONLY)
47 #include <qemu.h>
48 #endif
50 //#define DEBUG_TB_INVALIDATE
51 //#define DEBUG_FLUSH
52 //#define DEBUG_TLB
53 //#define DEBUG_UNASSIGNED
55 /* make various TB consistency checks */
56 //#define DEBUG_TB_CHECK
57 //#define DEBUG_TLB_CHECK
59 //#define DEBUG_IOPORT
60 //#define DEBUG_SUBPAGE
62 #if !defined(CONFIG_USER_ONLY)
63 /* TB consistency checks only implemented for usermode emulation. */
64 #undef DEBUG_TB_CHECK
65 #endif
67 #define SMC_BITMAP_USE_THRESHOLD 10
69 #define MMAP_AREA_START 0x00000000
70 #define MMAP_AREA_END 0xa8000000
72 #if defined(TARGET_SPARC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 41
74 #elif defined(TARGET_SPARC)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 36
76 #elif defined(TARGET_ALPHA)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #define TARGET_VIRT_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_PPC64)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
85 #elif defined(TARGET_IA64)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #else
88 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
89 #define TARGET_PHYS_ADDR_SPACE_BITS 32
90 #endif
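/* TARGET_PHYS_ADDR_SPACE_BITS bounds the guest physical address space
   modelled here; together with L1_BITS/L2_BITS below it sizes the
   two-level l1_phys_map table used by phys_page_find_alloc(). */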
92 TranslationBlock *tbs;
93 int code_gen_max_blocks;
94 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
95 int nb_tbs;
96 /* any access to the tbs or the page table must use this lock */
97 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
99 #if defined(__arm__) || defined(__sparc_v9__)
100 /* The prologue must be reachable with a direct jump. ARM and Sparc64
101 have limited branch ranges (possibly also PPC) so place it in a
102 section close to the code segment. */
103 #define code_gen_section \
104 __attribute__((__section__(".gen_code"))) \
105 __attribute__((aligned (32)))
106 #else
107 #define code_gen_section \
108 __attribute__((aligned (32)))
109 #endif
111 uint8_t code_gen_prologue[1024] code_gen_section;
112 uint8_t *code_gen_buffer;
113 unsigned long code_gen_buffer_size;
114 /* threshold to flush the translated code buffer */
115 unsigned long code_gen_buffer_max_size;
116 uint8_t *code_gen_ptr;
118 #if !defined(CONFIG_USER_ONLY)
119 ram_addr_t phys_ram_size;
120 int phys_ram_fd;
121 uint8_t *phys_ram_base;
122 uint8_t *phys_ram_dirty;
123 uint8_t *bios_mem;
124 static int in_migration;
125 static ram_addr_t phys_ram_alloc_offset = 0;
126 #endif
128 CPUState *first_cpu;
129 /* current CPU in the current thread. It is only valid inside
130 cpu_exec() */
131 CPUState *cpu_single_env;
132 /* 0 = Do not count executed instructions.
133 1 = Precise instruction counting.
134 2 = Adaptive rate instruction counting. */
135 int use_icount = 0;
136 /* Current instruction counter. While executing translated code this may
137 include some instructions that have not yet been executed. */
138 int64_t qemu_icount;
140 typedef struct PageDesc {
141 /* list of TBs intersecting this ram page */
142 TranslationBlock *first_tb;
143 /* in order to optimize self modifying code, we count the number
144 of lookups we do to a given page to use a bitmap */
145 unsigned int code_write_count;
146 uint8_t *code_bitmap;
147 #if defined(CONFIG_USER_ONLY)
148 unsigned long flags;
149 #endif
150 } PageDesc;
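/* Note on pointer tagging used throughout this file: TranslationBlock
   pointers stored in first_tb/page_next and in jmp_first/jmp_next carry
   extra state in their two low bits.  For the per-page lists the tag is
   the index (0 or 1) of the page inside the TB; for the jump lists it is
   the jump slot, with the value 2 marking the head of the circular list.
   Masking with ~3 recovers the real pointer. */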
152 typedef struct PhysPageDesc {
153 /* offset in host memory of the page + io_index in the low bits */
154 ram_addr_t phys_offset;
155 } PhysPageDesc;
157 #define L2_BITS 10
158 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
159 /* XXX: this is a temporary hack for alpha target.
160 * In the future, this is to be replaced by a multi-level table
161 * to actually be able to handle the complete 64-bit address space. */
163 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
164 #else
165 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
166 #endif
168 #define L1_SIZE (1 << L1_BITS)
169 #define L2_SIZE (1 << L2_BITS)
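/* A guest page index is looked up in two levels: l1_map[index >> L2_BITS]
   points to an array of L2_SIZE PageDesc entries, and the final entry is
   selected with (index & (L2_SIZE - 1)) -- see page_find_alloc() below. */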
171 unsigned long qemu_real_host_page_size;
172 unsigned long qemu_host_page_bits;
173 unsigned long qemu_host_page_size;
174 unsigned long qemu_host_page_mask;
176 /* XXX: for system emulation, it could just be an array */
177 static PageDesc *l1_map[L1_SIZE];
178 PhysPageDesc **l1_phys_map;
180 #if !defined(CONFIG_USER_ONLY)
181 static void io_mem_init(void);
183 /* io memory support */
184 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
185 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
186 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
187 char io_mem_used[IO_MEM_NB_ENTRIES];
188 static int io_mem_watch;
189 #endif
191 /* log support */
192 const char *logfilename = "/tmp/qemu.log";
193 FILE *logfile;
194 int loglevel;
195 static int log_append = 0;
197 /* statistics */
198 static int tlb_flush_count;
199 static int tb_flush_count;
200 static int tb_phys_invalidate_count;
202 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
203 typedef struct subpage_t {
204 target_phys_addr_t base;
205 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
206 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
207 void *opaque[TARGET_PAGE_SIZE][2][4];
208 } subpage_t;
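/* A subpage_t allows a single target page to be shared by several I/O
   memory regions: SUBPAGE_IDX(addr) picks the per-offset read/write
   handler (one slot per access size) for the byte within the page. */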
210 #ifdef _WIN32
211 static void map_exec(void *addr, long size)
213 DWORD old_protect;
214 VirtualProtect(addr, size,
215 PAGE_EXECUTE_READWRITE, &old_protect);
218 #else
219 static void map_exec(void *addr, long size)
221 unsigned long start, end, page_size;
223 page_size = getpagesize();
224 start = (unsigned long)addr;
225 start &= ~(page_size - 1);
227 end = (unsigned long)addr + size;
228 end += page_size - 1;
229 end &= ~(page_size - 1);
231 mprotect((void *)start, end - start,
232 PROT_READ | PROT_WRITE | PROT_EXEC);
234 #endif
236 static void page_init(void)
238 /* NOTE: we can always suppose that qemu_host_page_size >=
239 TARGET_PAGE_SIZE */
240 #ifdef _WIN32
242 SYSTEM_INFO system_info;
243 DWORD old_protect;
245 GetSystemInfo(&system_info);
246 qemu_real_host_page_size = system_info.dwPageSize;
248 #else
249 qemu_real_host_page_size = getpagesize();
250 #endif
251 if (qemu_host_page_size == 0)
252 qemu_host_page_size = qemu_real_host_page_size;
253 if (qemu_host_page_size < TARGET_PAGE_SIZE)
254 qemu_host_page_size = TARGET_PAGE_SIZE;
255 qemu_host_page_bits = 0;
256 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
257 qemu_host_page_bits++;
258 qemu_host_page_mask = ~(qemu_host_page_size - 1);
259 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
260 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264 long long startaddr, endaddr;
265 FILE *f;
266 int n;
268 mmap_lock();
269 last_brk = (unsigned long)sbrk(0);
270 f = fopen("/proc/self/maps", "r");
271 if (f) {
272 do {
273 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
274 if (n == 2) {
275 startaddr = MIN(startaddr,
276 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
277 endaddr = MIN(endaddr,
278 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
279 page_set_flags(startaddr & TARGET_PAGE_MASK,
280 TARGET_PAGE_ALIGN(endaddr),
281 PAGE_RESERVED);
283 } while (!feof(f));
284 fclose(f);
286 mmap_unlock();
288 #endif
291 static inline PageDesc **page_l1_map(target_ulong index)
293 #if TARGET_LONG_BITS > 32
294 /* Host memory outside guest VM. For 32-bit targets we have already
295 excluded high addresses. */
296 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
297 return NULL;
298 #endif
299 return &l1_map[index >> L2_BITS];
302 static inline PageDesc *page_find_alloc(target_ulong index)
304 PageDesc **lp, *p;
305 lp = page_l1_map(index);
306 if (!lp)
307 return NULL;
309 p = *lp;
310 if (!p) {
311 /* allocate if not found */
312 #if defined(CONFIG_USER_ONLY)
313 unsigned long addr;
314 size_t len = sizeof(PageDesc) * L2_SIZE;
315 /* Don't use qemu_malloc because it may recurse. */
316 p = mmap(0, len, PROT_READ | PROT_WRITE,
317 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
318 *lp = p;
319 addr = h2g(p);
320 if (addr == (target_ulong)addr) {
321 page_set_flags(addr & TARGET_PAGE_MASK,
322 TARGET_PAGE_ALIGN(addr + len),
323 PAGE_RESERVED);
325 #else
326 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
327 *lp = p;
328 #endif
330 return p + (index & (L2_SIZE - 1));
333 static inline PageDesc *page_find(target_ulong index)
335 PageDesc **lp, *p;
336 lp = page_l1_map(index);
337 if (!lp)
338 return NULL;
340 p = *lp;
341 if (!p)
342 return 0;
343 return p + (index & (L2_SIZE - 1));
346 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
348 void **lp, **p;
349 PhysPageDesc *pd;
351 p = (void **)l1_phys_map;
352 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
354 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
356 #endif
357 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
358 p = *lp;
359 if (!p) {
360 /* allocate if not found */
361 if (!alloc)
362 return NULL;
363 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364 memset(p, 0, sizeof(void *) * L1_SIZE);
365 *lp = p;
367 #endif
368 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
369 pd = *lp;
370 if (!pd) {
371 int i;
372 /* allocate if not found */
373 if (!alloc)
374 return NULL;
375 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
376 *lp = pd;
377 for (i = 0; i < L2_SIZE; i++)
378 pd[i].phys_offset = IO_MEM_UNASSIGNED;
380 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
385 return phys_page_find_alloc(index, 0);
388 #if !defined(CONFIG_USER_ONLY)
389 static void tlb_protect_code(ram_addr_t ram_addr);
390 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
391 target_ulong vaddr);
392 #define mmap_lock() do { } while(0)
393 #define mmap_unlock() do { } while(0)
394 #endif
396 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
398 #if defined(CONFIG_USER_ONLY)
399 /* Currently it is not recommended to allocate big chunks of data in
400 user mode. It will change when a dedicated libc is used */
401 #define USE_STATIC_CODE_GEN_BUFFER
402 #endif
404 #ifdef USE_STATIC_CODE_GEN_BUFFER
405 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
406 #endif
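/* code_gen_alloc() either reuses the static buffer above (user mode) or
   mmap()s an executable region.  The mapping is kept low in the address
   space (MAP_32BIT on Linux/x86-64, a fixed low mapping on FreeBSD,
   below 2G on sparc64) so generated code can reach the prologue and
   other blocks with direct calls and branches. */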
408 static void code_gen_alloc(unsigned long tb_size)
410 if (kvm_enabled())
411 return;
413 #ifdef USE_STATIC_CODE_GEN_BUFFER
414 code_gen_buffer = static_code_gen_buffer;
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416 map_exec(code_gen_buffer, code_gen_buffer_size);
417 #else
418 code_gen_buffer_size = tb_size;
419 if (code_gen_buffer_size == 0) {
420 #if defined(CONFIG_USER_ONLY)
421 /* in user mode, phys_ram_size is not meaningful */
422 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
423 #else
424 /* XXX: needs adjustments */
425 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
426 #endif
428 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
429 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
430 /* The code gen buffer location may have constraints depending on
431 the host cpu and OS */
432 #if defined(__linux__)
434 int flags;
435 void *start = NULL;
437 flags = MAP_PRIVATE | MAP_ANONYMOUS;
438 #if defined(__x86_64__)
439 flags |= MAP_32BIT;
440 /* Cannot map more than that */
441 if (code_gen_buffer_size > (800 * 1024 * 1024))
442 code_gen_buffer_size = (800 * 1024 * 1024);
443 #elif defined(__sparc_v9__)
444 // Map the buffer below 2G, so we can use direct calls and branches
445 flags |= MAP_FIXED;
446 start = (void *) 0x60000000UL;
447 if (code_gen_buffer_size > (512 * 1024 * 1024))
448 code_gen_buffer_size = (512 * 1024 * 1024);
449 #endif
450 code_gen_buffer = mmap(start, code_gen_buffer_size,
451 PROT_WRITE | PROT_READ | PROT_EXEC,
452 flags, -1, 0);
453 if (code_gen_buffer == MAP_FAILED) {
454 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
455 exit(1);
458 #elif defined(__FreeBSD__)
460 int flags;
461 void *addr = NULL;
462 flags = MAP_PRIVATE | MAP_ANONYMOUS;
463 #if defined(__x86_64__)
464 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
465 * 0x40000000 is free */
466 flags |= MAP_FIXED;
467 addr = (void *)0x40000000;
468 /* Cannot map more than that */
469 if (code_gen_buffer_size > (800 * 1024 * 1024))
470 code_gen_buffer_size = (800 * 1024 * 1024);
471 #endif
472 code_gen_buffer = mmap(addr, code_gen_buffer_size,
473 PROT_WRITE | PROT_READ | PROT_EXEC,
474 flags, -1, 0);
475 if (code_gen_buffer == MAP_FAILED) {
476 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
477 exit(1);
480 #else
481 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
482 if (!code_gen_buffer) {
483 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
484 exit(1);
486 map_exec(code_gen_buffer, code_gen_buffer_size);
487 #endif
488 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
489 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
490 code_gen_buffer_max_size = code_gen_buffer_size -
491 code_gen_max_block_size();
492 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
493 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
496 /* Must be called before using the QEMU cpus. 'tb_size' is the size
497 (in bytes) allocated to the translation buffer. Zero means default
498 size. */
499 void cpu_exec_init_all(unsigned long tb_size)
501 cpu_gen_init();
502 code_gen_alloc(tb_size);
503 code_gen_ptr = code_gen_buffer;
504 page_init();
505 #if !defined(CONFIG_USER_ONLY)
506 io_mem_init();
507 #endif
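/* Typical start-up order (illustrative, as done by the emulator's setup
   code): call cpu_exec_init_all(0) once to allocate the code buffer,
   page tables and I/O memory tables, then create each virtual CPU with
   cpu_init(), which ends up calling cpu_exec_init() below. */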
510 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
512 #define CPU_COMMON_SAVE_VERSION 1
514 static void cpu_common_save(QEMUFile *f, void *opaque)
516 CPUState *env = opaque;
518 qemu_put_be32s(f, &env->halted);
519 qemu_put_be32s(f, &env->interrupt_request);
522 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
524 CPUState *env = opaque;
526 if (version_id != CPU_COMMON_SAVE_VERSION)
527 return -EINVAL;
529 qemu_get_be32s(f, &env->halted);
530 qemu_get_be32s(f, &env->interrupt_request);
531 tlb_flush(env, 1);
533 return 0;
535 #endif
537 void cpu_exec_init(CPUState *env)
539 CPUState **penv;
540 int cpu_index;
542 env->next_cpu = NULL;
543 penv = &first_cpu;
544 cpu_index = 0;
545 while (*penv != NULL) {
546 penv = (CPUState **)&(*penv)->next_cpu;
547 cpu_index++;
549 env->cpu_index = cpu_index;
550 env->nb_watchpoints = 0;
551 #ifdef _WIN32
552 env->thread_id = GetCurrentProcessId();
553 #else
554 env->thread_id = getpid();
555 #endif
556 *penv = env;
557 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
558 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
559 cpu_common_save, cpu_common_load, env);
560 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
561 cpu_save, cpu_load, env);
562 #endif
565 static inline void invalidate_page_bitmap(PageDesc *p)
567 if (p->code_bitmap) {
568 qemu_free(p->code_bitmap);
569 p->code_bitmap = NULL;
571 p->code_write_count = 0;
574 /* set to NULL all the 'first_tb' fields in all PageDescs */
575 static void page_flush_tb(void)
577 int i, j;
578 PageDesc *p;
580 for(i = 0; i < L1_SIZE; i++) {
581 p = l1_map[i];
582 if (p) {
583 for(j = 0; j < L2_SIZE; j++) {
584 p->first_tb = NULL;
585 invalidate_page_bitmap(p);
586 p++;
592 /* flush all the translation blocks */
593 /* XXX: tb_flush is currently not thread safe */
594 void tb_flush(CPUState *env1)
596 CPUState *env;
597 #if defined(DEBUG_FLUSH)
598 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
599 (unsigned long)(code_gen_ptr - code_gen_buffer),
600 nb_tbs, nb_tbs > 0 ?
601 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
602 #endif
603 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
604 cpu_abort(env1, "Internal error: code buffer overflow\n");
606 nb_tbs = 0;
608 for(env = first_cpu; env != NULL; env = env->next_cpu) {
609 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
612 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
613 page_flush_tb();
615 code_gen_ptr = code_gen_buffer;
616 /* XXX: flush processor icache at this point if cache flush is
617 expensive */
618 tb_flush_count++;
621 #ifdef DEBUG_TB_CHECK
623 static void tb_invalidate_check(target_ulong address)
625 TranslationBlock *tb;
626 int i;
627 address &= TARGET_PAGE_MASK;
628 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
629 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
630 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
631 address >= tb->pc + tb->size)) {
632 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
633 address, (long)tb->pc, tb->size);
639 /* verify that all the pages have correct rights for code */
640 static void tb_page_check(void)
642 TranslationBlock *tb;
643 int i, flags1, flags2;
645 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
646 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
647 flags1 = page_get_flags(tb->pc);
648 flags2 = page_get_flags(tb->pc + tb->size - 1);
649 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
650 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
651 (long)tb->pc, tb->size, flags1, flags2);
657 void tb_jmp_check(TranslationBlock *tb)
659 TranslationBlock *tb1;
660 unsigned int n1;
662 /* suppress any remaining jumps to this TB */
663 tb1 = tb->jmp_first;
664 for(;;) {
665 n1 = (long)tb1 & 3;
666 tb1 = (TranslationBlock *)((long)tb1 & ~3);
667 if (n1 == 2)
668 break;
669 tb1 = tb1->jmp_next[n1];
671 /* check end of list */
672 if (tb1 != tb) {
673 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
677 #endif
679 /* invalidate one TB */
680 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
681 int next_offset)
683 TranslationBlock *tb1;
684 for(;;) {
685 tb1 = *ptb;
686 if (tb1 == tb) {
687 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
688 break;
690 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
694 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
696 TranslationBlock *tb1;
697 unsigned int n1;
699 for(;;) {
700 tb1 = *ptb;
701 n1 = (long)tb1 & 3;
702 tb1 = (TranslationBlock *)((long)tb1 & ~3);
703 if (tb1 == tb) {
704 *ptb = tb1->page_next[n1];
705 break;
707 ptb = &tb1->page_next[n1];
711 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
713 TranslationBlock *tb1, **ptb;
714 unsigned int n1;
716 ptb = &tb->jmp_next[n];
717 tb1 = *ptb;
718 if (tb1) {
719 /* find tb(n) in circular list */
720 for(;;) {
721 tb1 = *ptb;
722 n1 = (long)tb1 & 3;
723 tb1 = (TranslationBlock *)((long)tb1 & ~3);
724 if (n1 == n && tb1 == tb)
725 break;
726 if (n1 == 2) {
727 ptb = &tb1->jmp_first;
728 } else {
729 ptb = &tb1->jmp_next[n1];
732 /* now we can suppress tb(n) from the list */
733 *ptb = tb->jmp_next[n];
735 tb->jmp_next[n] = NULL;
739 /* reset the jump entry 'n' of a TB so that it is not chained to
740 another TB */
741 static inline void tb_reset_jump(TranslationBlock *tb, int n)
743 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
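/* tb_phys_invalidate() removes a TB from every structure that references
   it: the physical PC hash table, the per-page TB lists, each CPU's
   tb_jmp_cache, and the jump lists of any TBs chained to it (their
   direct jumps are reset so they exit to the main loop instead). */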
746 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
748 CPUState *env;
749 PageDesc *p;
750 unsigned int h, n1;
751 target_phys_addr_t phys_pc;
752 TranslationBlock *tb1, *tb2;
754 /* remove the TB from the hash list */
755 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
756 h = tb_phys_hash_func(phys_pc);
757 tb_remove(&tb_phys_hash[h], tb,
758 offsetof(TranslationBlock, phys_hash_next));
760 /* remove the TB from the page list */
761 if (tb->page_addr[0] != page_addr) {
762 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
763 tb_page_remove(&p->first_tb, tb);
764 invalidate_page_bitmap(p);
766 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
767 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
768 tb_page_remove(&p->first_tb, tb);
769 invalidate_page_bitmap(p);
772 tb_invalidated_flag = 1;
774 /* remove the TB from the hash list */
775 h = tb_jmp_cache_hash_func(tb->pc);
776 for(env = first_cpu; env != NULL; env = env->next_cpu) {
777 if (env->tb_jmp_cache[h] == tb)
778 env->tb_jmp_cache[h] = NULL;
781 /* suppress this TB from the two jump lists */
782 tb_jmp_remove(tb, 0);
783 tb_jmp_remove(tb, 1);
785 /* suppress any remaining jumps to this TB */
786 tb1 = tb->jmp_first;
787 for(;;) {
788 n1 = (long)tb1 & 3;
789 if (n1 == 2)
790 break;
791 tb1 = (TranslationBlock *)((long)tb1 & ~3);
792 tb2 = tb1->jmp_next[n1];
793 tb_reset_jump(tb1, n1);
794 tb1->jmp_next[n1] = NULL;
795 tb1 = tb2;
797 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
799 tb_phys_invalidate_count++;
802 static inline void set_bits(uint8_t *tab, int start, int len)
804 int end, mask, end1;
806 end = start + len;
807 tab += start >> 3;
808 mask = 0xff << (start & 7);
809 if ((start & ~7) == (end & ~7)) {
810 if (start < end) {
811 mask &= ~(0xff << (end & 7));
812 *tab |= mask;
814 } else {
815 *tab++ |= mask;
816 start = (start + 8) & ~7;
817 end1 = end & ~7;
818 while (start < end1) {
819 *tab++ = 0xff;
820 start += 8;
822 if (start < end) {
823 mask = ~(0xff << (end & 7));
824 *tab |= mask;
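/* set_bits() marks bits [start, start+len) in a byte-addressed bitmap;
   e.g. set_bits(tab, 3, 7) sets bits 3..9, spilling from tab[0] into
   tab[1].  It is used below to build the per-page SMC code bitmap. */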
829 static void build_page_bitmap(PageDesc *p)
831 int n, tb_start, tb_end;
832 TranslationBlock *tb;
834 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
835 if (!p->code_bitmap)
836 return;
838 tb = p->first_tb;
839 while (tb != NULL) {
840 n = (long)tb & 3;
841 tb = (TranslationBlock *)((long)tb & ~3);
842 /* NOTE: this is subtle as a TB may span two physical pages */
843 if (n == 0) {
844 /* NOTE: tb_end may be after the end of the page, but
845 it is not a problem */
846 tb_start = tb->pc & ~TARGET_PAGE_MASK;
847 tb_end = tb_start + tb->size;
848 if (tb_end > TARGET_PAGE_SIZE)
849 tb_end = TARGET_PAGE_SIZE;
850 } else {
851 tb_start = 0;
852 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
854 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
855 tb = tb->page_next[n];
859 TranslationBlock *tb_gen_code(CPUState *env,
860 target_ulong pc, target_ulong cs_base,
861 int flags, int cflags)
863 TranslationBlock *tb;
864 uint8_t *tc_ptr;
865 target_ulong phys_pc, phys_page2, virt_page2;
866 int code_gen_size;
868 phys_pc = get_phys_addr_code(env, pc);
869 tb = tb_alloc(pc);
870 if (!tb) {
871 /* flush must be done */
872 tb_flush(env);
873 /* cannot fail at this point */
874 tb = tb_alloc(pc);
875 /* Don't forget to invalidate previous TB info. */
876 tb_invalidated_flag = 1;
878 tc_ptr = code_gen_ptr;
879 tb->tc_ptr = tc_ptr;
880 tb->cs_base = cs_base;
881 tb->flags = flags;
882 tb->cflags = cflags;
883 cpu_gen_code(env, tb, &code_gen_size);
884 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
886 /* check next page if needed */
887 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
888 phys_page2 = -1;
889 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
890 phys_page2 = get_phys_addr_code(env, virt_page2);
892 tb_link_phys(tb, phys_pc, phys_page2);
893 return tb;
896 /* invalidate all TBs which intersect with the target physical page
897 starting in range [start;end[. NOTE: start and end must refer to
898 the same physical page. 'is_cpu_write_access' should be true if called
899 from a real cpu write access: the virtual CPU will exit the current
900 TB if code is modified inside this TB. */
901 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
902 int is_cpu_write_access)
904 int n, current_tb_modified, current_tb_not_found, current_flags;
905 CPUState *env = cpu_single_env;
906 PageDesc *p;
907 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
908 target_ulong tb_start, tb_end;
909 target_ulong current_pc, current_cs_base;
911 p = page_find(start >> TARGET_PAGE_BITS);
912 if (!p)
913 return;
914 if (!p->code_bitmap &&
915 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
916 is_cpu_write_access) {
917 /* build code bitmap */
918 build_page_bitmap(p);
921 /* we remove all the TBs in the range [start, end[ */
922 /* XXX: see if in some cases it could be faster to invalidate all the code */
923 current_tb_not_found = is_cpu_write_access;
924 current_tb_modified = 0;
925 current_tb = NULL; /* avoid warning */
926 current_pc = 0; /* avoid warning */
927 current_cs_base = 0; /* avoid warning */
928 current_flags = 0; /* avoid warning */
929 tb = p->first_tb;
930 while (tb != NULL) {
931 n = (long)tb & 3;
932 tb = (TranslationBlock *)((long)tb & ~3);
933 tb_next = tb->page_next[n];
934 /* NOTE: this is subtle as a TB may span two physical pages */
935 if (n == 0) {
936 /* NOTE: tb_end may be after the end of the page, but
937 it is not a problem */
938 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
939 tb_end = tb_start + tb->size;
940 } else {
941 tb_start = tb->page_addr[1];
942 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
944 if (!(tb_end <= start || tb_start >= end)) {
945 #ifdef TARGET_HAS_PRECISE_SMC
946 if (current_tb_not_found) {
947 current_tb_not_found = 0;
948 current_tb = NULL;
949 if (env->mem_io_pc) {
950 /* now we have a real cpu fault */
951 current_tb = tb_find_pc(env->mem_io_pc);
954 if (current_tb == tb &&
955 (current_tb->cflags & CF_COUNT_MASK) != 1) {
956 /* If we are modifying the current TB, we must stop
957 its execution. We could be more precise by checking
958 that the modification is after the current PC, but it
959 would require a specialized function to partially
960 restore the CPU state */
962 current_tb_modified = 1;
963 cpu_restore_state(current_tb, env,
964 env->mem_io_pc, NULL);
965 #if defined(TARGET_I386)
966 current_flags = env->hflags;
967 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
968 current_cs_base = (target_ulong)env->segs[R_CS].base;
969 current_pc = current_cs_base + env->eip;
970 #else
971 #error unsupported CPU
972 #endif
974 #endif /* TARGET_HAS_PRECISE_SMC */
975 /* we need to do that to handle the case where a signal
976 occurs while doing tb_phys_invalidate() */
977 saved_tb = NULL;
978 if (env) {
979 saved_tb = env->current_tb;
980 env->current_tb = NULL;
982 tb_phys_invalidate(tb, -1);
983 if (env) {
984 env->current_tb = saved_tb;
985 if (env->interrupt_request && env->current_tb)
986 cpu_interrupt(env, env->interrupt_request);
989 tb = tb_next;
991 #if !defined(CONFIG_USER_ONLY)
992 /* if no code remaining, no need to continue to use slow writes */
993 if (!p->first_tb) {
994 invalidate_page_bitmap(p);
995 if (is_cpu_write_access) {
996 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
999 #endif
1000 #ifdef TARGET_HAS_PRECISE_SMC
1001 if (current_tb_modified) {
1002 /* we generate a block containing just the instruction
1003 modifying the memory. It will ensure that it cannot modify
1004 itself */
1005 env->current_tb = NULL;
1006 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1007 cpu_resume_from_signal(env, NULL);
1009 #endif
1012 /* len must be <= 8 and start must be a multiple of len */
1013 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1015 PageDesc *p;
1016 int offset, b;
1017 #if 0
1018 if (1) {
1019 if (loglevel) {
1020 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1021 cpu_single_env->mem_io_vaddr, len,
1022 cpu_single_env->eip,
1023 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1026 #endif
1027 p = page_find(start >> TARGET_PAGE_BITS);
1028 if (!p)
1029 return;
1030 if (p->code_bitmap) {
1031 offset = start & ~TARGET_PAGE_MASK;
1032 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1033 if (b & ((1 << len) - 1))
1034 goto do_invalidate;
1035 } else {
1036 do_invalidate:
1037 tb_invalidate_phys_page_range(start, start + len, 1);
1041 #if !defined(CONFIG_SOFTMMU)
1042 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1043 unsigned long pc, void *puc)
1045 int n, current_flags, current_tb_modified;
1046 target_ulong current_pc, current_cs_base;
1047 PageDesc *p;
1048 TranslationBlock *tb, *current_tb;
1049 #ifdef TARGET_HAS_PRECISE_SMC
1050 CPUState *env = cpu_single_env;
1051 #endif
1053 addr &= TARGET_PAGE_MASK;
1054 p = page_find(addr >> TARGET_PAGE_BITS);
1055 if (!p)
1056 return;
1057 tb = p->first_tb;
1058 current_tb_modified = 0;
1059 current_tb = NULL;
1060 current_pc = 0; /* avoid warning */
1061 current_cs_base = 0; /* avoid warning */
1062 current_flags = 0; /* avoid warning */
1063 #ifdef TARGET_HAS_PRECISE_SMC
1064 if (tb && pc != 0) {
1065 current_tb = tb_find_pc(pc);
1067 #endif
1068 while (tb != NULL) {
1069 n = (long)tb & 3;
1070 tb = (TranslationBlock *)((long)tb & ~3);
1071 #ifdef TARGET_HAS_PRECISE_SMC
1072 if (current_tb == tb &&
1073 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1074 /* If we are modifying the current TB, we must stop
1075 its execution. We could be more precise by checking
1076 that the modification is after the current PC, but it
1077 would require a specialized function to partially
1078 restore the CPU state */
1080 current_tb_modified = 1;
1081 cpu_restore_state(current_tb, env, pc, puc);
1082 #if defined(TARGET_I386)
1083 current_flags = env->hflags;
1084 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1085 current_cs_base = (target_ulong)env->segs[R_CS].base;
1086 current_pc = current_cs_base + env->eip;
1087 #else
1088 #error unsupported CPU
1089 #endif
1091 #endif /* TARGET_HAS_PRECISE_SMC */
1092 tb_phys_invalidate(tb, addr);
1093 tb = tb->page_next[n];
1095 p->first_tb = NULL;
1096 #ifdef TARGET_HAS_PRECISE_SMC
1097 if (current_tb_modified) {
1098 /* we generate a block containing just the instruction
1099 modifying the memory. It will ensure that it cannot modify
1100 itself */
1101 env->current_tb = NULL;
1102 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1103 cpu_resume_from_signal(env, puc);
1105 #endif
1107 #endif
1109 /* add the tb in the target page and protect it if necessary */
1110 static inline void tb_alloc_page(TranslationBlock *tb,
1111 unsigned int n, target_ulong page_addr)
1113 PageDesc *p;
1114 TranslationBlock *last_first_tb;
1116 tb->page_addr[n] = page_addr;
1117 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1118 tb->page_next[n] = p->first_tb;
1119 last_first_tb = p->first_tb;
1120 p->first_tb = (TranslationBlock *)((long)tb | n);
1121 invalidate_page_bitmap(p);
1123 #if defined(TARGET_HAS_SMC) || 1
1125 #if defined(CONFIG_USER_ONLY)
1126 if (p->flags & PAGE_WRITE) {
1127 target_ulong addr;
1128 PageDesc *p2;
1129 int prot;
1131 /* force the host page as non writable (writes will have a
1132 page fault + mprotect overhead) */
1133 page_addr &= qemu_host_page_mask;
1134 prot = 0;
1135 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1136 addr += TARGET_PAGE_SIZE) {
1138 p2 = page_find (addr >> TARGET_PAGE_BITS);
1139 if (!p2)
1140 continue;
1141 prot |= p2->flags;
1142 p2->flags &= ~PAGE_WRITE;
1143 page_get_flags(addr);
1145 mprotect(g2h(page_addr), qemu_host_page_size,
1146 (prot & PAGE_BITS) & ~PAGE_WRITE);
1147 #ifdef DEBUG_TB_INVALIDATE
1148 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1149 page_addr);
1150 #endif
1152 #else
1153 /* if some code is already present, then the pages are already
1154 protected. So we handle the case where only the first TB is
1155 allocated in a physical page */
1156 if (!last_first_tb) {
1157 tlb_protect_code(page_addr);
1159 #endif
1161 #endif /* TARGET_HAS_SMC */
1164 /* Allocate a new translation block. Flush the translation buffer if
1165 too many translation blocks or too much generated code. */
1166 TranslationBlock *tb_alloc(target_ulong pc)
1168 TranslationBlock *tb;
1170 if (nb_tbs >= code_gen_max_blocks ||
1171 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1172 return NULL;
1173 tb = &tbs[nb_tbs++];
1174 tb->pc = pc;
1175 tb->cflags = 0;
1176 return tb;
1179 void tb_free(TranslationBlock *tb)
1181 /* In practice this is mostly used for single-use temporary TBs.
1182 Ignore the hard cases and just back up if this TB happens to
1183 be the last one generated. */
1184 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1185 code_gen_ptr = tb->tc_ptr;
1186 nb_tbs--;
1190 /* add a new TB and link it to the physical page tables. phys_page2 is
1191 (-1) to indicate that only one page contains the TB. */
1192 void tb_link_phys(TranslationBlock *tb,
1193 target_ulong phys_pc, target_ulong phys_page2)
1195 unsigned int h;
1196 TranslationBlock **ptb;
1198 /* Grab the mmap lock to stop another thread invalidating this TB
1199 before we are done. */
1200 mmap_lock();
1201 /* add in the physical hash table */
1202 h = tb_phys_hash_func(phys_pc);
1203 ptb = &tb_phys_hash[h];
1204 tb->phys_hash_next = *ptb;
1205 *ptb = tb;
1207 /* add in the page list */
1208 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1209 if (phys_page2 != -1)
1210 tb_alloc_page(tb, 1, phys_page2);
1211 else
1212 tb->page_addr[1] = -1;
1214 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1215 tb->jmp_next[0] = NULL;
1216 tb->jmp_next[1] = NULL;
1218 /* init original jump addresses */
1219 if (tb->tb_next_offset[0] != 0xffff)
1220 tb_reset_jump(tb, 0);
1221 if (tb->tb_next_offset[1] != 0xffff)
1222 tb_reset_jump(tb, 1);
1224 #ifdef DEBUG_TB_CHECK
1225 tb_page_check();
1226 #endif
1227 mmap_unlock();
1230 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1231 tb[1].tc_ptr. Return NULL if not found */
1232 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1234 int m_min, m_max, m;
1235 unsigned long v;
1236 TranslationBlock *tb;
1238 if (nb_tbs <= 0)
1239 return NULL;
1240 if (tc_ptr < (unsigned long)code_gen_buffer ||
1241 tc_ptr >= (unsigned long)code_gen_ptr)
1242 return NULL;
1243 /* binary search (cf Knuth) */
1244 m_min = 0;
1245 m_max = nb_tbs - 1;
1246 while (m_min <= m_max) {
1247 m = (m_min + m_max) >> 1;
1248 tb = &tbs[m];
1249 v = (unsigned long)tb->tc_ptr;
1250 if (v == tc_ptr)
1251 return tb;
1252 else if (tc_ptr < v) {
1253 m_max = m - 1;
1254 } else {
1255 m_min = m + 1;
1258 return &tbs[m_max];
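/* The binary search above relies on tbs[] being ordered by tc_ptr, which
   holds because translated code is handed out sequentially from
   code_gen_buffer. */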
1261 static void tb_reset_jump_recursive(TranslationBlock *tb);
1263 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1265 TranslationBlock *tb1, *tb_next, **ptb;
1266 unsigned int n1;
1268 tb1 = tb->jmp_next[n];
1269 if (tb1 != NULL) {
1270 /* find head of list */
1271 for(;;) {
1272 n1 = (long)tb1 & 3;
1273 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1274 if (n1 == 2)
1275 break;
1276 tb1 = tb1->jmp_next[n1];
1278 /* we are now sure that tb jumps to tb1 */
1279 tb_next = tb1;
1281 /* remove tb from the jmp_first list */
1282 ptb = &tb_next->jmp_first;
1283 for(;;) {
1284 tb1 = *ptb;
1285 n1 = (long)tb1 & 3;
1286 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1287 if (n1 == n && tb1 == tb)
1288 break;
1289 ptb = &tb1->jmp_next[n1];
1291 *ptb = tb->jmp_next[n];
1292 tb->jmp_next[n] = NULL;
1294 /* suppress the jump to next tb in generated code */
1295 tb_reset_jump(tb, n);
1297 /* suppress jumps in the tb on which we could have jumped */
1298 tb_reset_jump_recursive(tb_next);
1302 static void tb_reset_jump_recursive(TranslationBlock *tb)
1304 tb_reset_jump_recursive2(tb, 0);
1305 tb_reset_jump_recursive2(tb, 1);
1308 #if defined(TARGET_HAS_ICE)
1309 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1311 target_phys_addr_t addr;
1312 target_ulong pd;
1313 ram_addr_t ram_addr;
1314 PhysPageDesc *p;
1316 addr = cpu_get_phys_page_debug(env, pc);
1317 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1318 if (!p) {
1319 pd = IO_MEM_UNASSIGNED;
1320 } else {
1321 pd = p->phys_offset;
1323 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1324 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1326 #endif
1328 /* Add a watchpoint. */
1329 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1331 int i;
1333 for (i = 0; i < env->nb_watchpoints; i++) {
1334 if (addr == env->watchpoint[i].vaddr)
1335 return 0;
1337 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1338 return -1;
1340 i = env->nb_watchpoints++;
1341 env->watchpoint[i].vaddr = addr;
1342 env->watchpoint[i].type = type;
1343 tlb_flush_page(env, addr);
1344 /* FIXME: This flush is needed because of the hack to make memory ops
1345 terminate the TB. It can be removed once the proper IO trap and
1346 re-execute bits are in. */
1347 tb_flush(env);
1348 return i;
1351 /* Remove a watchpoint. */
1352 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1354 int i;
1356 for (i = 0; i < env->nb_watchpoints; i++) {
1357 if (addr == env->watchpoint[i].vaddr) {
1358 env->nb_watchpoints--;
1359 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1360 tlb_flush_page(env, addr);
1361 return 0;
1364 return -1;
1367 /* Remove all watchpoints. */
1368 void cpu_watchpoint_remove_all(CPUState *env) {
1369 int i;
1371 for (i = 0; i < env->nb_watchpoints; i++) {
1372 tlb_flush_page(env, env->watchpoint[i].vaddr);
1374 env->nb_watchpoints = 0;
1377 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1378 breakpoint is reached */
1379 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1381 #if defined(TARGET_HAS_ICE)
1382 int i;
1384 for(i = 0; i < env->nb_breakpoints; i++) {
1385 if (env->breakpoints[i] == pc)
1386 return 0;
1389 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1390 return -1;
1391 env->breakpoints[env->nb_breakpoints++] = pc;
1393 if (kvm_enabled())
1394 kvm_update_debugger(env);
1396 breakpoint_invalidate(env, pc);
1397 return 0;
1398 #else
1399 return -1;
1400 #endif
1403 /* remove all breakpoints */
1404 void cpu_breakpoint_remove_all(CPUState *env) {
1405 #if defined(TARGET_HAS_ICE)
1406 int i;
1407 for(i = 0; i < env->nb_breakpoints; i++) {
1408 breakpoint_invalidate(env, env->breakpoints[i]);
1410 env->nb_breakpoints = 0;
1411 #endif
1414 /* remove a breakpoint */
1415 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1417 #if defined(TARGET_HAS_ICE)
1418 int i;
1419 for(i = 0; i < env->nb_breakpoints; i++) {
1420 if (env->breakpoints[i] == pc)
1421 goto found;
1423 return -1;
1424 found:
1425 env->nb_breakpoints--;
1426 if (i < env->nb_breakpoints)
1427 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1429 if (kvm_enabled())
1430 kvm_update_debugger(env);
1432 breakpoint_invalidate(env, pc);
1433 return 0;
1434 #else
1435 return -1;
1436 #endif
1439 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1440 CPU loop after each instruction */
1441 void cpu_single_step(CPUState *env, int enabled)
1443 #if defined(TARGET_HAS_ICE)
1444 if (env->singlestep_enabled != enabled) {
1445 env->singlestep_enabled = enabled;
1446 /* must flush all the translated code to avoid inconsistencies */
1447 /* XXX: only flush what is necessary */
1448 tb_flush(env);
1450 if (kvm_enabled())
1451 kvm_update_debugger(env);
1452 #endif
1455 /* enable or disable low levels log */
1456 void cpu_set_log(int log_flags)
1458 loglevel = log_flags;
1459 if (loglevel && !logfile) {
1460 logfile = fopen(logfilename, log_append ? "a" : "w");
1461 if (!logfile) {
1462 perror(logfilename);
1463 _exit(1);
1465 #if !defined(CONFIG_SOFTMMU)
1466 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1468 static char logfile_buf[4096];
1469 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1471 #else
1472 setvbuf(logfile, NULL, _IOLBF, 0);
1473 #endif
1474 log_append = 1;
1476 if (!loglevel && logfile) {
1477 fclose(logfile);
1478 logfile = NULL;
1482 void cpu_set_log_filename(const char *filename)
1484 logfilename = strdup(filename);
1485 if (logfile) {
1486 fclose(logfile);
1487 logfile = NULL;
1489 cpu_set_log(loglevel);
1492 /* mask must never be zero, except for A20 change call */
1493 void cpu_interrupt(CPUState *env, int mask)
1495 #if !defined(USE_NPTL)
1496 TranslationBlock *tb;
1497 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1498 #endif
1499 int old_mask;
1501 old_mask = env->interrupt_request;
1502 /* FIXME: This is probably not threadsafe. A different thread could
1503 be in the middle of a read-modify-write operation. */
1504 env->interrupt_request |= mask;
1505 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1506 kvm_update_interrupt_request(env);
1507 #if defined(USE_NPTL)
1508 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1509 problem and hope the cpu will stop of its own accord. For userspace
1510 emulation this often isn't actually as bad as it sounds. Often
1511 signals are used primarily to interrupt blocking syscalls. */
1512 #else
1513 if (use_icount) {
1514 env->icount_decr.u16.high = 0xffff;
1515 #ifndef CONFIG_USER_ONLY
1516 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1517 an async event happened and we need to process it. */
1518 if (!can_do_io(env)
1519 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1520 cpu_abort(env, "Raised interrupt while not in I/O function");
1522 #endif
1523 } else {
1524 tb = env->current_tb;
1525 /* if the cpu is currently executing code, we must unlink it and
1526 all the potentially executing TB */
1527 if (tb && !testandset(&interrupt_lock)) {
1528 env->current_tb = NULL;
1529 tb_reset_jump_recursive(tb);
1530 resetlock(&interrupt_lock);
1533 #endif
1536 void cpu_reset_interrupt(CPUState *env, int mask)
1538 env->interrupt_request &= ~mask;
1541 CPULogItem cpu_log_items[] = {
1542 { CPU_LOG_TB_OUT_ASM, "out_asm",
1543 "show generated host assembly code for each compiled TB" },
1544 { CPU_LOG_TB_IN_ASM, "in_asm",
1545 "show target assembly code for each compiled TB" },
1546 { CPU_LOG_TB_OP, "op",
1547 "show micro ops for each compiled TB" },
1548 { CPU_LOG_TB_OP_OPT, "op_opt",
1549 "show micro ops "
1550 #ifdef TARGET_I386
1551 "before eflags optimization and "
1552 #endif
1553 "after liveness analysis" },
1554 { CPU_LOG_INT, "int",
1555 "show interrupts/exceptions in short format" },
1556 { CPU_LOG_EXEC, "exec",
1557 "show trace before each executed TB (lots of logs)" },
1558 { CPU_LOG_TB_CPU, "cpu",
1559 "show CPU state before block translation" },
1560 #ifdef TARGET_I386
1561 { CPU_LOG_PCALL, "pcall",
1562 "show protected mode far calls/returns/exceptions" },
1563 #endif
1564 #ifdef DEBUG_IOPORT
1565 { CPU_LOG_IOPORT, "ioport",
1566 "show all i/o ports accesses" },
1567 #endif
1568 { 0, NULL, NULL },
1571 static int cmp1(const char *s1, int n, const char *s2)
1573 if (strlen(s2) != n)
1574 return 0;
1575 return memcmp(s1, s2, n) == 0;
1578 /* takes a comma-separated list of log masks. Returns 0 on error. */
1579 int cpu_str_to_log_mask(const char *str)
1581 CPULogItem *item;
1582 int mask;
1583 const char *p, *p1;
1585 p = str;
1586 mask = 0;
1587 for(;;) {
1588 p1 = strchr(p, ',');
1589 if (!p1)
1590 p1 = p + strlen(p);
1591 if(cmp1(p,p1-p,"all")) {
1592 for(item = cpu_log_items; item->mask != 0; item++) {
1593 mask |= item->mask;
1595 } else {
1596 for(item = cpu_log_items; item->mask != 0; item++) {
1597 if (cmp1(p, p1 - p, item->name))
1598 goto found;
1600 return 0;
1602 found:
1603 mask |= item->mask;
1604 if (*p1 != ',')
1605 break;
1606 p = p1 + 1;
1608 return mask;
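/* Typical use when parsing a "-d" style option (illustrative):
       int mask = cpu_str_to_log_mask("in_asm,cpu");
       if (mask)
           cpu_set_log(mask);
   An unknown name makes cpu_str_to_log_mask() return 0. */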
1611 void cpu_abort(CPUState *env, const char *fmt, ...)
1613 va_list ap;
1614 va_list ap2;
1616 va_start(ap, fmt);
1617 va_copy(ap2, ap);
1618 fprintf(stderr, "qemu: fatal: ");
1619 vfprintf(stderr, fmt, ap);
1620 fprintf(stderr, "\n");
1621 #ifdef TARGET_I386
1622 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1623 #else
1624 cpu_dump_state(env, stderr, fprintf, 0);
1625 #endif
1626 if (logfile) {
1627 fprintf(logfile, "qemu: fatal: ");
1628 vfprintf(logfile, fmt, ap2);
1629 fprintf(logfile, "\n");
1630 #ifdef TARGET_I386
1631 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1632 #else
1633 cpu_dump_state(env, logfile, fprintf, 0);
1634 #endif
1635 fflush(logfile);
1636 fclose(logfile);
1638 va_end(ap2);
1639 va_end(ap);
1640 abort();
1643 CPUState *cpu_copy(CPUState *env)
1645 CPUState *new_env = cpu_init(env->cpu_model_str);
1646 /* preserve chaining and index */
1647 CPUState *next_cpu = new_env->next_cpu;
1648 int cpu_index = new_env->cpu_index;
1649 memcpy(new_env, env, sizeof(CPUState));
1650 new_env->next_cpu = next_cpu;
1651 new_env->cpu_index = cpu_index;
1652 return new_env;
1655 #if !defined(CONFIG_USER_ONLY)
1657 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1659 unsigned int i;
1661 /* Discard jump cache entries for any tb which might potentially
1662 overlap the flushed page. */
1663 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1664 memset (&env->tb_jmp_cache[i], 0,
1665 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1667 i = tb_jmp_cache_hash_page(addr);
1668 memset (&env->tb_jmp_cache[i], 0,
1669 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1672 /* NOTE: if flush_global is true, also flush global entries (not
1673 implemented yet) */
1674 void tlb_flush(CPUState *env, int flush_global)
1676 int i;
1678 #if defined(DEBUG_TLB)
1679 printf("tlb_flush:\n");
1680 #endif
1681 /* must reset current TB so that interrupts cannot modify the
1682 links while we are modifying them */
1683 env->current_tb = NULL;
1685 for(i = 0; i < CPU_TLB_SIZE; i++) {
1686 env->tlb_table[0][i].addr_read = -1;
1687 env->tlb_table[0][i].addr_write = -1;
1688 env->tlb_table[0][i].addr_code = -1;
1689 env->tlb_table[1][i].addr_read = -1;
1690 env->tlb_table[1][i].addr_write = -1;
1691 env->tlb_table[1][i].addr_code = -1;
1692 #if (NB_MMU_MODES >= 3)
1693 env->tlb_table[2][i].addr_read = -1;
1694 env->tlb_table[2][i].addr_write = -1;
1695 env->tlb_table[2][i].addr_code = -1;
1696 #if (NB_MMU_MODES == 4)
1697 env->tlb_table[3][i].addr_read = -1;
1698 env->tlb_table[3][i].addr_write = -1;
1699 env->tlb_table[3][i].addr_code = -1;
1700 #endif
1701 #endif
1704 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1706 #ifdef USE_KQEMU
1707 if (env->kqemu_enabled) {
1708 kqemu_flush(env, flush_global);
1710 #endif
1711 tlb_flush_count++;
1714 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1716 if (addr == (tlb_entry->addr_read &
1717 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1718 addr == (tlb_entry->addr_write &
1719 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1720 addr == (tlb_entry->addr_code &
1721 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1722 tlb_entry->addr_read = -1;
1723 tlb_entry->addr_write = -1;
1724 tlb_entry->addr_code = -1;
1728 void tlb_flush_page(CPUState *env, target_ulong addr)
1730 int i;
1732 #if defined(DEBUG_TLB)
1733 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1734 #endif
1735 /* must reset current TB so that interrupts cannot modify the
1736 links while we are modifying them */
1737 env->current_tb = NULL;
1739 addr &= TARGET_PAGE_MASK;
1740 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1741 tlb_flush_entry(&env->tlb_table[0][i], addr);
1742 tlb_flush_entry(&env->tlb_table[1][i], addr);
1743 #if (NB_MMU_MODES >= 3)
1744 tlb_flush_entry(&env->tlb_table[2][i], addr);
1745 #if (NB_MMU_MODES == 4)
1746 tlb_flush_entry(&env->tlb_table[3][i], addr);
1747 #endif
1748 #endif
1750 tlb_flush_jmp_cache(env, addr);
1752 #ifdef USE_KQEMU
1753 if (env->kqemu_enabled) {
1754 kqemu_flush_page(env, addr);
1756 #endif
1759 /* update the TLBs so that writes to code in the virtual page 'addr'
1760 can be detected */
1761 static void tlb_protect_code(ram_addr_t ram_addr)
1763 cpu_physical_memory_reset_dirty(ram_addr,
1764 ram_addr + TARGET_PAGE_SIZE,
1765 CODE_DIRTY_FLAG);
1768 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1769 tested for self modifying code */
1770 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1771 target_ulong vaddr)
1773 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
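/* phys_ram_dirty keeps one byte of flag bits per target page (indexed by
   ram_addr >> TARGET_PAGE_BITS).  A cleared CODE_DIRTY_FLAG means the
   page still contains valid translated code, so writes to it must take
   the slow path and invalidate the affected TBs; setting the flag here
   re-enables fast writes. */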
1776 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1777 unsigned long start, unsigned long length)
1779 unsigned long addr;
1780 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1781 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1782 if ((addr - start) < length) {
1783 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1788 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1789 int dirty_flags)
1791 CPUState *env;
1792 unsigned long length, start1;
1793 int i, mask, len;
1794 uint8_t *p;
1796 start &= TARGET_PAGE_MASK;
1797 end = TARGET_PAGE_ALIGN(end);
1799 length = end - start;
1800 if (length == 0)
1801 return;
1802 len = length >> TARGET_PAGE_BITS;
1803 #ifdef USE_KQEMU
1804 /* XXX: should not depend on cpu context */
1805 env = first_cpu;
1806 if (env->kqemu_enabled) {
1807 ram_addr_t addr;
1808 addr = start;
1809 for(i = 0; i < len; i++) {
1810 kqemu_set_notdirty(env, addr);
1811 addr += TARGET_PAGE_SIZE;
1814 #endif
1815 mask = ~dirty_flags;
1816 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1817 for(i = 0; i < len; i++)
1818 p[i] &= mask;
1820 /* we modify the TLB cache so that the dirty bit will be set again
1821 when accessing the range */
1822 start1 = start + (unsigned long)phys_ram_base;
1823 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1824 for(i = 0; i < CPU_TLB_SIZE; i++)
1825 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1826 for(i = 0; i < CPU_TLB_SIZE; i++)
1827 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1828 #if (NB_MMU_MODES >= 3)
1829 for(i = 0; i < CPU_TLB_SIZE; i++)
1830 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1831 #if (NB_MMU_MODES == 4)
1832 for(i = 0; i < CPU_TLB_SIZE; i++)
1833 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1834 #endif
1835 #endif
1839 int cpu_physical_memory_set_dirty_tracking(int enable)
1841 int r=0;
1843 if (kvm_enabled())
1844 r = kvm_physical_memory_set_dirty_tracking(enable);
1845 in_migration = enable;
1846 return r;
1849 int cpu_physical_memory_get_dirty_tracking(void)
1851 return in_migration;
1854 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1856 ram_addr_t ram_addr;
1858 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1859 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1860 tlb_entry->addend - (unsigned long)phys_ram_base;
1861 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1862 tlb_entry->addr_write |= TLB_NOTDIRTY;
1867 /* update the TLB according to the current state of the dirty bits */
1868 void cpu_tlb_update_dirty(CPUState *env)
1870 int i;
1871 for(i = 0; i < CPU_TLB_SIZE; i++)
1872 tlb_update_dirty(&env->tlb_table[0][i]);
1873 for(i = 0; i < CPU_TLB_SIZE; i++)
1874 tlb_update_dirty(&env->tlb_table[1][i]);
1875 #if (NB_MMU_MODES >= 3)
1876 for(i = 0; i < CPU_TLB_SIZE; i++)
1877 tlb_update_dirty(&env->tlb_table[2][i]);
1878 #if (NB_MMU_MODES == 4)
1879 for(i = 0; i < CPU_TLB_SIZE; i++)
1880 tlb_update_dirty(&env->tlb_table[3][i]);
1881 #endif
1882 #endif
1885 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1887 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1888 tlb_entry->addr_write = vaddr;
1891 /* update the TLB corresponding to virtual page vaddr
1892 so that it is no longer dirty */
1893 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1895 int i;
1897 vaddr &= TARGET_PAGE_MASK;
1898 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1899 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1900 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1901 #if (NB_MMU_MODES >= 3)
1902 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1903 #if (NB_MMU_MODES == 4)
1904 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1905 #endif
1906 #endif
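/* Each CPUTLBEntry stores the page-aligned guest virtual address in
   addr_read/addr_write/addr_code, with status bits (TLB_INVALID_MASK,
   TLB_NOTDIRTY, TLB_MMIO) kept in the low, sub-page bits.  'addend' is
   what must be added to a guest virtual address to obtain the host
   pointer for direct RAM accesses. */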
1909 /* add a new TLB entry. At most one entry for a given virtual address
1910 is permitted. Return 0 if OK or 2 if the page could not be mapped
1911 (can only happen in non SOFTMMU mode for I/O pages or pages
1912 conflicting with the host address space). */
1913 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1914 target_phys_addr_t paddr, int prot,
1915 int mmu_idx, int is_softmmu)
1917 PhysPageDesc *p;
1918 unsigned long pd;
1919 unsigned int index;
1920 target_ulong address;
1921 target_ulong code_address;
1922 target_phys_addr_t addend;
1923 int ret;
1924 CPUTLBEntry *te;
1925 int i;
1926 target_phys_addr_t iotlb;
1928 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1929 if (!p) {
1930 pd = IO_MEM_UNASSIGNED;
1931 } else {
1932 pd = p->phys_offset;
1934 #if defined(DEBUG_TLB)
1935 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1936 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1937 #endif
1939 ret = 0;
1940 address = vaddr;
1941 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1942 /* IO memory case (romd handled later) */
1943 address |= TLB_MMIO;
1945 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1946 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1947 /* Normal RAM. */
1948 iotlb = pd & TARGET_PAGE_MASK;
1949 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1950 iotlb |= IO_MEM_NOTDIRTY;
1951 else
1952 iotlb |= IO_MEM_ROM;
1953 } else {
1954 /* IO handlers are currently passed a physical address.
1955 It would be nice to pass an offset from the base address
1956 of that region. This would avoid having to special case RAM,
1957 and avoid full address decoding in every device.
1958 We can't use the high bits of pd for this because
1959 IO_MEM_ROMD uses these as a ram address. */
1960 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1963 code_address = address;
1964 /* Make accesses to pages with watchpoints go via the
1965 watchpoint trap routines. */
1966 for (i = 0; i < env->nb_watchpoints; i++) {
1967 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1968 iotlb = io_mem_watch + paddr;
1969 /* TODO: The memory case can be optimized by not trapping
1970 reads of pages with a write breakpoint. */
1971 address |= TLB_MMIO;
1975 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1976 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1977 te = &env->tlb_table[mmu_idx][index];
1978 te->addend = addend - vaddr;
1979 if (prot & PAGE_READ) {
1980 te->addr_read = address;
1981 } else {
1982 te->addr_read = -1;
1985 if (prot & PAGE_EXEC) {
1986 te->addr_code = code_address;
1987 } else {
1988 te->addr_code = -1;
1990 if (prot & PAGE_WRITE) {
1991 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1992 (pd & IO_MEM_ROMD)) {
1993 /* Write access calls the I/O callback. */
1994 te->addr_write = address | TLB_MMIO;
1995 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1996 !cpu_physical_memory_is_dirty(pd)) {
1997 te->addr_write = address | TLB_NOTDIRTY;
1998 } else {
1999 te->addr_write = address;
2001 } else {
2002 te->addr_write = -1;
2004 return ret;
2007 #else
2009 void tlb_flush(CPUState *env, int flush_global)
2013 void tlb_flush_page(CPUState *env, target_ulong addr)
2017 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2018 target_phys_addr_t paddr, int prot,
2019 int mmu_idx, int is_softmmu)
2021 return 0;
2024 /* dump memory mappings */
2025 void page_dump(FILE *f)
2027 unsigned long start, end;
2028 int i, j, prot, prot1;
2029 PageDesc *p;
2031 fprintf(f, "%-8s %-8s %-8s %s\n",
2032 "start", "end", "size", "prot");
2033 start = -1;
2034 end = -1;
2035 prot = 0;
2036 for(i = 0; i <= L1_SIZE; i++) {
2037 if (i < L1_SIZE)
2038 p = l1_map[i];
2039 else
2040 p = NULL;
2041 for(j = 0;j < L2_SIZE; j++) {
2042 if (!p)
2043 prot1 = 0;
2044 else
2045 prot1 = p[j].flags;
2046 if (prot1 != prot) {
2047 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2048 if (start != -1) {
2049 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2050 start, end, end - start,
2051 prot & PAGE_READ ? 'r' : '-',
2052 prot & PAGE_WRITE ? 'w' : '-',
2053 prot & PAGE_EXEC ? 'x' : '-');
2055 if (prot1 != 0)
2056 start = end;
2057 else
2058 start = -1;
2059 prot = prot1;
2061 if (!p)
2062 break;
2067 int page_get_flags(target_ulong address)
2069 PageDesc *p;
2071 p = page_find(address >> TARGET_PAGE_BITS);
2072 if (!p)
2073 return 0;
2074 return p->flags;
2077 /* modify the flags of a page and invalidate the code if
2078 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2079 depending on PAGE_WRITE */
2080 void page_set_flags(target_ulong start, target_ulong end, int flags)
2082 PageDesc *p;
2083 target_ulong addr;
2085 /* mmap_lock should already be held. */
2086 start = start & TARGET_PAGE_MASK;
2087 end = TARGET_PAGE_ALIGN(end);
2088 if (flags & PAGE_WRITE)
2089 flags |= PAGE_WRITE_ORG;
2090 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2091 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2092 /* We may be called for host regions that are outside guest
2093 address space. */
2094 if (!p)
2095 return;
2096 /* if the write protection is set, then we invalidate the code
2097 inside */
2098 if (!(p->flags & PAGE_WRITE) &&
2099 (flags & PAGE_WRITE) &&
2100 p->first_tb) {
2101 tb_invalidate_phys_page(addr, 0, NULL);
2103 p->flags = flags;
2107 int page_check_range(target_ulong start, target_ulong len, int flags)
2109 PageDesc *p;
2110 target_ulong end;
2111 target_ulong addr;
2113 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2114 start = start & TARGET_PAGE_MASK;
2116 if( end < start )
2117 /* we've wrapped around */
2118 return -1;
2119 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2120 p = page_find(addr >> TARGET_PAGE_BITS);
2121 if( !p )
2122 return -1;
2123 if( !(p->flags & PAGE_VALID) )
2124 return -1;
2126 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2127 return -1;
2128 if (flags & PAGE_WRITE) {
2129 if (!(p->flags & PAGE_WRITE_ORG))
2130 return -1;
2131 /* unprotect the page if it was put read-only because it
2132 contains translated code */
2133 if (!(p->flags & PAGE_WRITE)) {
2134 if (!page_unprotect(addr, 0, NULL))
2135 return -1;
2137 return 0;
2140 return 0;
2143 /* called from signal handler: invalidate the code and unprotect the
2144 page. Return TRUE if the fault was successfully handled. */
2145 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2147 unsigned int page_index, prot, pindex;
2148 PageDesc *p, *p1;
2149 target_ulong host_start, host_end, addr;
2151 /* Technically this isn't safe inside a signal handler. However we
2152 know this only ever happens in a synchronous SEGV handler, so in
2153 practice it seems to be ok. */
2154 mmap_lock();
2156 host_start = address & qemu_host_page_mask;
2157 page_index = host_start >> TARGET_PAGE_BITS;
2158 p1 = page_find(page_index);
2159 if (!p1) {
2160 mmap_unlock();
2161 return 0;
2163 host_end = host_start + qemu_host_page_size;
2164 p = p1;
2165 prot = 0;
2166 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2167 prot |= p->flags;
2168 p++;
2170 /* if the page was really writable, then we change its
2171 protection back to writable */
2172 if (prot & PAGE_WRITE_ORG) {
2173 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2174 if (!(p1[pindex].flags & PAGE_WRITE)) {
2175 mprotect((void *)g2h(host_start), qemu_host_page_size,
2176 (prot & PAGE_BITS) | PAGE_WRITE);
2177 p1[pindex].flags |= PAGE_WRITE;
2178 /* and since the content will be modified, we must invalidate
2179 the corresponding translated code. */
2180 tb_invalidate_phys_page(address, pc, puc);
2181 #ifdef DEBUG_TB_CHECK
2182 tb_invalidate_check(address);
2183 #endif
2184 mmap_unlock();
2185 return 1;
2188 mmap_unlock();
2189 return 0;
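/* page_unprotect() is the second half of the self-modifying-code scheme
   used in user mode: pages containing translated code are mprotect()ed
   read-only (PAGE_WRITE cleared, PAGE_WRITE_ORG kept), a guest write then
   faults, the SEGV handler calls page_unprotect(), the stale TBs are
   invalidated and PAGE_WRITE is restored before the write is retried. */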
2192 static inline void tlb_set_dirty(CPUState *env,
2193 unsigned long addr, target_ulong vaddr)
2196 #endif /* defined(CONFIG_USER_ONLY) */
2198 #if !defined(CONFIG_USER_ONLY)
2199 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2200 ram_addr_t memory);
2201 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2202 ram_addr_t orig_memory);
2203 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2204 need_subpage) \
2205 do { \
2206 if (addr > start_addr) \
2207 start_addr2 = 0; \
2208 else { \
2209 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2210 if (start_addr2 > 0) \
2211 need_subpage = 1; \
2214 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2215 end_addr2 = TARGET_PAGE_SIZE - 1; \
2216 else { \
2217 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2218 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2219 need_subpage = 1; \
2221 } while (0)
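/* Worked example for CHECK_SUBPAGE (illustrative values, 4K target pages):
   registering start_addr = 0x1000200 with orig_size = 0x100 against the
   page at addr = 0x1000000 yields start_addr2 = 0x200, end_addr2 = 0x2ff
   and need_subpage = 1, i.e. only bytes 0x200..0x2ff of that page are
   covered and a subpage must be used. */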
2223 /* register physical memory. 'size' must be a multiple of the target
2224 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2225 io memory page */
2226 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2227 ram_addr_t size,
2228 ram_addr_t phys_offset)
2230 target_phys_addr_t addr, end_addr;
2231 PhysPageDesc *p;
2232 CPUState *env;
2233 ram_addr_t orig_size = size;
2234 void *subpage;
2236 #ifdef USE_KQEMU
2237 /* XXX: should not depend on cpu context */
2238 env = first_cpu;
2239 if (env->kqemu_enabled) {
2240 kqemu_set_phys_mem(start_addr, size, phys_offset);
2242 #endif
2243 #ifdef USE_KVM
2244 if (kvm_enabled())
2245 kvm_cpu_register_physical_memory(start_addr, size, phys_offset);
2246 #endif
2248 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2249 end_addr = start_addr + (target_phys_addr_t)size;
2250 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2251 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2252 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2253 ram_addr_t orig_memory = p->phys_offset;
2254 target_phys_addr_t start_addr2, end_addr2;
2255 int need_subpage = 0;
2257 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2258 need_subpage);
2259 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2260 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2261 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2262 &p->phys_offset, orig_memory);
2263 } else {
2264 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2265 >> IO_MEM_SHIFT];
2267 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2268 } else {
2269 p->phys_offset = phys_offset;
2270 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2271 (phys_offset & IO_MEM_ROMD))
2272 phys_offset += TARGET_PAGE_SIZE;
2274 } else {
2275 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2276 p->phys_offset = phys_offset;
2277 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2278 (phys_offset & IO_MEM_ROMD))
2279 phys_offset += TARGET_PAGE_SIZE;
2280 else {
2281 target_phys_addr_t start_addr2, end_addr2;
2282 int need_subpage = 0;
2284 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2285 end_addr2, need_subpage);
2287 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2288 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2289 &p->phys_offset, IO_MEM_UNASSIGNED);
2290 subpage_register(subpage, start_addr2, end_addr2,
2291 phys_offset);
2297 /* since each CPU stores ram addresses in its TLB cache, we must
2298 reset the modified entries */
2299 /* XXX: slow ! */
2300 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2301 tlb_flush(env, 1);
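/* Typical call pattern (illustrative only; the address, size and RAM
   offset below are made up):

       ram_addr_t off = qemu_ram_alloc(0x2000);
       cpu_register_physical_memory(0xd0000000, 0x2000, off | IO_MEM_RAM);

   For MMIO regions the low bits of phys_offset carry the value returned by
   cpu_register_io_memory() instead of a RAM offset. */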
2305 /* XXX: temporary until new memory mapping API */
2306 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2308 PhysPageDesc *p;
2310 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2311 if (!p)
2312 return IO_MEM_UNASSIGNED;
2313 return p->phys_offset;
2316 /* XXX: better than nothing */
2317 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2319 ram_addr_t addr;
2320 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2321 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2322 (uint64_t)size, (uint64_t)phys_ram_size);
2323 abort();
2325 addr = phys_ram_alloc_offset;
2326 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2327 return addr;
2330 void qemu_ram_free(ram_addr_t addr)
2334 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2336 #ifdef DEBUG_UNASSIGNED
2337 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2338 #endif
2339 #ifdef TARGET_SPARC
2340 do_unassigned_access(addr, 0, 0, 0);
2341 #elif defined(TARGET_CRIS)
2342 do_unassigned_access(addr, 0, 0, 0);
2343 #endif
2344 return 0;
2347 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2349 #ifdef DEBUG_UNASSIGNED
2350 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2351 #endif
2352 #ifdef TARGET_SPARC
2353 do_unassigned_access(addr, 1, 0, 0);
2354 #elif defined(TARGET_CRIS)
2355 do_unassigned_access(addr, 1, 0, 0);
2356 #endif
2359 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2360 unassigned_mem_readb,
2361 unassigned_mem_readb,
2362 unassigned_mem_readb,
2365 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2366 unassigned_mem_writeb,
2367 unassigned_mem_writeb,
2368 unassigned_mem_writeb,
2371 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2372 uint32_t val)
2374 int dirty_flags;
2375 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2376 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2377 #if !defined(CONFIG_USER_ONLY)
2378 tb_invalidate_phys_page_fast(ram_addr, 1);
2379 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2380 #endif
2382 stb_p(phys_ram_base + ram_addr, val);
2383 #ifdef USE_KQEMU
2384 if (cpu_single_env->kqemu_enabled &&
2385 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2386 kqemu_modify_page(cpu_single_env, ram_addr);
2387 #endif
2388 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2389 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2390 /* we remove the notdirty callback only if the code has been
2391 flushed */
2392 if (dirty_flags == 0xff)
2393 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2396 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2397 uint32_t val)
2399 int dirty_flags;
2400 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2401 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2402 #if !defined(CONFIG_USER_ONLY)
2403 tb_invalidate_phys_page_fast(ram_addr, 2);
2404 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2405 #endif
2407 stw_p(phys_ram_base + ram_addr, val);
2408 #ifdef USE_KQEMU
2409 if (cpu_single_env->kqemu_enabled &&
2410 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2411 kqemu_modify_page(cpu_single_env, ram_addr);
2412 #endif
2413 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2414 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2415 /* we remove the notdirty callback only if the code has been
2416 flushed */
2417 if (dirty_flags == 0xff)
2418 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2421 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2422 uint32_t val)
2424 int dirty_flags;
2425 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2426 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2427 #if !defined(CONFIG_USER_ONLY)
2428 tb_invalidate_phys_page_fast(ram_addr, 4);
2429 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2430 #endif
2432 stl_p(phys_ram_base + ram_addr, val);
2433 #ifdef USE_KQEMU
2434 if (cpu_single_env->kqemu_enabled &&
2435 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2436 kqemu_modify_page(cpu_single_env, ram_addr);
2437 #endif
2438 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2439 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2440 /* we remove the notdirty callback only if the code has been
2441 flushed */
2442 if (dirty_flags == 0xff)
2443 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2446 static CPUReadMemoryFunc *error_mem_read[3] = {
2447 NULL, /* never used */
2448 NULL, /* never used */
2449 NULL, /* never used */
2452 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2453 notdirty_mem_writeb,
2454 notdirty_mem_writew,
2455 notdirty_mem_writel,
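/* The notdirty handlers above implement dirty tracking for RAM: each page
   has a byte in phys_ram_dirty, and a cleared CODE_DIRTY_FLAG means
   translated code may exist for that page.  The first write therefore goes
   through these slow handlers, invalidates the TBs, sets the remaining
   dirty bits and, once the byte reaches 0xff, flips the TLB entry back to
   the fast RAM path via tlb_set_dirty(). */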
2458 /* Generate a debug exception if a watchpoint has been hit. */
2459 static void check_watchpoint(int offset, int flags)
2461 CPUState *env = cpu_single_env;
2462 target_ulong vaddr;
2463 int i;
2465 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2466 for (i = 0; i < env->nb_watchpoints; i++) {
2467 if (vaddr == env->watchpoint[i].vaddr
2468 && (env->watchpoint[i].type & flags)) {
2469 env->watchpoint_hit = i + 1;
2470 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2471 break;
2476 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2477 so these check for a hit then pass through to the normal out-of-line
2478 phys routines. */
2479 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2481 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2482 return ldub_phys(addr);
2485 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2487 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2488 return lduw_phys(addr);
2491 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2493 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2494 return ldl_phys(addr);
2497 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2498 uint32_t val)
2500 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2501 stb_phys(addr, val);
2504 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2505 uint32_t val)
2507 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2508 stw_phys(addr, val);
2511 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2512 uint32_t val)
2514 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2515 stl_phys(addr, val);
2518 static CPUReadMemoryFunc *watch_mem_read[3] = {
2519 watch_mem_readb,
2520 watch_mem_readw,
2521 watch_mem_readl,
2524 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2525 watch_mem_writeb,
2526 watch_mem_writew,
2527 watch_mem_writel,
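/* Watchpoints reuse the MMIO path: tlb_set_page_exec() tags a page that
   contains a watchpoint with TLB_MMIO and points its iotlb entry at
   io_mem_watch, so every access to that page lands in the watch_mem_*
   handlers, which call check_watchpoint() and then fall through to the
   normal ld*_phys/st*_phys accessors. */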
2530 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2531 unsigned int len)
2533 uint32_t ret;
2534 unsigned int idx;
2536 idx = SUBPAGE_IDX(addr - mmio->base);
2537 #if defined(DEBUG_SUBPAGE)
2538 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2539 mmio, len, addr, idx);
2540 #endif
2541 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2543 return ret;
2546 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2547 uint32_t value, unsigned int len)
2549 unsigned int idx;
2551 idx = SUBPAGE_IDX(addr - mmio->base);
2552 #if defined(DEBUG_SUBPAGE)
2553 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2554 mmio, len, addr, idx, value);
2555 #endif
2556 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2559 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2561 #if defined(DEBUG_SUBPAGE)
2562 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2563 #endif
2565 return subpage_readlen(opaque, addr, 0);
2568 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2569 uint32_t value)
2571 #if defined(DEBUG_SUBPAGE)
2572 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2573 #endif
2574 subpage_writelen(opaque, addr, value, 0);
2577 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2579 #if defined(DEBUG_SUBPAGE)
2580 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2581 #endif
2583 return subpage_readlen(opaque, addr, 1);
2586 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2587 uint32_t value)
2589 #if defined(DEBUG_SUBPAGE)
2590 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2591 #endif
2592 subpage_writelen(opaque, addr, value, 1);
2595 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2597 #if defined(DEBUG_SUBPAGE)
2598 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2599 #endif
2601 return subpage_readlen(opaque, addr, 2);
2604 static void subpage_writel (void *opaque,
2605 target_phys_addr_t addr, uint32_t value)
2607 #if defined(DEBUG_SUBPAGE)
2608 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2609 #endif
2610 subpage_writelen(opaque, addr, value, 2);
2613 static CPUReadMemoryFunc *subpage_read[] = {
2614 &subpage_readb,
2615 &subpage_readw,
2616 &subpage_readl,
2619 static CPUWriteMemoryFunc *subpage_write[] = {
2620 &subpage_writeb,
2621 &subpage_writew,
2622 &subpage_writel,
2625 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2626 ram_addr_t memory)
2628 int idx, eidx;
2629 unsigned int i;
2631 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2632 return -1;
2633 idx = SUBPAGE_IDX(start);
2634 eidx = SUBPAGE_IDX(end);
2635 #if defined(DEBUG_SUBPAGE)
2636 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2637 mmio, start, end, idx, eidx, memory);
2638 #endif
2639 memory >>= IO_MEM_SHIFT;
2640 for (; idx <= eidx; idx++) {
2641 for (i = 0; i < 4; i++) {
2642 if (io_mem_read[memory][i]) {
2643 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2644 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2646 if (io_mem_write[memory][i]) {
2647 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2648 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2653 return 0;
2656 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2657 ram_addr_t orig_memory)
2659 subpage_t *mmio;
2660 int subpage_memory;
2662 mmio = qemu_mallocz(sizeof(subpage_t));
2663 if (mmio != NULL) {
2664 mmio->base = base;
2665 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2666 #if defined(DEBUG_SUBPAGE)
2667 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2668 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2669 #endif
2670 *phys = subpage_memory | IO_MEM_SUBPAGE;
2671 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2674 return mmio;
2677 static int get_free_io_mem_idx(void)
2679 int i;
2681 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2682 if (!io_mem_used[i]) {
2683 io_mem_used[i] = 1;
2684 return i;
2687 return -1;
2690 static void io_mem_init(void)
2692 int i;
2694 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2695 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2696 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2697 for (i=0; i<5; i++)
2698 io_mem_used[i] = 1;
2700 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2701 watch_mem_write, NULL);
2702 /* alloc dirty bits array */
2703 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2704 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
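/* The dirty bitmap starts as all 0xff (every page fully dirty), so no RAM
   page takes the notdirty slow path until a client clears bits for the
   ranges it wants to track (see e.g. the in_migration handling further
   down). */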
2707 /* mem_read and mem_write are arrays of functions containing the
2708 function to access byte (index 0), word (index 1) and dword (index
2709 2). Functions can be omitted with a NULL function pointer. The
2710 registered functions may be modified dynamically later.
2711 If io_index is non-zero, the corresponding io zone is
2712 modified. If it is zero, a new io zone is allocated. The return
2713 value can be used with cpu_register_physical_memory(). (-1) is
2714 returned on error. */
2715 int cpu_register_io_memory(int io_index,
2716 CPUReadMemoryFunc **mem_read,
2717 CPUWriteMemoryFunc **mem_write,
2718 void *opaque)
2720 int i, subwidth = 0;
2722 if (io_index <= 0) {
2723 io_index = get_free_io_mem_idx();
2724 if (io_index == -1)
2725 return io_index;
2726 } else {
2727 if (io_index >= IO_MEM_NB_ENTRIES)
2728 return -1;
2731 for(i = 0;i < 3; i++) {
2732 if (!mem_read[i] || !mem_write[i])
2733 subwidth = IO_MEM_SUBWIDTH;
2734 io_mem_read[io_index][i] = mem_read[i];
2735 io_mem_write[io_index][i] = mem_write[i];
2737 io_mem_opaque[io_index] = opaque;
2738 return (io_index << IO_MEM_SHIFT) | subwidth;
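/* Illustrative registration of an MMIO region (the mydev_* callbacks, the
   opaque pointer s and the address below are hypothetical):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   A NULL entry in either array marks that width as unsupported; the
   IO_MEM_SUBWIDTH bit in the return value then forces subpage dispatch. */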
2741 void cpu_unregister_io_memory(int io_table_address)
2743 int i;
2744 int io_index = io_table_address >> IO_MEM_SHIFT;
2746 for (i=0;i < 3; i++) {
2747 io_mem_read[io_index][i] = unassigned_mem_read[i];
2748 io_mem_write[io_index][i] = unassigned_mem_write[i];
2750 io_mem_opaque[io_index] = NULL;
2751 io_mem_used[io_index] = 0;
2754 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2756 return io_mem_write[io_index >> IO_MEM_SHIFT];
2759 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2761 return io_mem_read[io_index >> IO_MEM_SHIFT];
2764 #endif /* !defined(CONFIG_USER_ONLY) */
2766 /* physical memory access (slow version, mainly for debug) */
2767 #if defined(CONFIG_USER_ONLY)
2768 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2769 int len, int is_write)
2771 int l, flags;
2772 target_ulong page;
2773 void * p;
2775 while (len > 0) {
2776 page = addr & TARGET_PAGE_MASK;
2777 l = (page + TARGET_PAGE_SIZE) - addr;
2778 if (l > len)
2779 l = len;
2780 flags = page_get_flags(page);
2781 if (!(flags & PAGE_VALID))
2782 return;
2783 if (is_write) {
2784 if (!(flags & PAGE_WRITE))
2785 return;
2786 /* XXX: this code should not depend on lock_user */
2787 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2788 /* FIXME - should this return an error rather than just fail? */
2789 return;
2790 memcpy(p, buf, l);
2791 unlock_user(p, addr, l);
2792 } else {
2793 if (!(flags & PAGE_READ))
2794 return;
2795 /* XXX: this code should not depend on lock_user */
2796 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2797 /* FIXME - should this return an error rather than just fail? */
2798 return;
2799 memcpy(buf, p, l);
2800 unlock_user(p, addr, 0);
2802 len -= l;
2803 buf += l;
2804 addr += l;
2808 #else
2809 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2810 int len, int is_write)
2812 int l, io_index;
2813 uint8_t *ptr;
2814 uint32_t val;
2815 target_phys_addr_t page;
2816 unsigned long pd;
2817 PhysPageDesc *p;
2819 while (len > 0) {
2820 page = addr & TARGET_PAGE_MASK;
2821 l = (page + TARGET_PAGE_SIZE) - addr;
2822 if (l > len)
2823 l = len;
2824 p = phys_page_find(page >> TARGET_PAGE_BITS);
2825 if (!p) {
2826 pd = IO_MEM_UNASSIGNED;
2827 } else {
2828 pd = p->phys_offset;
2831 if (is_write) {
2832 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2833 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2834 /* XXX: could force cpu_single_env to NULL to avoid
2835 potential bugs */
2836 if (l >= 4 && ((addr & 3) == 0)) {
2837 /* 32 bit write access */
2838 val = ldl_p(buf);
2839 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2840 l = 4;
2841 } else if (l >= 2 && ((addr & 1) == 0)) {
2842 /* 16 bit write access */
2843 val = lduw_p(buf);
2844 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2845 l = 2;
2846 } else {
2847 /* 8 bit write access */
2848 val = ldub_p(buf);
2849 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2850 l = 1;
2852 } else {
2853 unsigned long addr1;
2854 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2855 /* RAM case */
2856 ptr = phys_ram_base + addr1;
2857 memcpy(ptr, buf, l);
2858 if (!cpu_physical_memory_is_dirty(addr1)) {
2859 /* invalidate code */
2860 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2861 /* set dirty bit */
2862 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2863 (0xff & ~CODE_DIRTY_FLAG);
2865 /* qemu doesn't execute guest code directly, but kvm does,
2866 so flush the instruction caches */
2867 if (kvm_enabled())
2868 flush_icache_range((unsigned long)ptr,
2869 ((unsigned long)ptr)+l);
2871 } else {
2872 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2873 !(pd & IO_MEM_ROMD)) {
2874 /* I/O case */
2875 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2876 if (l >= 4 && ((addr & 3) == 0)) {
2877 /* 32 bit read access */
2878 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2879 stl_p(buf, val);
2880 l = 4;
2881 } else if (l >= 2 && ((addr & 1) == 0)) {
2882 /* 16 bit read access */
2883 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2884 stw_p(buf, val);
2885 l = 2;
2886 } else {
2887 /* 8 bit read access */
2888 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2889 stb_p(buf, val);
2890 l = 1;
2892 } else {
2893 /* RAM case */
2894 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2895 (addr & ~TARGET_PAGE_MASK);
2896 memcpy(buf, ptr, l);
2899 len -= l;
2900 buf += l;
2901 addr += l;
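/* Illustrative use of cpu_physical_memory_rw() (gpa is a hypothetical
   guest-physical address):

       uint8_t buf[4];
       cpu_physical_memory_rw(gpa, buf, 4, 0);    <- is_write = 0, i.e. a read
       uint32_t v = ldl_p(buf);

   The ldl_phys()/stl_phys() helpers below perform the same page lookup for
   the common aligned cases without the generic splitting loop. */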
2905 /* used for ROM loading: can write into RAM and ROM */
2906 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2907 const uint8_t *buf, int len)
2909 int l;
2910 uint8_t *ptr;
2911 target_phys_addr_t page;
2912 unsigned long pd;
2913 PhysPageDesc *p;
2915 while (len > 0) {
2916 page = addr & TARGET_PAGE_MASK;
2917 l = (page + TARGET_PAGE_SIZE) - addr;
2918 if (l > len)
2919 l = len;
2920 p = phys_page_find(page >> TARGET_PAGE_BITS);
2921 if (!p) {
2922 pd = IO_MEM_UNASSIGNED;
2923 } else {
2924 pd = p->phys_offset;
2927 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2928 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2929 !(pd & IO_MEM_ROMD)) {
2930 /* do nothing */
2931 } else {
2932 unsigned long addr1;
2933 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2934 /* ROM/RAM case */
2935 ptr = phys_ram_base + addr1;
2936 memcpy(ptr, buf, l);
2938 len -= l;
2939 buf += l;
2940 addr += l;
2945 /* warning: addr must be aligned */
2946 uint32_t ldl_phys(target_phys_addr_t addr)
2948 int io_index;
2949 uint8_t *ptr;
2950 uint32_t val;
2951 unsigned long pd;
2952 PhysPageDesc *p;
2954 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2955 if (!p) {
2956 pd = IO_MEM_UNASSIGNED;
2957 } else {
2958 pd = p->phys_offset;
2961 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2962 !(pd & IO_MEM_ROMD)) {
2963 /* I/O case */
2964 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2965 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2966 } else {
2967 /* RAM case */
2968 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2969 (addr & ~TARGET_PAGE_MASK);
2970 val = ldl_p(ptr);
2972 return val;
2975 /* warning: addr must be aligned */
2976 uint64_t ldq_phys(target_phys_addr_t addr)
2978 int io_index;
2979 uint8_t *ptr;
2980 uint64_t val;
2981 unsigned long pd;
2982 PhysPageDesc *p;
2984 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2985 if (!p) {
2986 pd = IO_MEM_UNASSIGNED;
2987 } else {
2988 pd = p->phys_offset;
2991 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2992 !(pd & IO_MEM_ROMD)) {
2993 /* I/O case */
2994 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2995 #ifdef TARGET_WORDS_BIGENDIAN
2996 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2997 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2998 #else
2999 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3000 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3001 #endif
3002 } else {
3003 /* RAM case */
3004 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3005 (addr & ~TARGET_PAGE_MASK);
3006 val = ldq_p(ptr);
3008 return val;
3011 /* XXX: optimize */
3012 uint32_t ldub_phys(target_phys_addr_t addr)
3014 uint8_t val;
3015 cpu_physical_memory_read(addr, &val, 1);
3016 return val;
3019 /* XXX: optimize */
3020 uint32_t lduw_phys(target_phys_addr_t addr)
3022 uint16_t val;
3023 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3024 return tswap16(val);
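/* The *_phys accessors follow the usual width suffixes (b = 8, w = 16,
   l = 32, q = 64 bits).  Several of the narrower variants above and below
   simply bounce through cpu_physical_memory_rw() (hence the "XXX: optimize"
   notes), while the 32/64-bit loads, stl_phys() and the *_phys_notdirty
   stores walk the physical page table directly. */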
3027 #ifdef __GNUC__
3028 #define likely(x) __builtin_expect(!!(x), 1)
3029 #define unlikely(x) __builtin_expect(!!(x), 0)
3030 #else
3031 #define likely(x) x
3032 #define unlikely(x) x
3033 #endif
3035 /* warning: addr must be aligned. The ram page is not marked as dirty
3036 and the code inside is not invalidated. This is useful if the dirty
3037 bits are used to track modified PTEs */
3038 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3040 int io_index;
3041 uint8_t *ptr;
3042 unsigned long pd;
3043 PhysPageDesc *p;
3045 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3046 if (!p) {
3047 pd = IO_MEM_UNASSIGNED;
3048 } else {
3049 pd = p->phys_offset;
3052 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3053 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3054 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3055 } else {
3056 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3057 ptr = phys_ram_base + addr1;
3058 stl_p(ptr, val);
3060 if (unlikely(in_migration)) {
3061 if (!cpu_physical_memory_is_dirty(addr1)) {
3062 /* invalidate code */
3063 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3064 /* set dirty bit */
3065 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3066 (0xff & ~CODE_DIRTY_FLAG);
3072 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3074 int io_index;
3075 uint8_t *ptr;
3076 unsigned long pd;
3077 PhysPageDesc *p;
3079 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3080 if (!p) {
3081 pd = IO_MEM_UNASSIGNED;
3082 } else {
3083 pd = p->phys_offset;
3086 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3087 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3088 #ifdef TARGET_WORDS_BIGENDIAN
3089 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3090 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3091 #else
3092 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3093 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3094 #endif
3095 } else {
3096 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3097 (addr & ~TARGET_PAGE_MASK);
3098 stq_p(ptr, val);
3102 /* warning: addr must be aligned */
3103 void stl_phys(target_phys_addr_t addr, uint32_t val)
3105 int io_index;
3106 uint8_t *ptr;
3107 unsigned long pd;
3108 PhysPageDesc *p;
3110 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3111 if (!p) {
3112 pd = IO_MEM_UNASSIGNED;
3113 } else {
3114 pd = p->phys_offset;
3117 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3118 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3119 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3120 } else {
3121 unsigned long addr1;
3122 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3123 /* RAM case */
3124 ptr = phys_ram_base + addr1;
3125 stl_p(ptr, val);
3126 if (!cpu_physical_memory_is_dirty(addr1)) {
3127 /* invalidate code */
3128 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3129 /* set dirty bit */
3130 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3131 (0xff & ~CODE_DIRTY_FLAG);
3136 /* XXX: optimize */
3137 void stb_phys(target_phys_addr_t addr, uint32_t val)
3139 uint8_t v = val;
3140 cpu_physical_memory_write(addr, &v, 1);
3143 /* XXX: optimize */
3144 void stw_phys(target_phys_addr_t addr, uint32_t val)
3146 uint16_t v = tswap16(val);
3147 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3150 /* XXX: optimize */
3151 void stq_phys(target_phys_addr_t addr, uint64_t val)
3153 val = tswap64(val);
3154 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3157 #endif
3159 /* virtual memory access for debug */
3160 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3161 uint8_t *buf, int len, int is_write)
3163 int l;
3164 target_phys_addr_t phys_addr;
3165 target_ulong page;
3167 while (len > 0) {
3168 page = addr & TARGET_PAGE_MASK;
3169 phys_addr = cpu_get_phys_page_debug(env, page);
3170 /* if no physical page mapped, return an error */
3171 if (phys_addr == -1)
3172 return -1;
3173 l = (page + TARGET_PAGE_SIZE) - addr;
3174 if (l > len)
3175 l = len;
3176 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3177 buf, l, is_write);
3178 len -= l;
3179 buf += l;
3180 addr += l;
3182 return 0;
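/* cpu_memory_rw_debug() works on guest-virtual addresses: each page is
   translated with cpu_get_phys_page_debug() and the access is then
   delegated to cpu_physical_memory_rw().  It is meant for debugger-style
   accesses (e.g. the gdb stub), hence the _debug suffix. */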
3185 /* in deterministic execution mode, instructions doing device I/Os
3186 must be at the end of the TB */
3187 void cpu_io_recompile(CPUState *env, void *retaddr)
3189 TranslationBlock *tb;
3190 uint32_t n, cflags;
3191 target_ulong pc, cs_base;
3192 uint64_t flags;
3194 tb = tb_find_pc((unsigned long)retaddr);
3195 if (!tb) {
3196 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3197 retaddr);
3199 n = env->icount_decr.u16.low + tb->icount;
3200 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3201 /* Calculate how many instructions had been executed before the fault
3202 occurred. */
3203 n = n - env->icount_decr.u16.low;
3204 /* Generate a new TB ending on the I/O insn. */
3205 n++;
3206 /* On MIPS and SH, delay slot instructions can only be restarted if
3207 they were already the first instruction in the TB. If this is not
3208 the first instruction in a TB then re-execute the preceding
3209 branch. */
3210 #if defined(TARGET_MIPS)
3211 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3212 env->active_tc.PC -= 4;
3213 env->icount_decr.u16.low++;
3214 env->hflags &= ~MIPS_HFLAG_BMASK;
3216 #elif defined(TARGET_SH4)
3217 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3218 && n > 1) {
3219 env->pc -= 2;
3220 env->icount_decr.u16.low++;
3221 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3223 #endif
3224 /* This should never happen. */
3225 if (n > CF_COUNT_MASK)
3226 cpu_abort(env, "TB too big during recompile");
3228 cflags = n | CF_LAST_IO;
3229 pc = tb->pc;
3230 cs_base = tb->cs_base;
3231 flags = tb->flags;
3232 tb_phys_invalidate(tb, -1);
3233 /* FIXME: In theory this could raise an exception. In practice
3234 we have already translated the block once so it's probably ok. */
3235 tb_gen_code(env, pc, cs_base, flags, cflags);
3236 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3237 the first in the TB) then we end up generating a whole new TB and
3238 repeating the fault, which is horribly inefficient.
3239 Better would be to execute just this insn uncached, or generate a
3240 second new TB. */
3241 cpu_resume_from_signal(env, NULL);
3244 void dump_exec_info(FILE *f,
3245 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3247 int i, target_code_size, max_target_code_size;
3248 int direct_jmp_count, direct_jmp2_count, cross_page;
3249 TranslationBlock *tb;
3251 target_code_size = 0;
3252 max_target_code_size = 0;
3253 cross_page = 0;
3254 direct_jmp_count = 0;
3255 direct_jmp2_count = 0;
3256 for(i = 0; i < nb_tbs; i++) {
3257 tb = &tbs[i];
3258 target_code_size += tb->size;
3259 if (tb->size > max_target_code_size)
3260 max_target_code_size = tb->size;
3261 if (tb->page_addr[1] != -1)
3262 cross_page++;
3263 if (tb->tb_next_offset[0] != 0xffff) {
3264 direct_jmp_count++;
3265 if (tb->tb_next_offset[1] != 0xffff) {
3266 direct_jmp2_count++;
3270 /* XXX: avoid using doubles ? */
3271 cpu_fprintf(f, "Translation buffer state:\n");
3272 cpu_fprintf(f, "gen code size %ld/%ld\n",
3273 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3274 cpu_fprintf(f, "TB count %d/%d\n",
3275 nb_tbs, code_gen_max_blocks);
3276 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3277 nb_tbs ? target_code_size / nb_tbs : 0,
3278 max_target_code_size);
3279 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3280 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3281 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3282 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3283 cross_page,
3284 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3285 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3286 direct_jmp_count,
3287 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3288 direct_jmp2_count,
3289 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3290 cpu_fprintf(f, "\nStatistics:\n");
3291 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3292 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3293 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3294 tcg_dump_info(f, cpu_fprintf);
3297 #if !defined(CONFIG_USER_ONLY)
3299 #define MMUSUFFIX _cmmu
3300 #define GETPC() NULL
3301 #define env cpu_single_env
3302 #define SOFTMMU_CODE_ACCESS
3304 #define SHIFT 0
3305 #include "softmmu_template.h"
3307 #define SHIFT 1
3308 #include "softmmu_template.h"
3310 #define SHIFT 2
3311 #include "softmmu_template.h"
3313 #define SHIFT 3
3314 #include "softmmu_template.h"
3316 #undef env
3318 #endif