Remove change to TARGET_PAGE_SIZE
[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #if defined(CONFIG_USER_ONLY)
47 #include <qemu.h>
48 #endif
50 //#define DEBUG_TB_INVALIDATE
51 //#define DEBUG_FLUSH
52 //#define DEBUG_TLB
53 //#define DEBUG_UNASSIGNED
55 /* make various TB consistency checks */
56 //#define DEBUG_TB_CHECK
57 //#define DEBUG_TLB_CHECK
59 //#define DEBUG_IOPORT
60 //#define DEBUG_SUBPAGE
62 #if !defined(CONFIG_USER_ONLY)
63 /* TB consistency checks only implemented for usermode emulation. */
64 #undef DEBUG_TB_CHECK
65 #endif
67 #define SMC_BITMAP_USE_THRESHOLD 10
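/* When a page holding translated code sees this many CPU write accesses
   (see tb_invalidate_phys_page_range below), a per-page bitmap of the bytes
   covered by TBs is built so that later writes which do not touch translated
   code can skip the invalidation path. */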
69 #define MMAP_AREA_START 0x00000000
70 #define MMAP_AREA_END 0xa8000000
72 #if defined(TARGET_SPARC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 41
74 #elif defined(TARGET_SPARC)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 36
76 #elif defined(TARGET_ALPHA)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #define TARGET_VIRT_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_PPC64)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
85 #else
86 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
87 #define TARGET_PHYS_ADDR_SPACE_BITS 32
88 #endif
90 TranslationBlock *tbs;
91 int code_gen_max_blocks;
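/* tb_phys_hash is a global hash table indexed by the physical PC of a TB
   (tb_phys_hash_func); each CPU also keeps a per-CPU tb_jmp_cache keyed on
   the virtual PC (tb_jmp_cache_hash_func), which tb_flush and tlb_flush
   clear below. */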
92 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
93 int nb_tbs;
94 /* any access to the tbs or the page table must use this lock */
95 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
97 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
98 uint8_t *code_gen_buffer;
99 unsigned long code_gen_buffer_size;
100 /* threshold to flush the translated code buffer */
101 unsigned long code_gen_buffer_max_size;
102 uint8_t *code_gen_ptr;
104 #if !defined(CONFIG_USER_ONLY)
105 ram_addr_t phys_ram_size;
106 int phys_ram_fd;
107 uint8_t *phys_ram_base;
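/* phys_ram_dirty holds one byte of dirty flags per target page of RAM
   (e.g. CODE_DIRTY_FLAG); cpu_physical_memory_reset_dirty() clears the
   requested flags over a range. */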
108 uint8_t *phys_ram_dirty;
109 uint8_t *bios_mem;
110 static int in_migration;
111 static ram_addr_t phys_ram_alloc_offset = 0;
112 #endif
114 CPUState *first_cpu;
115 /* current CPU in the current thread. It is only valid inside
116 cpu_exec() */
117 CPUState *cpu_single_env;
118 /* 0 = Do not count executed instructions.
119 1 = Precise instruction counting.
120 2 = Adaptive rate instruction counting. */
121 int use_icount = 0;
122 /* Current instruction counter. While executing translated code this may
123 include some instructions that have not yet been executed. */
124 int64_t qemu_icount;
126 typedef struct PageDesc {
127 /* list of TBs intersecting this ram page */
128 TranslationBlock *first_tb;
129 /* in order to optimize self modifying code, we count the number
130 of code write accesses to a given page before switching to a bitmap */
131 unsigned int code_write_count;
132 uint8_t *code_bitmap;
133 #if defined(CONFIG_USER_ONLY)
134 unsigned long flags;
135 #endif
136 } PageDesc;
138 typedef struct PhysPageDesc {
139 /* offset in host memory of the page + io_index in the low bits */
140 ram_addr_t phys_offset;
141 } PhysPageDesc;
143 #define L2_BITS 10
144 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
145 /* XXX: this is a temporary hack for alpha target.
146 * In the future, this is to be replaced by a multi-level table
147 * to actually be able to handle the complete 64-bit address space. */
149 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
150 #else
151 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
152 #endif
154 #define L1_SIZE (1 << L1_BITS)
155 #define L2_SIZE (1 << L2_BITS)
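/* Page descriptors are kept in a two-level table: the page index
   (address >> TARGET_PAGE_BITS) is split into an L1 index (upper L1_BITS)
   and an L2 index (lower L2_BITS). For example, assuming TARGET_PAGE_BITS
   is 12, address 0x12345678 has page index 0x12345, L1 slot 0x48 and
   L2 slot 0x345. */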
157 unsigned long qemu_real_host_page_size;
158 unsigned long qemu_host_page_bits;
159 unsigned long qemu_host_page_size;
160 unsigned long qemu_host_page_mask;
162 /* XXX: for system emulation, it could just be an array */
163 static PageDesc *l1_map[L1_SIZE];
164 PhysPageDesc **l1_phys_map;
166 #if !defined(CONFIG_USER_ONLY)
167 static void io_mem_init(void);
169 /* io memory support */
170 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
171 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
172 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
173 static int io_mem_nb;
174 char io_mem_used[IO_MEM_NB_ENTRIES];
175 static int io_mem_watch;
176 #endif
178 /* log support */
179 char *logfilename = "/tmp/qemu.log";
180 FILE *logfile;
181 int loglevel;
182 static int log_append = 0;
184 /* statistics */
185 static int tlb_flush_count;
186 static int tb_flush_count;
187 static int tb_phys_invalidate_count;
189 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
190 typedef struct subpage_t {
191 target_phys_addr_t base;
192 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
193 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
194 void *opaque[TARGET_PAGE_SIZE][2][4];
195 } subpage_t;
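/* A subpage_t dispatches accesses within a single target page to several
   backing regions; it is used when memory registered with
   cpu_register_physical_memory() does not start or end on a page boundary
   (see CHECK_SUBPAGE below). The handler tables are indexed by the byte
   offset inside the page (SUBPAGE_IDX) and by the access size. */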
197 #ifdef _WIN32
198 static void map_exec(void *addr, long size)
200 DWORD old_protect;
201 VirtualProtect(addr, size,
202 PAGE_EXECUTE_READWRITE, &old_protect);
205 #else
206 static void map_exec(void *addr, long size)
208 unsigned long start, end, page_size;
210 page_size = getpagesize();
211 start = (unsigned long)addr;
212 start &= ~(page_size - 1);
214 end = (unsigned long)addr + size;
215 end += page_size - 1;
216 end &= ~(page_size - 1);
218 mprotect((void *)start, end - start,
219 PROT_READ | PROT_WRITE | PROT_EXEC);
221 #endif
223 static void page_init(void)
225 /* NOTE: we can always suppose that qemu_host_page_size >=
226 TARGET_PAGE_SIZE */
227 #ifdef _WIN32
229 SYSTEM_INFO system_info;
230 DWORD old_protect;
232 GetSystemInfo(&system_info);
233 qemu_real_host_page_size = system_info.dwPageSize;
235 #else
236 qemu_real_host_page_size = getpagesize();
237 #endif
238 if (qemu_host_page_size == 0)
239 qemu_host_page_size = qemu_real_host_page_size;
240 if (qemu_host_page_size < TARGET_PAGE_SIZE)
241 qemu_host_page_size = TARGET_PAGE_SIZE;
242 qemu_host_page_bits = 0;
243 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
244 qemu_host_page_bits++;
245 qemu_host_page_mask = ~(qemu_host_page_size - 1);
246 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
247 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
249 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
251 long long startaddr, endaddr;
252 FILE *f;
253 int n;
255 mmap_lock();
256 last_brk = (unsigned long)sbrk(0);
257 f = fopen("/proc/self/maps", "r");
258 if (f) {
259 do {
260 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
261 if (n == 2) {
262 startaddr = MIN(startaddr,
263 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
264 endaddr = MIN(endaddr,
265 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
266 page_set_flags(startaddr & TARGET_PAGE_MASK,
267 TARGET_PAGE_ALIGN(endaddr),
268 PAGE_RESERVED);
270 } while (!feof(f));
271 fclose(f);
273 mmap_unlock();
275 #endif
278 static inline PageDesc *page_find_alloc(target_ulong index)
280 PageDesc **lp, *p;
282 #if TARGET_LONG_BITS > 32
283 /* Host memory outside guest VM. For 32-bit targets we have already
284 excluded high addresses. */
285 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
286 return NULL;
287 #endif
288 lp = &l1_map[index >> L2_BITS];
289 p = *lp;
290 if (!p) {
291 /* allocate if not found */
292 #if defined(CONFIG_USER_ONLY)
293 unsigned long addr;
294 size_t len = sizeof(PageDesc) * L2_SIZE;
295 /* Don't use qemu_malloc because it may recurse. */
296 p = mmap(0, len, PROT_READ | PROT_WRITE,
297 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
298 *lp = p;
299 addr = h2g(p);
300 if (addr == (target_ulong)addr) {
301 page_set_flags(addr & TARGET_PAGE_MASK,
302 TARGET_PAGE_ALIGN(addr + len),
303 PAGE_RESERVED);
305 #else
306 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
307 *lp = p;
308 #endif
310 return p + (index & (L2_SIZE - 1));
313 static inline PageDesc *page_find(target_ulong index)
315 PageDesc *p;
317 p = l1_map[index >> L2_BITS];
318 if (!p)
319 return 0;
320 return p + (index & (L2_SIZE - 1));
323 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
325 void **lp, **p;
326 PhysPageDesc *pd;
328 p = (void **)l1_phys_map;
329 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
331 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
332 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
333 #endif
334 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
335 p = *lp;
336 if (!p) {
337 /* allocate if not found */
338 if (!alloc)
339 return NULL;
340 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
341 memset(p, 0, sizeof(void *) * L1_SIZE);
342 *lp = p;
344 #endif
345 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
346 pd = *lp;
347 if (!pd) {
348 int i;
349 /* allocate if not found */
350 if (!alloc)
351 return NULL;
352 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
353 *lp = pd;
354 for (i = 0; i < L2_SIZE; i++)
355 pd[i].phys_offset = IO_MEM_UNASSIGNED;
357 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
360 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
362 return phys_page_find_alloc(index, 0);
365 #if !defined(CONFIG_USER_ONLY)
366 static void tlb_protect_code(ram_addr_t ram_addr);
367 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
368 target_ulong vaddr);
369 #define mmap_lock() do { } while(0)
370 #define mmap_unlock() do { } while(0)
371 #endif
373 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
375 #if defined(CONFIG_USER_ONLY)
376 /* Currently it is not recommended to allocate big chunks of data in
377 user mode. It will change when a dedicated libc is used */
378 #define USE_STATIC_CODE_GEN_BUFFER
379 #endif
381 #ifdef USE_STATIC_CODE_GEN_BUFFER
382 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
383 #endif
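/* code_gen_alloc() picks the buffer used for generated host code: a static
   array in user mode, otherwise an anonymous read/write/exec mmap. On
   x86_64 Linux, MAP_32BIT keeps the buffer in the low 2GB, presumably so
   that jumps and calls in generated code can use 32-bit displacements. */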
385 void code_gen_alloc(unsigned long tb_size)
387 #ifdef USE_STATIC_CODE_GEN_BUFFER
388 code_gen_buffer = static_code_gen_buffer;
389 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
390 map_exec(code_gen_buffer, code_gen_buffer_size);
391 #else
392 code_gen_buffer_size = tb_size;
393 if (code_gen_buffer_size == 0) {
394 #if defined(CONFIG_USER_ONLY)
395 /* in user mode, phys_ram_size is not meaningful */
396 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
397 #else
398 /* XXX: needs adjustments */
399 code_gen_buffer_size = (int)(phys_ram_size / 4);
400 #endif
402 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
403 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
404 /* The code gen buffer location may have constraints depending on
405 the host cpu and OS */
406 #if defined(__linux__)
408 int flags;
409 flags = MAP_PRIVATE | MAP_ANONYMOUS;
410 #if defined(__x86_64__)
411 flags |= MAP_32BIT;
412 /* Cannot map more than that */
413 if (code_gen_buffer_size > (800 * 1024 * 1024))
414 code_gen_buffer_size = (800 * 1024 * 1024);
415 #endif
416 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
417 PROT_WRITE | PROT_READ | PROT_EXEC,
418 flags, -1, 0);
419 if (code_gen_buffer == MAP_FAILED) {
420 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
421 exit(1);
424 #else
425 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
426 if (!code_gen_buffer) {
427 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
428 exit(1);
430 map_exec(code_gen_buffer, code_gen_buffer_size);
431 #endif
432 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
433 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
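/* Leave room for one worst-case block so that the size check in tb_alloc()
   can accept a TB before it is actually translated without overflowing
   the buffer. */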
434 code_gen_buffer_max_size = code_gen_buffer_size -
435 code_gen_max_block_size();
436 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
437 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
440 /* Must be called before using the QEMU cpus. 'tb_size' is the size
441 (in bytes) allocated to the translation buffer. Zero means default
442 size. */
443 void cpu_exec_init_all(unsigned long tb_size)
445 cpu_gen_init();
446 code_gen_alloc(tb_size);
447 code_gen_ptr = code_gen_buffer;
448 page_init();
449 #if !defined(CONFIG_USER_ONLY)
450 io_mem_init();
451 #endif
454 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
456 #define CPU_COMMON_SAVE_VERSION 1
458 static void cpu_common_save(QEMUFile *f, void *opaque)
460 CPUState *env = opaque;
462 qemu_put_be32s(f, &env->halted);
463 qemu_put_be32s(f, &env->interrupt_request);
466 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
468 CPUState *env = opaque;
470 if (version_id != CPU_COMMON_SAVE_VERSION)
471 return -EINVAL;
473 qemu_get_be32s(f, &env->halted);
474 qemu_get_be32s(f, &env->interrupt_request);
475 tlb_flush(env, 1);
477 return 0;
479 #endif
481 void cpu_exec_init(CPUState *env)
483 CPUState **penv;
484 int cpu_index;
486 env->next_cpu = NULL;
487 penv = &first_cpu;
488 cpu_index = 0;
489 while (*penv != NULL) {
490 penv = (CPUState **)&(*penv)->next_cpu;
491 cpu_index++;
493 env->cpu_index = cpu_index;
494 env->nb_watchpoints = 0;
495 #ifdef _WIN32
496 env->thread_id = GetCurrentProcessId();
497 #else
498 env->thread_id = getpid();
499 #endif
500 *penv = env;
501 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
502 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
503 cpu_common_save, cpu_common_load, env);
504 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
505 cpu_save, cpu_load, env);
506 #endif
509 static inline void invalidate_page_bitmap(PageDesc *p)
511 if (p->code_bitmap) {
512 qemu_free(p->code_bitmap);
513 p->code_bitmap = NULL;
515 p->code_write_count = 0;
518 /* set to NULL all the 'first_tb' fields in all PageDescs */
519 static void page_flush_tb(void)
521 int i, j;
522 PageDesc *p;
524 for(i = 0; i < L1_SIZE; i++) {
525 p = l1_map[i];
526 if (p) {
527 for(j = 0; j < L2_SIZE; j++) {
528 p->first_tb = NULL;
529 invalidate_page_bitmap(p);
530 p++;
536 /* flush all the translation blocks */
537 /* XXX: tb_flush is currently not thread safe */
538 void tb_flush(CPUState *env1)
540 CPUState *env;
541 #if defined(DEBUG_FLUSH)
542 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
543 (unsigned long)(code_gen_ptr - code_gen_buffer),
544 nb_tbs, nb_tbs > 0 ?
545 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
546 #endif
547 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
548 cpu_abort(env1, "Internal error: code buffer overflow\n");
550 nb_tbs = 0;
552 for(env = first_cpu; env != NULL; env = env->next_cpu) {
553 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
556 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
557 page_flush_tb();
559 code_gen_ptr = code_gen_buffer;
560 /* XXX: flush processor icache at this point if cache flush is
561 expensive */
562 tb_flush_count++;
565 #ifdef DEBUG_TB_CHECK
567 static void tb_invalidate_check(target_ulong address)
569 TranslationBlock *tb;
570 int i;
571 address &= TARGET_PAGE_MASK;
572 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
573 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
574 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
575 address >= tb->pc + tb->size)) {
576 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
577 address, (long)tb->pc, tb->size);
583 /* verify that all the pages have correct rights for code */
584 static void tb_page_check(void)
586 TranslationBlock *tb;
587 int i, flags1, flags2;
589 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
590 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
591 flags1 = page_get_flags(tb->pc);
592 flags2 = page_get_flags(tb->pc + tb->size - 1);
593 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
594 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
595 (long)tb->pc, tb->size, flags1, flags2);
601 void tb_jmp_check(TranslationBlock *tb)
603 TranslationBlock *tb1;
604 unsigned int n1;
606 /* suppress any remaining jumps to this TB */
607 tb1 = tb->jmp_first;
608 for(;;) {
609 n1 = (long)tb1 & 3;
610 tb1 = (TranslationBlock *)((long)tb1 & ~3);
611 if (n1 == 2)
612 break;
613 tb1 = tb1->jmp_next[n1];
615 /* check end of list */
616 if (tb1 != tb) {
617 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
621 #endif
623 /* invalidate one TB */
624 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
625 int next_offset)
627 TranslationBlock *tb1;
628 for(;;) {
629 tb1 = *ptb;
630 if (tb1 == tb) {
631 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
632 break;
634 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
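/* The TB lists reached through page_next[] and jmp_first/jmp_next[] store
   a small tag in the two low bits of each pointer: the index (0 or 1) of
   the page or jump slot inside the pointed-to TB, with tag 2 marking the
   head of the circular jump list (the owning TB itself). */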
638 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
640 TranslationBlock *tb1;
641 unsigned int n1;
643 for(;;) {
644 tb1 = *ptb;
645 n1 = (long)tb1 & 3;
646 tb1 = (TranslationBlock *)((long)tb1 & ~3);
647 if (tb1 == tb) {
648 *ptb = tb1->page_next[n1];
649 break;
651 ptb = &tb1->page_next[n1];
655 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
657 TranslationBlock *tb1, **ptb;
658 unsigned int n1;
660 ptb = &tb->jmp_next[n];
661 tb1 = *ptb;
662 if (tb1) {
663 /* find tb(n) in circular list */
664 for(;;) {
665 tb1 = *ptb;
666 n1 = (long)tb1 & 3;
667 tb1 = (TranslationBlock *)((long)tb1 & ~3);
668 if (n1 == n && tb1 == tb)
669 break;
670 if (n1 == 2) {
671 ptb = &tb1->jmp_first;
672 } else {
673 ptb = &tb1->jmp_next[n1];
676 /* now we can suppress tb(n) from the list */
677 *ptb = tb->jmp_next[n];
679 tb->jmp_next[n] = NULL;
683 /* reset the jump entry 'n' of a TB so that it is not chained to
684 another TB */
685 static inline void tb_reset_jump(TranslationBlock *tb, int n)
687 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
690 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
692 CPUState *env;
693 PageDesc *p;
694 unsigned int h, n1;
695 target_phys_addr_t phys_pc;
696 TranslationBlock *tb1, *tb2;
698 /* remove the TB from the hash list */
699 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
700 h = tb_phys_hash_func(phys_pc);
701 tb_remove(&tb_phys_hash[h], tb,
702 offsetof(TranslationBlock, phys_hash_next));
704 /* remove the TB from the page list */
705 if (tb->page_addr[0] != page_addr) {
706 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
707 tb_page_remove(&p->first_tb, tb);
708 invalidate_page_bitmap(p);
710 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
711 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
712 tb_page_remove(&p->first_tb, tb);
713 invalidate_page_bitmap(p);
716 tb_invalidated_flag = 1;
718 /* remove the TB from the hash list */
719 h = tb_jmp_cache_hash_func(tb->pc);
720 for(env = first_cpu; env != NULL; env = env->next_cpu) {
721 if (env->tb_jmp_cache[h] == tb)
722 env->tb_jmp_cache[h] = NULL;
725 /* suppress this TB from the two jump lists */
726 tb_jmp_remove(tb, 0);
727 tb_jmp_remove(tb, 1);
729 /* suppress any remaining jumps to this TB */
730 tb1 = tb->jmp_first;
731 for(;;) {
732 n1 = (long)tb1 & 3;
733 if (n1 == 2)
734 break;
735 tb1 = (TranslationBlock *)((long)tb1 & ~3);
736 tb2 = tb1->jmp_next[n1];
737 tb_reset_jump(tb1, n1);
738 tb1->jmp_next[n1] = NULL;
739 tb1 = tb2;
741 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
743 tb_phys_invalidate_count++;
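/* Set bits [start, start + len) in the bitmap 'tab' (one bit per byte of
   a target page). */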
746 static inline void set_bits(uint8_t *tab, int start, int len)
748 int end, mask, end1;
750 end = start + len;
751 tab += start >> 3;
752 mask = 0xff << (start & 7);
753 if ((start & ~7) == (end & ~7)) {
754 if (start < end) {
755 mask &= ~(0xff << (end & 7));
756 *tab |= mask;
758 } else {
759 *tab++ |= mask;
760 start = (start + 8) & ~7;
761 end1 = end & ~7;
762 while (start < end1) {
763 *tab++ = 0xff;
764 start += 8;
766 if (start < end) {
767 mask = ~(0xff << (end & 7));
768 *tab |= mask;
773 static void build_page_bitmap(PageDesc *p)
775 int n, tb_start, tb_end;
776 TranslationBlock *tb;
778 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
779 if (!p->code_bitmap)
780 return;
782 tb = p->first_tb;
783 while (tb != NULL) {
784 n = (long)tb & 3;
785 tb = (TranslationBlock *)((long)tb & ~3);
786 /* NOTE: this is subtle as a TB may span two physical pages */
787 if (n == 0) {
788 /* NOTE: tb_end may be after the end of the page, but
789 it is not a problem */
790 tb_start = tb->pc & ~TARGET_PAGE_MASK;
791 tb_end = tb_start + tb->size;
792 if (tb_end > TARGET_PAGE_SIZE)
793 tb_end = TARGET_PAGE_SIZE;
794 } else {
795 tb_start = 0;
796 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
798 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
799 tb = tb->page_next[n];
803 TranslationBlock *tb_gen_code(CPUState *env,
804 target_ulong pc, target_ulong cs_base,
805 int flags, int cflags)
807 TranslationBlock *tb;
808 uint8_t *tc_ptr;
809 target_ulong phys_pc, phys_page2, virt_page2;
810 int code_gen_size;
812 phys_pc = get_phys_addr_code(env, pc);
813 tb = tb_alloc(pc);
814 if (!tb) {
815 /* flush must be done */
816 tb_flush(env);
817 /* cannot fail at this point */
818 tb = tb_alloc(pc);
819 /* Don't forget to invalidate previous TB info. */
820 tb_invalidated_flag = 1;
822 tc_ptr = code_gen_ptr;
823 tb->tc_ptr = tc_ptr;
824 tb->cs_base = cs_base;
825 tb->flags = flags;
826 tb->cflags = cflags;
827 cpu_gen_code(env, tb, &code_gen_size);
828 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
830 /* check next page if needed */
831 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
832 phys_page2 = -1;
833 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
834 phys_page2 = get_phys_addr_code(env, virt_page2);
836 tb_link_phys(tb, phys_pc, phys_page2);
837 return tb;
840 /* invalidate all TBs which intersect with the target physical page
841 starting in range [start, end). NOTE: start and end must refer to
842 the same physical page. 'is_cpu_write_access' should be true if called
843 from a real cpu write access: the virtual CPU will exit the current
844 TB if code is modified inside this TB. */
845 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
846 int is_cpu_write_access)
848 int n, current_tb_modified, current_tb_not_found, current_flags;
849 CPUState *env = cpu_single_env;
850 PageDesc *p;
851 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
852 target_ulong tb_start, tb_end;
853 target_ulong current_pc, current_cs_base;
855 p = page_find(start >> TARGET_PAGE_BITS);
856 if (!p)
857 return;
858 if (!p->code_bitmap &&
859 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
860 is_cpu_write_access) {
861 /* build code bitmap */
862 build_page_bitmap(p);
865 /* we remove all the TBs in the range [start, end) */
866 /* XXX: see if in some cases it could be faster to invalidate all the code */
867 current_tb_not_found = is_cpu_write_access;
868 current_tb_modified = 0;
869 current_tb = NULL; /* avoid warning */
870 current_pc = 0; /* avoid warning */
871 current_cs_base = 0; /* avoid warning */
872 current_flags = 0; /* avoid warning */
873 tb = p->first_tb;
874 while (tb != NULL) {
875 n = (long)tb & 3;
876 tb = (TranslationBlock *)((long)tb & ~3);
877 tb_next = tb->page_next[n];
878 /* NOTE: this is subtle as a TB may span two physical pages */
879 if (n == 0) {
880 /* NOTE: tb_end may be after the end of the page, but
881 it is not a problem */
882 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
883 tb_end = tb_start + tb->size;
884 } else {
885 tb_start = tb->page_addr[1];
886 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
888 if (!(tb_end <= start || tb_start >= end)) {
889 #ifdef TARGET_HAS_PRECISE_SMC
890 if (current_tb_not_found) {
891 current_tb_not_found = 0;
892 current_tb = NULL;
893 if (env->mem_io_pc) {
894 /* now we have a real cpu fault */
895 current_tb = tb_find_pc(env->mem_io_pc);
898 if (current_tb == tb &&
899 (current_tb->cflags & CF_COUNT_MASK) != 1) {
900 /* If we are modifying the current TB, we must stop
901 its execution. We could be more precise by checking
902 that the modification is after the current PC, but it
903 would require a specialized function to partially
904 restore the CPU state */
906 current_tb_modified = 1;
907 cpu_restore_state(current_tb, env,
908 env->mem_io_pc, NULL);
909 #if defined(TARGET_I386)
910 current_flags = env->hflags;
911 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
912 current_cs_base = (target_ulong)env->segs[R_CS].base;
913 current_pc = current_cs_base + env->eip;
914 #else
915 #error unsupported CPU
916 #endif
918 #endif /* TARGET_HAS_PRECISE_SMC */
919 /* we need to do that to handle the case where a signal
920 occurs while doing tb_phys_invalidate() */
921 saved_tb = NULL;
922 if (env) {
923 saved_tb = env->current_tb;
924 env->current_tb = NULL;
926 tb_phys_invalidate(tb, -1);
927 if (env) {
928 env->current_tb = saved_tb;
929 if (env->interrupt_request && env->current_tb)
930 cpu_interrupt(env, env->interrupt_request);
933 tb = tb_next;
935 #if !defined(CONFIG_USER_ONLY)
936 /* if no code remaining, no need to continue to use slow writes */
937 if (!p->first_tb) {
938 invalidate_page_bitmap(p);
939 if (is_cpu_write_access) {
940 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
943 #endif
944 #ifdef TARGET_HAS_PRECISE_SMC
945 if (current_tb_modified) {
946 /* we generate a block containing just the instruction
947 modifying the memory. It will ensure that it cannot modify
948 itself */
949 env->current_tb = NULL;
950 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
951 cpu_resume_from_signal(env, NULL);
953 #endif
956 /* len must be <= 8 and start must be a multiple of len */
957 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
959 PageDesc *p;
960 int offset, b;
961 #if 0
962 if (1) {
963 if (loglevel) {
964 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
965 cpu_single_env->mem_io_vaddr, len,
966 cpu_single_env->eip,
967 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
970 #endif
971 p = page_find(start >> TARGET_PAGE_BITS);
972 if (!p)
973 return;
974 if (p->code_bitmap) {
975 offset = start & ~TARGET_PAGE_MASK;
976 b = p->code_bitmap[offset >> 3] >> (offset & 7);
977 if (b & ((1 << len) - 1))
978 goto do_invalidate;
979 } else {
980 do_invalidate:
981 tb_invalidate_phys_page_range(start, start + len, 1);
985 #if !defined(CONFIG_SOFTMMU)
986 static void tb_invalidate_phys_page(target_phys_addr_t addr,
987 unsigned long pc, void *puc)
989 int n, current_flags, current_tb_modified;
990 target_ulong current_pc, current_cs_base;
991 PageDesc *p;
992 TranslationBlock *tb, *current_tb;
993 #ifdef TARGET_HAS_PRECISE_SMC
994 CPUState *env = cpu_single_env;
995 #endif
997 addr &= TARGET_PAGE_MASK;
998 p = page_find(addr >> TARGET_PAGE_BITS);
999 if (!p)
1000 return;
1001 tb = p->first_tb;
1002 current_tb_modified = 0;
1003 current_tb = NULL;
1004 current_pc = 0; /* avoid warning */
1005 current_cs_base = 0; /* avoid warning */
1006 current_flags = 0; /* avoid warning */
1007 #ifdef TARGET_HAS_PRECISE_SMC
1008 if (tb && pc != 0) {
1009 current_tb = tb_find_pc(pc);
1011 #endif
1012 while (tb != NULL) {
1013 n = (long)tb & 3;
1014 tb = (TranslationBlock *)((long)tb & ~3);
1015 #ifdef TARGET_HAS_PRECISE_SMC
1016 if (current_tb == tb &&
1017 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1018 /* If we are modifying the current TB, we must stop
1019 its execution. We could be more precise by checking
1020 that the modification is after the current PC, but it
1021 would require a specialized function to partially
1022 restore the CPU state */
1024 current_tb_modified = 1;
1025 cpu_restore_state(current_tb, env, pc, puc);
1026 #if defined(TARGET_I386)
1027 current_flags = env->hflags;
1028 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1029 current_cs_base = (target_ulong)env->segs[R_CS].base;
1030 current_pc = current_cs_base + env->eip;
1031 #else
1032 #error unsupported CPU
1033 #endif
1035 #endif /* TARGET_HAS_PRECISE_SMC */
1036 tb_phys_invalidate(tb, addr);
1037 tb = tb->page_next[n];
1039 p->first_tb = NULL;
1040 #ifdef TARGET_HAS_PRECISE_SMC
1041 if (current_tb_modified) {
1042 /* we generate a block containing just the instruction
1043 modifying the memory. It will ensure that it cannot modify
1044 itself */
1045 env->current_tb = NULL;
1046 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1047 cpu_resume_from_signal(env, puc);
1049 #endif
1051 #endif
1053 /* add the tb in the target page and protect it if necessary */
1054 static inline void tb_alloc_page(TranslationBlock *tb,
1055 unsigned int n, target_ulong page_addr)
1057 PageDesc *p;
1058 TranslationBlock *last_first_tb;
1060 tb->page_addr[n] = page_addr;
1061 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1062 tb->page_next[n] = p->first_tb;
1063 last_first_tb = p->first_tb;
1064 p->first_tb = (TranslationBlock *)((long)tb | n);
1065 invalidate_page_bitmap(p);
1067 #if defined(TARGET_HAS_SMC) || 1
1069 #if defined(CONFIG_USER_ONLY)
1070 if (p->flags & PAGE_WRITE) {
1071 target_ulong addr;
1072 PageDesc *p2;
1073 int prot;
1075 /* force the host page as non writable (writes will have a
1076 page fault + mprotect overhead) */
1077 page_addr &= qemu_host_page_mask;
1078 prot = 0;
1079 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1080 addr += TARGET_PAGE_SIZE) {
1082 p2 = page_find (addr >> TARGET_PAGE_BITS);
1083 if (!p2)
1084 continue;
1085 prot |= p2->flags;
1086 p2->flags &= ~PAGE_WRITE;
1087 page_get_flags(addr);
1089 mprotect(g2h(page_addr), qemu_host_page_size,
1090 (prot & PAGE_BITS) & ~PAGE_WRITE);
1091 #ifdef DEBUG_TB_INVALIDATE
1092 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1093 page_addr);
1094 #endif
1096 #else
1097 /* if some code is already present, then the pages are already
1098 protected. So we handle the case where only the first TB is
1099 allocated in a physical page */
1100 if (!last_first_tb) {
1101 tlb_protect_code(page_addr);
1103 #endif
1105 #endif /* TARGET_HAS_SMC */
1108 /* Allocate a new translation block. Flush the translation buffer if
1109 too many translation blocks or too much generated code. */
1110 TranslationBlock *tb_alloc(target_ulong pc)
1112 TranslationBlock *tb;
1114 if (nb_tbs >= code_gen_max_blocks ||
1115 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1116 return NULL;
1117 tb = &tbs[nb_tbs++];
1118 tb->pc = pc;
1119 tb->cflags = 0;
1120 return tb;
1123 void tb_free(TranslationBlock *tb)
1125 /* In practice this is mostly used for single-use temporary TBs.
1126 Ignore the hard cases and just back up if this TB happens to
1127 be the last one generated. */
1128 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1129 code_gen_ptr = tb->tc_ptr;
1130 nb_tbs--;
1134 /* add a new TB and link it to the physical page tables. phys_page2 is
1135 (-1) to indicate that only one page contains the TB. */
1136 void tb_link_phys(TranslationBlock *tb,
1137 target_ulong phys_pc, target_ulong phys_page2)
1139 unsigned int h;
1140 TranslationBlock **ptb;
1142 /* Grab the mmap lock to stop another thread invalidating this TB
1143 before we are done. */
1144 mmap_lock();
1145 /* add in the physical hash table */
1146 h = tb_phys_hash_func(phys_pc);
1147 ptb = &tb_phys_hash[h];
1148 tb->phys_hash_next = *ptb;
1149 *ptb = tb;
1151 /* add in the page list */
1152 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1153 if (phys_page2 != -1)
1154 tb_alloc_page(tb, 1, phys_page2);
1155 else
1156 tb->page_addr[1] = -1;
1158 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1159 tb->jmp_next[0] = NULL;
1160 tb->jmp_next[1] = NULL;
1162 /* init original jump addresses */
1163 if (tb->tb_next_offset[0] != 0xffff)
1164 tb_reset_jump(tb, 0);
1165 if (tb->tb_next_offset[1] != 0xffff)
1166 tb_reset_jump(tb, 1);
1168 #ifdef DEBUG_TB_CHECK
1169 tb_page_check();
1170 #endif
1171 mmap_unlock();
1174 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1175 tb[1].tc_ptr. Return NULL if not found */
1176 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1178 int m_min, m_max, m;
1179 unsigned long v;
1180 TranslationBlock *tb;
1182 if (nb_tbs <= 0)
1183 return NULL;
1184 if (tc_ptr < (unsigned long)code_gen_buffer ||
1185 tc_ptr >= (unsigned long)code_gen_ptr)
1186 return NULL;
1187 /* binary search (cf Knuth) */
1188 m_min = 0;
1189 m_max = nb_tbs - 1;
1190 while (m_min <= m_max) {
1191 m = (m_min + m_max) >> 1;
1192 tb = &tbs[m];
1193 v = (unsigned long)tb->tc_ptr;
1194 if (v == tc_ptr)
1195 return tb;
1196 else if (tc_ptr < v) {
1197 m_max = m - 1;
1198 } else {
1199 m_min = m + 1;
1202 return &tbs[m_max];
1205 static void tb_reset_jump_recursive(TranslationBlock *tb);
1207 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1209 TranslationBlock *tb1, *tb_next, **ptb;
1210 unsigned int n1;
1212 tb1 = tb->jmp_next[n];
1213 if (tb1 != NULL) {
1214 /* find head of list */
1215 for(;;) {
1216 n1 = (long)tb1 & 3;
1217 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1218 if (n1 == 2)
1219 break;
1220 tb1 = tb1->jmp_next[n1];
1222 /* we are now sure that tb jumps to tb1 */
1223 tb_next = tb1;
1225 /* remove tb from the jmp_first list */
1226 ptb = &tb_next->jmp_first;
1227 for(;;) {
1228 tb1 = *ptb;
1229 n1 = (long)tb1 & 3;
1230 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1231 if (n1 == n && tb1 == tb)
1232 break;
1233 ptb = &tb1->jmp_next[n1];
1235 *ptb = tb->jmp_next[n];
1236 tb->jmp_next[n] = NULL;
1238 /* suppress the jump to next tb in generated code */
1239 tb_reset_jump(tb, n);
1241 /* suppress jumps in the tb on which we could have jumped */
1242 tb_reset_jump_recursive(tb_next);
1246 static void tb_reset_jump_recursive(TranslationBlock *tb)
1248 tb_reset_jump_recursive2(tb, 0);
1249 tb_reset_jump_recursive2(tb, 1);
1252 #if defined(TARGET_HAS_ICE)
1253 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1255 target_phys_addr_t addr;
1256 target_ulong pd;
1257 ram_addr_t ram_addr;
1258 PhysPageDesc *p;
1260 addr = cpu_get_phys_page_debug(env, pc);
1261 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1262 if (!p) {
1263 pd = IO_MEM_UNASSIGNED;
1264 } else {
1265 pd = p->phys_offset;
1267 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1268 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1270 #endif
1272 /* Add a watchpoint. */
1273 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1275 int i;
1277 for (i = 0; i < env->nb_watchpoints; i++) {
1278 if (addr == env->watchpoint[i].vaddr)
1279 return 0;
1281 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1282 return -1;
1284 i = env->nb_watchpoints++;
1285 env->watchpoint[i].vaddr = addr;
1286 env->watchpoint[i].type = type;
1287 tlb_flush_page(env, addr);
1288 /* FIXME: This flush is needed because of the hack to make memory ops
1289 terminate the TB. It can be removed once the proper IO trap and
1290 re-execute bits are in. */
1291 tb_flush(env);
1292 return i;
1295 /* Remove a watchpoint. */
1296 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1298 int i;
1300 for (i = 0; i < env->nb_watchpoints; i++) {
1301 if (addr == env->watchpoint[i].vaddr) {
1302 env->nb_watchpoints--;
1303 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1304 tlb_flush_page(env, addr);
1305 return 0;
1308 return -1;
1311 /* Remove all watchpoints. */
1312 void cpu_watchpoint_remove_all(CPUState *env) {
1313 int i;
1315 for (i = 0; i < env->nb_watchpoints; i++) {
1316 tlb_flush_page(env, env->watchpoint[i].vaddr);
1318 env->nb_watchpoints = 0;
1321 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1322 breakpoint is reached */
1323 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1325 #if defined(TARGET_HAS_ICE)
1326 int i;
1328 for(i = 0; i < env->nb_breakpoints; i++) {
1329 if (env->breakpoints[i] == pc)
1330 return 0;
1333 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1334 return -1;
1335 env->breakpoints[env->nb_breakpoints++] = pc;
1337 if (kvm_enabled())
1338 kvm_update_debugger(env);
1340 breakpoint_invalidate(env, pc);
1341 return 0;
1342 #else
1343 return -1;
1344 #endif
1347 /* remove all breakpoints */
1348 void cpu_breakpoint_remove_all(CPUState *env) {
1349 #if defined(TARGET_HAS_ICE)
1350 int i;
1351 for(i = 0; i < env->nb_breakpoints; i++) {
1352 breakpoint_invalidate(env, env->breakpoints[i]);
1354 env->nb_breakpoints = 0;
1355 #endif
1358 /* remove a breakpoint */
1359 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1361 #if defined(TARGET_HAS_ICE)
1362 int i;
1363 for(i = 0; i < env->nb_breakpoints; i++) {
1364 if (env->breakpoints[i] == pc)
1365 goto found;
1367 return -1;
1368 found:
1369 env->nb_breakpoints--;
1370 if (i < env->nb_breakpoints)
1371 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1373 if (kvm_enabled())
1374 kvm_update_debugger(env);
1376 breakpoint_invalidate(env, pc);
1377 return 0;
1378 #else
1379 return -1;
1380 #endif
1383 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1384 CPU loop after each instruction */
1385 void cpu_single_step(CPUState *env, int enabled)
1387 #if defined(TARGET_HAS_ICE)
1388 if (env->singlestep_enabled != enabled) {
1389 env->singlestep_enabled = enabled;
1390 /* must flush all the translated code to avoid inconsistencies */
1391 /* XXX: only flush what is necessary */
1392 tb_flush(env);
1394 if (kvm_enabled())
1395 kvm_update_debugger(env);
1396 #endif
1399 /* enable or disable low level logging */
1400 void cpu_set_log(int log_flags)
1402 loglevel = log_flags;
1403 if (loglevel && !logfile) {
1404 logfile = fopen(logfilename, log_append ? "a" : "w");
1405 if (!logfile) {
1406 perror(logfilename);
1407 _exit(1);
1409 #if !defined(CONFIG_SOFTMMU)
1410 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1412 static uint8_t logfile_buf[4096];
1413 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1415 #else
1416 setvbuf(logfile, NULL, _IOLBF, 0);
1417 #endif
1418 log_append = 1;
1420 if (!loglevel && logfile) {
1421 fclose(logfile);
1422 logfile = NULL;
1426 void cpu_set_log_filename(const char *filename)
1428 logfilename = strdup(filename);
1429 if (logfile) {
1430 fclose(logfile);
1431 logfile = NULL;
1433 cpu_set_log(loglevel);
1436 /* mask must never be zero, except for A20 change call */
1437 void cpu_interrupt(CPUState *env, int mask)
1439 #if !defined(USE_NPTL)
1440 TranslationBlock *tb;
1441 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1442 #endif
1443 int old_mask;
1445 old_mask = env->interrupt_request;
1446 /* FIXME: This is probably not threadsafe. A different thread could
1447 be in the middle of a read-modify-write operation. */
1448 env->interrupt_request |= mask;
1449 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1450 kvm_update_interrupt_request(env);
1451 #if defined(USE_NPTL)
1452 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1453 problem and hope the cpu will stop of its own accord. For userspace
1454 emulation this often isn't actually as bad as it sounds. Often
1455 signals are used primarily to interrupt blocking syscalls. */
1456 #else
1457 if (use_icount) {
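/* Setting the high half makes the 32-bit icount_decr value negative,
   which should force the next icount check in generated code to exit
   the current TB. */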
1458 env->icount_decr.u16.high = 0xffff;
1459 #ifndef CONFIG_USER_ONLY
1460 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1461 an async event happened and we need to process it. */
1462 if (!can_do_io(env)
1463 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1464 cpu_abort(env, "Raised interrupt while not in I/O function");
1466 #endif
1467 } else {
1468 tb = env->current_tb;
1469 /* if the cpu is currently executing code, we must unlink it and
1470 all the potentially executing TBs */
1471 if (tb && !testandset(&interrupt_lock)) {
1472 env->current_tb = NULL;
1473 tb_reset_jump_recursive(tb);
1474 resetlock(&interrupt_lock);
1477 #endif
1480 void cpu_reset_interrupt(CPUState *env, int mask)
1482 env->interrupt_request &= ~mask;
1485 CPULogItem cpu_log_items[] = {
1486 { CPU_LOG_TB_OUT_ASM, "out_asm",
1487 "show generated host assembly code for each compiled TB" },
1488 { CPU_LOG_TB_IN_ASM, "in_asm",
1489 "show target assembly code for each compiled TB" },
1490 { CPU_LOG_TB_OP, "op",
1491 "show micro ops for each compiled TB" },
1492 { CPU_LOG_TB_OP_OPT, "op_opt",
1493 "show micro ops "
1494 #ifdef TARGET_I386
1495 "before eflags optimization and "
1496 #endif
1497 "after liveness analysis" },
1498 { CPU_LOG_INT, "int",
1499 "show interrupts/exceptions in short format" },
1500 { CPU_LOG_EXEC, "exec",
1501 "show trace before each executed TB (lots of logs)" },
1502 { CPU_LOG_TB_CPU, "cpu",
1503 "show CPU state before block translation" },
1504 #ifdef TARGET_I386
1505 { CPU_LOG_PCALL, "pcall",
1506 "show protected mode far calls/returns/exceptions" },
1507 #endif
1508 #ifdef DEBUG_IOPORT
1509 { CPU_LOG_IOPORT, "ioport",
1510 "show all i/o ports accesses" },
1511 #endif
1512 { 0, NULL, NULL },
1515 static int cmp1(const char *s1, int n, const char *s2)
1517 if (strlen(s2) != n)
1518 return 0;
1519 return memcmp(s1, s2, n) == 0;
1522 /* takes a comma separated list of log masks. Returns 0 on error. */
1523 int cpu_str_to_log_mask(const char *str)
1525 CPULogItem *item;
1526 int mask;
1527 const char *p, *p1;
1529 p = str;
1530 mask = 0;
1531 for(;;) {
1532 p1 = strchr(p, ',');
1533 if (!p1)
1534 p1 = p + strlen(p);
1535 if(cmp1(p,p1-p,"all")) {
1536 for(item = cpu_log_items; item->mask != 0; item++) {
1537 mask |= item->mask;
1539 } else {
1540 for(item = cpu_log_items; item->mask != 0; item++) {
1541 if (cmp1(p, p1 - p, item->name))
1542 goto found;
1544 return 0;
1546 found:
1547 mask |= item->mask;
1548 if (*p1 != ',')
1549 break;
1550 p = p1 + 1;
1552 return mask;
1555 void cpu_abort(CPUState *env, const char *fmt, ...)
1557 va_list ap;
1558 va_list ap2;
1560 va_start(ap, fmt);
1561 va_copy(ap2, ap);
1562 fprintf(stderr, "qemu: fatal: ");
1563 vfprintf(stderr, fmt, ap);
1564 fprintf(stderr, "\n");
1565 #ifdef TARGET_I386
1566 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1567 #else
1568 cpu_dump_state(env, stderr, fprintf, 0);
1569 #endif
1570 if (logfile) {
1571 fprintf(logfile, "qemu: fatal: ");
1572 vfprintf(logfile, fmt, ap2);
1573 fprintf(logfile, "\n");
1574 #ifdef TARGET_I386
1575 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1576 #else
1577 cpu_dump_state(env, logfile, fprintf, 0);
1578 #endif
1579 fflush(logfile);
1580 fclose(logfile);
1582 va_end(ap2);
1583 va_end(ap);
1584 abort();
1587 CPUState *cpu_copy(CPUState *env)
1589 CPUState *new_env = cpu_init(env->cpu_model_str);
1590 /* preserve chaining and index */
1591 CPUState *next_cpu = new_env->next_cpu;
1592 int cpu_index = new_env->cpu_index;
1593 memcpy(new_env, env, sizeof(CPUState));
1594 new_env->next_cpu = next_cpu;
1595 new_env->cpu_index = cpu_index;
1596 return new_env;
1599 #if !defined(CONFIG_USER_ONLY)
1601 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1603 unsigned int i;
1605 /* Discard jump cache entries for any tb which might potentially
1606 overlap the flushed page. */
1607 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1608 memset (&env->tb_jmp_cache[i], 0,
1609 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1611 i = tb_jmp_cache_hash_page(addr);
1612 memset (&env->tb_jmp_cache[i], 0,
1613 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1616 /* NOTE: if flush_global is true, also flush global entries (not
1617 implemented yet) */
1618 void tlb_flush(CPUState *env, int flush_global)
1620 int i;
1622 #if defined(DEBUG_TLB)
1623 printf("tlb_flush:\n");
1624 #endif
1625 /* must reset current TB so that interrupts cannot modify the
1626 links while we are modifying them */
1627 env->current_tb = NULL;
1629 for(i = 0; i < CPU_TLB_SIZE; i++) {
1630 env->tlb_table[0][i].addr_read = -1;
1631 env->tlb_table[0][i].addr_write = -1;
1632 env->tlb_table[0][i].addr_code = -1;
1633 env->tlb_table[1][i].addr_read = -1;
1634 env->tlb_table[1][i].addr_write = -1;
1635 env->tlb_table[1][i].addr_code = -1;
1636 #if (NB_MMU_MODES >= 3)
1637 env->tlb_table[2][i].addr_read = -1;
1638 env->tlb_table[2][i].addr_write = -1;
1639 env->tlb_table[2][i].addr_code = -1;
1640 #if (NB_MMU_MODES == 4)
1641 env->tlb_table[3][i].addr_read = -1;
1642 env->tlb_table[3][i].addr_write = -1;
1643 env->tlb_table[3][i].addr_code = -1;
1644 #endif
1645 #endif
1648 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1650 #ifdef USE_KQEMU
1651 if (env->kqemu_enabled) {
1652 kqemu_flush(env, flush_global);
1654 #endif
1655 tlb_flush_count++;
1658 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1660 if (addr == (tlb_entry->addr_read &
1661 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1662 addr == (tlb_entry->addr_write &
1663 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1664 addr == (tlb_entry->addr_code &
1665 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1666 tlb_entry->addr_read = -1;
1667 tlb_entry->addr_write = -1;
1668 tlb_entry->addr_code = -1;
1672 void tlb_flush_page(CPUState *env, target_ulong addr)
1674 int i;
1676 #if defined(DEBUG_TLB)
1677 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1678 #endif
1679 /* must reset current TB so that interrupts cannot modify the
1680 links while we are modifying them */
1681 env->current_tb = NULL;
1683 addr &= TARGET_PAGE_MASK;
1684 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1685 tlb_flush_entry(&env->tlb_table[0][i], addr);
1686 tlb_flush_entry(&env->tlb_table[1][i], addr);
1687 #if (NB_MMU_MODES >= 3)
1688 tlb_flush_entry(&env->tlb_table[2][i], addr);
1689 #if (NB_MMU_MODES == 4)
1690 tlb_flush_entry(&env->tlb_table[3][i], addr);
1691 #endif
1692 #endif
1694 tlb_flush_jmp_cache(env, addr);
1696 #ifdef USE_KQEMU
1697 if (env->kqemu_enabled) {
1698 kqemu_flush_page(env, addr);
1700 #endif
1703 /* update the TLBs so that writes to code in the virtual page 'addr'
1704 can be detected */
1705 static void tlb_protect_code(ram_addr_t ram_addr)
1707 cpu_physical_memory_reset_dirty(ram_addr,
1708 ram_addr + TARGET_PAGE_SIZE,
1709 CODE_DIRTY_FLAG);
1712 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1713 tested for self modifying code */
1714 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1715 target_ulong vaddr)
1717 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1720 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1721 unsigned long start, unsigned long length)
1723 unsigned long addr;
1724 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1725 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1726 if ((addr - start) < length) {
1727 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1732 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1733 int dirty_flags)
1735 CPUState *env;
1736 unsigned long length, start1;
1737 int i, mask, len;
1738 uint8_t *p;
1740 start &= TARGET_PAGE_MASK;
1741 end = TARGET_PAGE_ALIGN(end);
1743 length = end - start;
1744 if (length == 0)
1745 return;
1746 len = length >> TARGET_PAGE_BITS;
1747 #ifdef USE_KQEMU
1748 /* XXX: should not depend on cpu context */
1749 env = first_cpu;
1750 if (env->kqemu_enabled) {
1751 ram_addr_t addr;
1752 addr = start;
1753 for(i = 0; i < len; i++) {
1754 kqemu_set_notdirty(env, addr);
1755 addr += TARGET_PAGE_SIZE;
1758 #endif
1759 mask = ~dirty_flags;
1760 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1761 for(i = 0; i < len; i++)
1762 p[i] &= mask;
1764 /* we modify the TLB cache so that the dirty bit will be set again
1765 when accessing the range */
1766 start1 = start + (unsigned long)phys_ram_base;
1767 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1768 for(i = 0; i < CPU_TLB_SIZE; i++)
1769 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1770 for(i = 0; i < CPU_TLB_SIZE; i++)
1771 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1772 #if (NB_MMU_MODES >= 3)
1773 for(i = 0; i < CPU_TLB_SIZE; i++)
1774 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1775 #if (NB_MMU_MODES == 4)
1776 for(i = 0; i < CPU_TLB_SIZE; i++)
1777 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1778 #endif
1779 #endif
1783 int cpu_physical_memory_set_dirty_tracking(int enable)
1785 int r=0;
1787 if (kvm_enabled())
1788 r = kvm_physical_memory_set_dirty_tracking(enable);
1789 in_migration = enable;
1790 return r;
1793 int cpu_physical_memory_get_dirty_tracking(void)
1795 return in_migration;
1798 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1800 ram_addr_t ram_addr;
1802 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1803 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1804 tlb_entry->addend - (unsigned long)phys_ram_base;
1805 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1806 tlb_entry->addr_write |= TLB_NOTDIRTY;
1811 /* update the TLB according to the current state of the dirty bits */
1812 void cpu_tlb_update_dirty(CPUState *env)
1814 int i;
1815 for(i = 0; i < CPU_TLB_SIZE; i++)
1816 tlb_update_dirty(&env->tlb_table[0][i]);
1817 for(i = 0; i < CPU_TLB_SIZE; i++)
1818 tlb_update_dirty(&env->tlb_table[1][i]);
1819 #if (NB_MMU_MODES >= 3)
1820 for(i = 0; i < CPU_TLB_SIZE; i++)
1821 tlb_update_dirty(&env->tlb_table[2][i]);
1822 #if (NB_MMU_MODES == 4)
1823 for(i = 0; i < CPU_TLB_SIZE; i++)
1824 tlb_update_dirty(&env->tlb_table[3][i]);
1825 #endif
1826 #endif
1829 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1831 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1832 tlb_entry->addr_write = vaddr;
1835 /* update the TLB corresponding to virtual page vaddr
1836 so that it is no longer dirty */
1837 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1839 int i;
1841 vaddr &= TARGET_PAGE_MASK;
1842 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1843 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1844 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1845 #if (NB_MMU_MODES >= 3)
1846 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1847 #if (NB_MMU_MODES == 4)
1848 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1849 #endif
1850 #endif
1853 /* add a new TLB entry. At most one entry for a given virtual address
1854 is permitted. Return 0 if OK or 2 if the page could not be mapped
1855 (can only happen in non SOFTMMU mode for I/O pages or pages
1856 conflicting with the host address space). */
1857 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1858 target_phys_addr_t paddr, int prot,
1859 int mmu_idx, int is_softmmu)
1861 PhysPageDesc *p;
1862 unsigned long pd;
1863 unsigned int index;
1864 target_ulong address;
1865 target_ulong code_address;
1866 target_phys_addr_t addend;
1867 int ret;
1868 CPUTLBEntry *te;
1869 int i;
1870 target_phys_addr_t iotlb;
1872 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1873 if (!p) {
1874 pd = IO_MEM_UNASSIGNED;
1875 } else {
1876 pd = p->phys_offset;
1878 #if defined(DEBUG_TLB)
1879 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1880 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1881 #endif
1883 ret = 0;
1884 address = vaddr;
1885 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1886 /* IO memory case (romd handled later) */
1887 address |= TLB_MMIO;
1889 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1890 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1891 /* Normal RAM. */
1892 iotlb = pd & TARGET_PAGE_MASK;
1893 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1894 iotlb |= IO_MEM_NOTDIRTY;
1895 else
1896 iotlb |= IO_MEM_ROM;
1897 } else {
1898 /* IO handlers are currently passed a physical address.
1899 It would be nice to pass an offset from the base address
1900 of that region. This would avoid having to special case RAM,
1901 and avoid full address decoding in every device.
1902 We can't use the high bits of pd for this because
1903 IO_MEM_ROMD uses these as a ram address. */
1904 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1907 code_address = address;
1908 /* Make accesses to pages with watchpoints go via the
1909 watchpoint trap routines. */
1910 for (i = 0; i < env->nb_watchpoints; i++) {
1911 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1912 iotlb = io_mem_watch + paddr;
1913 /* TODO: The memory case can be optimized by not trapping
1914 reads of pages with a write breakpoint. */
1915 address |= TLB_MMIO;
1919 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1920 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1921 te = &env->tlb_table[mmu_idx][index];
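/* addend is stored relative to vaddr, so the host address of a guest
   access can be formed as guest virtual address + addend (see
   tlb_reset_dirty_range above). */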
1922 te->addend = addend - vaddr;
1923 if (prot & PAGE_READ) {
1924 te->addr_read = address;
1925 } else {
1926 te->addr_read = -1;
1929 if (prot & PAGE_EXEC) {
1930 te->addr_code = code_address;
1931 } else {
1932 te->addr_code = -1;
1934 if (prot & PAGE_WRITE) {
1935 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1936 (pd & IO_MEM_ROMD)) {
1937 /* Write access calls the I/O callback. */
1938 te->addr_write = address | TLB_MMIO;
1939 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1940 !cpu_physical_memory_is_dirty(pd)) {
1941 te->addr_write = address | TLB_NOTDIRTY;
1942 } else {
1943 te->addr_write = address;
1945 } else {
1946 te->addr_write = -1;
1948 return ret;
1951 #else
1953 void tlb_flush(CPUState *env, int flush_global)
1957 void tlb_flush_page(CPUState *env, target_ulong addr)
1961 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1962 target_phys_addr_t paddr, int prot,
1963 int mmu_idx, int is_softmmu)
1965 return 0;
1968 /* dump memory mappings */
1969 void page_dump(FILE *f)
1971 unsigned long start, end;
1972 int i, j, prot, prot1;
1973 PageDesc *p;
1975 fprintf(f, "%-8s %-8s %-8s %s\n",
1976 "start", "end", "size", "prot");
1977 start = -1;
1978 end = -1;
1979 prot = 0;
1980 for(i = 0; i <= L1_SIZE; i++) {
1981 if (i < L1_SIZE)
1982 p = l1_map[i];
1983 else
1984 p = NULL;
1985 for(j = 0;j < L2_SIZE; j++) {
1986 if (!p)
1987 prot1 = 0;
1988 else
1989 prot1 = p[j].flags;
1990 if (prot1 != prot) {
1991 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1992 if (start != -1) {
1993 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1994 start, end, end - start,
1995 prot & PAGE_READ ? 'r' : '-',
1996 prot & PAGE_WRITE ? 'w' : '-',
1997 prot & PAGE_EXEC ? 'x' : '-');
1999 if (prot1 != 0)
2000 start = end;
2001 else
2002 start = -1;
2003 prot = prot1;
2005 if (!p)
2006 break;
2011 int page_get_flags(target_ulong address)
2013 PageDesc *p;
2015 p = page_find(address >> TARGET_PAGE_BITS);
2016 if (!p)
2017 return 0;
2018 return p->flags;
2021 /* modify the flags of a page and invalidate the code if
2022 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2023 depending on PAGE_WRITE */
2024 void page_set_flags(target_ulong start, target_ulong end, int flags)
2026 PageDesc *p;
2027 target_ulong addr;
2029 /* mmap_lock should already be held. */
2030 start = start & TARGET_PAGE_MASK;
2031 end = TARGET_PAGE_ALIGN(end);
2032 if (flags & PAGE_WRITE)
2033 flags |= PAGE_WRITE_ORG;
2034 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2035 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2036 /* We may be called for host regions that are outside guest
2037 address space. */
2038 if (!p)
2039 return;
2040 /* if the write protection is set, then we invalidate the code
2041 inside */
2042 if (!(p->flags & PAGE_WRITE) &&
2043 (flags & PAGE_WRITE) &&
2044 p->first_tb) {
2045 tb_invalidate_phys_page(addr, 0, NULL);
2047 p->flags = flags;
2051 int page_check_range(target_ulong start, target_ulong len, int flags)
2053 PageDesc *p;
2054 target_ulong end;
2055 target_ulong addr;
2057 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2058 start = start & TARGET_PAGE_MASK;
2060 if( end < start )
2061 /* we've wrapped around */
2062 return -1;
2063 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2064 p = page_find(addr >> TARGET_PAGE_BITS);
2065 if( !p )
2066 return -1;
2067 if( !(p->flags & PAGE_VALID) )
2068 return -1;
2070 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2071 return -1;
2072 if (flags & PAGE_WRITE) {
2073 if (!(p->flags & PAGE_WRITE_ORG))
2074 return -1;
2075 /* unprotect the page if it was put read-only because it
2076 contains translated code */
2077 if (!(p->flags & PAGE_WRITE)) {
2078 if (!page_unprotect(addr, 0, NULL))
2079 return -1;
2081 return 0;
2084 return 0;
2087 /* called from signal handler: invalidate the code and unprotect the
2088    page. Return TRUE if the fault was successfully handled. */
2089 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2091 unsigned int page_index, prot, pindex;
2092 PageDesc *p, *p1;
2093 target_ulong host_start, host_end, addr;
2095 /* Technically this isn't safe inside a signal handler. However we
2096 know this only ever happens in a synchronous SEGV handler, so in
2097 practice it seems to be ok. */
2098 mmap_lock();
2100 host_start = address & qemu_host_page_mask;
2101 page_index = host_start >> TARGET_PAGE_BITS;
2102 p1 = page_find(page_index);
2103 if (!p1) {
2104 mmap_unlock();
2105 return 0;
2107 host_end = host_start + qemu_host_page_size;
2108 p = p1;
2109 prot = 0;
2110 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2111 prot |= p->flags;
2112 p++;
2114 /* if the page was really writable, then we change its
2115 protection back to writable */
2116 if (prot & PAGE_WRITE_ORG) {
2117 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2118 if (!(p1[pindex].flags & PAGE_WRITE)) {
2119 mprotect((void *)g2h(host_start), qemu_host_page_size,
2120 (prot & PAGE_BITS) | PAGE_WRITE);
2121 p1[pindex].flags |= PAGE_WRITE;
2122 /* and since the content will be modified, we must invalidate
2123 the corresponding translated code. */
2124 tb_invalidate_phys_page(address, pc, puc);
2125 #ifdef DEBUG_TB_CHECK
2126 tb_invalidate_check(address);
2127 #endif
2128 mmap_unlock();
2129 return 1;
2132 mmap_unlock();
2133 return 0;
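/* no softmmu TLB exists in user-mode emulation, so this is a no-op */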
2136 static inline void tlb_set_dirty(CPUState *env,
2137 unsigned long addr, target_ulong vaddr)
2140 #endif /* defined(CONFIG_USER_ONLY) */
2142 #if !defined(CONFIG_USER_ONLY)
2143 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2144 ram_addr_t memory);
2145 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2146 ram_addr_t orig_memory);
2147 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2148 need_subpage) \
2149 do { \
2150 if (addr > start_addr) \
2151 start_addr2 = 0; \
2152 else { \
2153 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2154 if (start_addr2 > 0) \
2155 need_subpage = 1; \
2158 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2159 end_addr2 = TARGET_PAGE_SIZE - 1; \
2160 else { \
2161 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2162 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2163 need_subpage = 1; \
2165 } while (0)
2167 /* register physical memory. 'size' must be a multiple of the target
2168 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2169    I/O memory page */
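/* Typical usage (illustrative only; 'ram_offset' from qemu_ram_alloc() and
   'iomemtype' from cpu_register_io_memory() are assumed to exist):
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(dev_base, 0x1000, iomemtype);
 */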
2170 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2171 ram_addr_t size,
2172 ram_addr_t phys_offset)
2174 target_phys_addr_t addr, end_addr;
2175 PhysPageDesc *p;
2176 CPUState *env;
2177 ram_addr_t orig_size = size;
2178 void *subpage;
2180 #ifdef USE_KQEMU
2181 /* XXX: should not depend on cpu context */
2182 env = first_cpu;
2183 if (env->kqemu_enabled) {
2184 kqemu_set_phys_mem(start_addr, size, phys_offset);
2186 #endif
2187 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2188 end_addr = start_addr + (target_phys_addr_t)size;
2189 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2190 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2191 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2192 ram_addr_t orig_memory = p->phys_offset;
2193 target_phys_addr_t start_addr2, end_addr2;
2194 int need_subpage = 0;
2196 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2197 need_subpage);
2198 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2199 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2200 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2201 &p->phys_offset, orig_memory);
2202 } else {
2203 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2204 >> IO_MEM_SHIFT];
2206 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2207 } else {
2208 p->phys_offset = phys_offset;
2209 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2210 (phys_offset & IO_MEM_ROMD))
2211 phys_offset += TARGET_PAGE_SIZE;
2213 } else {
2214 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2215 p->phys_offset = phys_offset;
2216 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2217 (phys_offset & IO_MEM_ROMD))
2218 phys_offset += TARGET_PAGE_SIZE;
2219 else {
2220 target_phys_addr_t start_addr2, end_addr2;
2221 int need_subpage = 0;
2223 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2224 end_addr2, need_subpage);
2226 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2227 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2228 &p->phys_offset, IO_MEM_UNASSIGNED);
2229 subpage_register(subpage, start_addr2, end_addr2,
2230 phys_offset);
2236 /* since each CPU stores ram addresses in its TLB cache, we must
2237 reset the modified entries */
2238 /* XXX: slow ! */
2239 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2240 tlb_flush(env, 1);
2244 /* XXX: temporary until new memory mapping API */
2245 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2247 PhysPageDesc *p;
2249 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2250 if (!p)
2251 return IO_MEM_UNASSIGNED;
2252 return p->phys_offset;
2255 /* XXX: better than nothing */
2256 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2258 ram_addr_t addr;
2259 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2260         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2261 (uint64_t)size, (uint64_t)phys_ram_size);
2262 abort();
2264 addr = phys_ram_alloc_offset;
2265 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2266 return addr;
2269 void qemu_ram_free(ram_addr_t addr)
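/* handlers for accesses to memory with no RAM or device mapped: reads
   return 0 and writes are discarded; on SPARC and CRIS an unassigned
   access fault is raised via do_unassigned_access() */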
2273 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2275 #ifdef DEBUG_UNASSIGNED
2276 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2277 #endif
2278 #ifdef TARGET_SPARC
2279 do_unassigned_access(addr, 0, 0, 0);
2280 #elif TARGET_CRIS
2281 do_unassigned_access(addr, 0, 0, 0);
2282 #endif
2283 return 0;
2286 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2288 #ifdef DEBUG_UNASSIGNED
2289 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2290 #endif
2291 #ifdef TARGET_SPARC
2292 do_unassigned_access(addr, 1, 0, 0);
2293 #elif TARGET_CRIS
2294 do_unassigned_access(addr, 1, 0, 0);
2295 #endif
2298 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2299 unassigned_mem_readb,
2300 unassigned_mem_readb,
2301 unassigned_mem_readb,
2304 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2305 unassigned_mem_writeb,
2306 unassigned_mem_writeb,
2307 unassigned_mem_writeb,
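/* write handlers used while a RAM page still contains translated code:
   they invalidate the affected TBs, perform the store and update the
   dirty flags, dropping the notdirty slow path once no translated code
   remains on the page */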
2310 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2311 uint32_t val)
2313 int dirty_flags;
2314 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2315 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2316 #if !defined(CONFIG_USER_ONLY)
2317 tb_invalidate_phys_page_fast(ram_addr, 1);
2318 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2319 #endif
2321 stb_p(phys_ram_base + ram_addr, val);
2322 #ifdef USE_KQEMU
2323 if (cpu_single_env->kqemu_enabled &&
2324 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2325 kqemu_modify_page(cpu_single_env, ram_addr);
2326 #endif
2327 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2328 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2329 /* we remove the notdirty callback only if the code has been
2330 flushed */
2331 if (dirty_flags == 0xff)
2332 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2335 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2336 uint32_t val)
2338 int dirty_flags;
2339 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2340 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2341 #if !defined(CONFIG_USER_ONLY)
2342 tb_invalidate_phys_page_fast(ram_addr, 2);
2343 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2344 #endif
2346 stw_p(phys_ram_base + ram_addr, val);
2347 #ifdef USE_KQEMU
2348 if (cpu_single_env->kqemu_enabled &&
2349 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2350 kqemu_modify_page(cpu_single_env, ram_addr);
2351 #endif
2352 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2353 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2354 /* we remove the notdirty callback only if the code has been
2355 flushed */
2356 if (dirty_flags == 0xff)
2357 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2360 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2361 uint32_t val)
2363 int dirty_flags;
2364 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2365 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2366 #if !defined(CONFIG_USER_ONLY)
2367 tb_invalidate_phys_page_fast(ram_addr, 4);
2368 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2369 #endif
2371 stl_p(phys_ram_base + ram_addr, val);
2372 #ifdef USE_KQEMU
2373 if (cpu_single_env->kqemu_enabled &&
2374 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2375 kqemu_modify_page(cpu_single_env, ram_addr);
2376 #endif
2377 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2378 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2379 /* we remove the notdirty callback only if the code has been
2380 flushed */
2381 if (dirty_flags == 0xff)
2382 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2385 static CPUReadMemoryFunc *error_mem_read[3] = {
2386 NULL, /* never used */
2387 NULL, /* never used */
2388 NULL, /* never used */
2391 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2392 notdirty_mem_writeb,
2393 notdirty_mem_writew,
2394 notdirty_mem_writel,
2397 /* Generate a debug exception if a watchpoint has been hit. */
2398 static void check_watchpoint(int offset, int flags)
2400 CPUState *env = cpu_single_env;
2401 target_ulong vaddr;
2402 int i;
2404 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2405 for (i = 0; i < env->nb_watchpoints; i++) {
2406 if (vaddr == env->watchpoint[i].vaddr
2407 && (env->watchpoint[i].type & flags)) {
2408 env->watchpoint_hit = i + 1;
2409 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2410 break;
2415 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2416 so these check for a hit then pass through to the normal out-of-line
2417 phys routines. */
2418 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2420 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2421 return ldub_phys(addr);
2424 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2426 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2427 return lduw_phys(addr);
2430 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2432 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2433 return ldl_phys(addr);
2436 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2437 uint32_t val)
2439 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2440 stb_phys(addr, val);
2443 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2444 uint32_t val)
2446 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2447 stw_phys(addr, val);
2450 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2451 uint32_t val)
2453 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2454 stl_phys(addr, val);
2457 static CPUReadMemoryFunc *watch_mem_read[3] = {
2458 watch_mem_readb,
2459 watch_mem_readw,
2460 watch_mem_readl,
2463 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2464 watch_mem_writeb,
2465 watch_mem_writew,
2466 watch_mem_writel,
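/* sub-page dispatch: forward an access to the handler registered for the
   sub-page region that contains 'addr' */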
2469 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2470 unsigned int len)
2472 uint32_t ret;
2473 unsigned int idx;
2475 idx = SUBPAGE_IDX(addr - mmio->base);
2476 #if defined(DEBUG_SUBPAGE)
2477 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2478 mmio, len, addr, idx);
2479 #endif
2480 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2482 return ret;
2485 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2486 uint32_t value, unsigned int len)
2488 unsigned int idx;
2490 idx = SUBPAGE_IDX(addr - mmio->base);
2491 #if defined(DEBUG_SUBPAGE)
2492 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2493 mmio, len, addr, idx, value);
2494 #endif
2495 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2498 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2500 #if defined(DEBUG_SUBPAGE)
2501 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2502 #endif
2504 return subpage_readlen(opaque, addr, 0);
2507 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2508 uint32_t value)
2510 #if defined(DEBUG_SUBPAGE)
2511 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2512 #endif
2513 subpage_writelen(opaque, addr, value, 0);
2516 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2518 #if defined(DEBUG_SUBPAGE)
2519 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2520 #endif
2522 return subpage_readlen(opaque, addr, 1);
2525 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2526 uint32_t value)
2528 #if defined(DEBUG_SUBPAGE)
2529 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2530 #endif
2531 subpage_writelen(opaque, addr, value, 1);
2534 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2536 #if defined(DEBUG_SUBPAGE)
2537 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2538 #endif
2540 return subpage_readlen(opaque, addr, 2);
2543 static void subpage_writel (void *opaque,
2544 target_phys_addr_t addr, uint32_t value)
2546 #if defined(DEBUG_SUBPAGE)
2547 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2548 #endif
2549 subpage_writelen(opaque, addr, value, 2);
2552 static CPUReadMemoryFunc *subpage_read[] = {
2553 &subpage_readb,
2554 &subpage_readw,
2555 &subpage_readl,
2558 static CPUWriteMemoryFunc *subpage_write[] = {
2559 &subpage_writeb,
2560 &subpage_writew,
2561 &subpage_writel,
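/* attach the I/O handlers identified by 'memory' to the sub-page range
   [start, end] of 'mmio'; returns -1 if the range falls outside the page */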
2564 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2565 ram_addr_t memory)
2567 int idx, eidx;
2568 unsigned int i;
2570 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2571 return -1;
2572 idx = SUBPAGE_IDX(start);
2573 eidx = SUBPAGE_IDX(end);
2574 #if defined(DEBUG_SUBPAGE)
2575 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2576 mmio, start, end, idx, eidx, memory);
2577 #endif
2578 memory >>= IO_MEM_SHIFT;
2579 for (; idx <= eidx; idx++) {
2580 for (i = 0; i < 4; i++) {
2581 if (io_mem_read[memory][i]) {
2582 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2583 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2585 if (io_mem_write[memory][i]) {
2586 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2587 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2592 return 0;
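/* allocate a subpage container for the page at 'base', register it as its
   own I/O region (stored in *phys with IO_MEM_SUBPAGE set) and initially
   map the whole page to 'orig_memory' */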
2595 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2596 ram_addr_t orig_memory)
2598 subpage_t *mmio;
2599 int subpage_memory;
2601 mmio = qemu_mallocz(sizeof(subpage_t));
2602 if (mmio != NULL) {
2603 mmio->base = base;
2604 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2605 #if defined(DEBUG_SUBPAGE)
2606 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2607 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2608 #endif
2609 *phys = subpage_memory | IO_MEM_SUBPAGE;
2610 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2613 return mmio;
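/* return the first unused slot in the I/O handler tables, or -1 if all
   IO_MEM_NB_ENTRIES slots are taken */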
2616 static int get_free_io_mem_idx(void)
2618 int i;
2620 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2621 if (!io_mem_used[i]) {
2622 io_mem_used[i] = 1;
2623 return i;
2626 return -1;
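/* install the built-in handlers (ROM, unassigned, notdirty, watchpoint)
   and allocate the per-page dirty bitmap, initially marking all pages
   dirty */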
2629 static void io_mem_init(void)
2631 int i;
2633 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2634 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2635 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2636 for (i=0; i<5; i++)
2637 io_mem_used[i] = 1;
2639 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2640 watch_mem_write, NULL);
2641 /* alloc dirty bits array */
2642 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2643 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2646 /* mem_read and mem_write are arrays of functions containing the
2647 function to access byte (index 0), word (index 1) and dword (index
2648 2). Functions can be omitted with a NULL function pointer. The
2649 registered functions may be modified dynamically later.
2650    If io_index is non-zero, the corresponding I/O zone is
2651    modified. If it is zero, a new I/O zone is allocated. The return
2652    value can be used with cpu_register_physical_memory(); -1 is
2653    returned on error. */
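/* Example (illustrative only; my_readb/my_writeb etc. and the device state
   pointer 's' are hypothetical device callbacks):
       static CPUReadMemoryFunc *my_read[3] = { my_readb, my_readw, my_readl };
       static CPUWriteMemoryFunc *my_write[3] = { my_writeb, my_writew, my_writel };
       int iomemtype = cpu_register_io_memory(0, my_read, my_write, s);
       cpu_register_physical_memory(base, 0x1000, iomemtype);
 */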
2654 int cpu_register_io_memory(int io_index,
2655 CPUReadMemoryFunc **mem_read,
2656 CPUWriteMemoryFunc **mem_write,
2657 void *opaque)
2659 int i, subwidth = 0;
2661 if (io_index <= 0) {
2662 io_index = get_free_io_mem_idx();
2663 if (io_index == -1)
2664 return io_index;
2665 } else {
2666 if (io_index >= IO_MEM_NB_ENTRIES)
2667 return -1;
2670 for(i = 0;i < 3; i++) {
2671 if (!mem_read[i] || !mem_write[i])
2672 subwidth = IO_MEM_SUBWIDTH;
2673 io_mem_read[io_index][i] = mem_read[i];
2674 io_mem_write[io_index][i] = mem_write[i];
2676 io_mem_opaque[io_index] = opaque;
2677 return (io_index << IO_MEM_SHIFT) | subwidth;
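/* release an I/O slot previously returned by cpu_register_io_memory():
   its handlers are reset to the unassigned-memory defaults and the slot
   is marked free for reuse */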
2680 void cpu_unregister_io_memory(int io_table_address)
2682 int i;
2683 int io_index = io_table_address >> IO_MEM_SHIFT;
2685 for (i=0;i < 3; i++) {
2686 io_mem_read[io_index][i] = unassigned_mem_read[i];
2687 io_mem_write[io_index][i] = unassigned_mem_write[i];
2689 io_mem_opaque[io_index] = NULL;
2690 io_mem_used[io_index] = 0;
2693 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2695 return io_mem_write[io_index >> IO_MEM_SHIFT];
2698 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2700 return io_mem_read[io_index >> IO_MEM_SHIFT];
2703 #endif /* !defined(CONFIG_USER_ONLY) */
2705 /* physical memory access (slow version, mainly for debug) */
2706 #if defined(CONFIG_USER_ONLY)
2707 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2708 int len, int is_write)
2710 int l, flags;
2711 target_ulong page;
2712 void * p;
2714 while (len > 0) {
2715 page = addr & TARGET_PAGE_MASK;
2716 l = (page + TARGET_PAGE_SIZE) - addr;
2717 if (l > len)
2718 l = len;
2719 flags = page_get_flags(page);
2720 if (!(flags & PAGE_VALID))
2721 return;
2722 if (is_write) {
2723 if (!(flags & PAGE_WRITE))
2724 return;
2725 /* XXX: this code should not depend on lock_user */
2726 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2727 /* FIXME - should this return an error rather than just fail? */
2728 return;
2729 memcpy(p, buf, l);
2730 unlock_user(p, addr, l);
2731 } else {
2732 if (!(flags & PAGE_READ))
2733 return;
2734 /* XXX: this code should not depend on lock_user */
2735 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2736 /* FIXME - should this return an error rather than just fail? */
2737 return;
2738 memcpy(buf, p, l);
2739 unlock_user(p, addr, 0);
2741 len -= l;
2742 buf += l;
2743 addr += l;
2747 #else
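/* softmmu version: walk the physical page table and dispatch each chunk
   either to RAM (with code invalidation and dirty tracking) or to the
   registered I/O handlers, choosing the access width from the alignment
   and remaining length */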
2748 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2749 int len, int is_write)
2751 int l, io_index;
2752 uint8_t *ptr;
2753 uint32_t val;
2754 target_phys_addr_t page;
2755 unsigned long pd;
2756 PhysPageDesc *p;
2758 while (len > 0) {
2759 page = addr & TARGET_PAGE_MASK;
2760 l = (page + TARGET_PAGE_SIZE) - addr;
2761 if (l > len)
2762 l = len;
2763 p = phys_page_find(page >> TARGET_PAGE_BITS);
2764 if (!p) {
2765 pd = IO_MEM_UNASSIGNED;
2766 } else {
2767 pd = p->phys_offset;
2770 if (is_write) {
2771 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2772 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2773 /* XXX: could force cpu_single_env to NULL to avoid
2774 potential bugs */
2775 if (l >= 4 && ((addr & 3) == 0)) {
2776 /* 32 bit write access */
2777 val = ldl_p(buf);
2778 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2779 l = 4;
2780 } else if (l >= 2 && ((addr & 1) == 0)) {
2781 /* 16 bit write access */
2782 val = lduw_p(buf);
2783 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2784 l = 2;
2785 } else {
2786 /* 8 bit write access */
2787 val = ldub_p(buf);
2788 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2789 l = 1;
2791 } else {
2792 unsigned long addr1;
2793 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2794 /* RAM case */
2795 ptr = phys_ram_base + addr1;
2796 memcpy(ptr, buf, l);
2797 if (!cpu_physical_memory_is_dirty(addr1)) {
2798 /* invalidate code */
2799 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2800 /* set dirty bit */
2801 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2802 (0xff & ~CODE_DIRTY_FLAG);
2804             /* qemu doesn't execute guest code directly, but kvm does,
2805                therefore flush instruction caches */
2806 if (kvm_enabled())
2807 flush_icache_range((unsigned long)ptr,
2808 ((unsigned long)ptr)+l);
2810 } else {
2811 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2812 !(pd & IO_MEM_ROMD)) {
2813 /* I/O case */
2814 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2815 if (l >= 4 && ((addr & 3) == 0)) {
2816 /* 32 bit read access */
2817 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2818 stl_p(buf, val);
2819 l = 4;
2820 } else if (l >= 2 && ((addr & 1) == 0)) {
2821 /* 16 bit read access */
2822 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2823 stw_p(buf, val);
2824 l = 2;
2825 } else {
2826 /* 8 bit read access */
2827 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2828 stb_p(buf, val);
2829 l = 1;
2831 } else {
2832 /* RAM case */
2833 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2834 (addr & ~TARGET_PAGE_MASK);
2835 memcpy(buf, ptr, l);
2838 len -= l;
2839 buf += l;
2840 addr += l;
2844 /* used for ROM loading : can write in RAM and ROM */
2845 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2846 const uint8_t *buf, int len)
2848 int l;
2849 uint8_t *ptr;
2850 target_phys_addr_t page;
2851 unsigned long pd;
2852 PhysPageDesc *p;
2854 while (len > 0) {
2855 page = addr & TARGET_PAGE_MASK;
2856 l = (page + TARGET_PAGE_SIZE) - addr;
2857 if (l > len)
2858 l = len;
2859 p = phys_page_find(page >> TARGET_PAGE_BITS);
2860 if (!p) {
2861 pd = IO_MEM_UNASSIGNED;
2862 } else {
2863 pd = p->phys_offset;
2866 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2867 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2868 !(pd & IO_MEM_ROMD)) {
2869 /* do nothing */
2870 } else {
2871 unsigned long addr1;
2872 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2873 /* ROM/RAM case */
2874 ptr = phys_ram_base + addr1;
2875 memcpy(ptr, buf, l);
2877 len -= l;
2878 buf += l;
2879 addr += l;
2884 /* warning: addr must be aligned */
2885 uint32_t ldl_phys(target_phys_addr_t addr)
2887 int io_index;
2888 uint8_t *ptr;
2889 uint32_t val;
2890 unsigned long pd;
2891 PhysPageDesc *p;
2893 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2894 if (!p) {
2895 pd = IO_MEM_UNASSIGNED;
2896 } else {
2897 pd = p->phys_offset;
2900 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2901 !(pd & IO_MEM_ROMD)) {
2902 /* I/O case */
2903 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2904 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2905 } else {
2906 /* RAM case */
2907 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2908 (addr & ~TARGET_PAGE_MASK);
2909 val = ldl_p(ptr);
2911 return val;
2914 /* warning: addr must be aligned */
2915 uint64_t ldq_phys(target_phys_addr_t addr)
2917 int io_index;
2918 uint8_t *ptr;
2919 uint64_t val;
2920 unsigned long pd;
2921 PhysPageDesc *p;
2923 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2924 if (!p) {
2925 pd = IO_MEM_UNASSIGNED;
2926 } else {
2927 pd = p->phys_offset;
2930 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2931 !(pd & IO_MEM_ROMD)) {
2932 /* I/O case */
2933 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2934 #ifdef TARGET_WORDS_BIGENDIAN
2935 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2936 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2937 #else
2938 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2939 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2940 #endif
2941 } else {
2942 /* RAM case */
2943 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2944 (addr & ~TARGET_PAGE_MASK);
2945 val = ldq_p(ptr);
2947 return val;
2950 /* XXX: optimize */
2951 uint32_t ldub_phys(target_phys_addr_t addr)
2953 uint8_t val;
2954 cpu_physical_memory_read(addr, &val, 1);
2955 return val;
2958 /* XXX: optimize */
2959 uint32_t lduw_phys(target_phys_addr_t addr)
2961 uint16_t val;
2962 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2963 return tswap16(val);
2966 #ifdef __GNUC__
2967 #define likely(x) __builtin_expect(!!(x), 1)
2968 #define unlikely(x) __builtin_expect(!!(x), 0)
2969 #else
2970 #define likely(x) x
2971 #define unlikely(x) x
2972 #endif
2974 /* warning: addr must be aligned. The ram page is not marked as dirty
2975 and the code inside is not invalidated. It is useful if the dirty
2976 bits are used to track modified PTEs */
2977 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2979 int io_index;
2980 uint8_t *ptr;
2981 unsigned long pd;
2982 PhysPageDesc *p;
2984 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2985 if (!p) {
2986 pd = IO_MEM_UNASSIGNED;
2987 } else {
2988 pd = p->phys_offset;
2991 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2992 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2993 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2994 } else {
2995 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2996 ptr = phys_ram_base + addr1;
2997 stl_p(ptr, val);
2999 if (unlikely(in_migration)) {
3000 if (!cpu_physical_memory_is_dirty(addr1)) {
3001 /* invalidate code */
3002 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3003 /* set dirty bit */
3004 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3005 (0xff & ~CODE_DIRTY_FLAG);
3011 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3013 int io_index;
3014 uint8_t *ptr;
3015 unsigned long pd;
3016 PhysPageDesc *p;
3018 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3019 if (!p) {
3020 pd = IO_MEM_UNASSIGNED;
3021 } else {
3022 pd = p->phys_offset;
3025 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3026 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3027 #ifdef TARGET_WORDS_BIGENDIAN
3028 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3029 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3030 #else
3031 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3032 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3033 #endif
3034 } else {
3035 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3036 (addr & ~TARGET_PAGE_MASK);
3037 stq_p(ptr, val);
3041 /* warning: addr must be aligned */
3042 void stl_phys(target_phys_addr_t addr, uint32_t val)
3044 int io_index;
3045 uint8_t *ptr;
3046 unsigned long pd;
3047 PhysPageDesc *p;
3049 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3050 if (!p) {
3051 pd = IO_MEM_UNASSIGNED;
3052 } else {
3053 pd = p->phys_offset;
3056 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3057 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3058 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3059 } else {
3060 unsigned long addr1;
3061 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3062 /* RAM case */
3063 ptr = phys_ram_base + addr1;
3064 stl_p(ptr, val);
3065 if (!cpu_physical_memory_is_dirty(addr1)) {
3066 /* invalidate code */
3067 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3068 /* set dirty bit */
3069 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3070 (0xff & ~CODE_DIRTY_FLAG);
3075 /* XXX: optimize */
3076 void stb_phys(target_phys_addr_t addr, uint32_t val)
3078 uint8_t v = val;
3079 cpu_physical_memory_write(addr, &v, 1);
3082 /* XXX: optimize */
3083 void stw_phys(target_phys_addr_t addr, uint32_t val)
3085 uint16_t v = tswap16(val);
3086 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3089 /* XXX: optimize */
3090 void stq_phys(target_phys_addr_t addr, uint64_t val)
3092 val = tswap64(val);
3093 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3096 #endif
3098 /* virtual memory access for debug */
3099 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3100 uint8_t *buf, int len, int is_write)
3102 int l;
3103 target_phys_addr_t phys_addr;
3104 target_ulong page;
3106 while (len > 0) {
3107 page = addr & TARGET_PAGE_MASK;
3108 phys_addr = cpu_get_phys_page_debug(env, page);
3109 /* if no physical page mapped, return an error */
3110 if (phys_addr == -1)
3111 return -1;
3112 l = (page + TARGET_PAGE_SIZE) - addr;
3113 if (l > len)
3114 l = len;
3115 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3116 buf, l, is_write);
3117 len -= l;
3118 buf += l;
3119 addr += l;
3121 return 0;
3124 /* in deterministic execution mode, instructions doing device I/Os
3125 must be at the end of the TB */
3126 void cpu_io_recompile(CPUState *env, void *retaddr)
3128 TranslationBlock *tb;
3129 uint32_t n, cflags;
3130 target_ulong pc, cs_base;
3131 uint64_t flags;
3133 tb = tb_find_pc((unsigned long)retaddr);
3134 if (!tb) {
3135 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3136 retaddr);
3138 n = env->icount_decr.u16.low + tb->icount;
3139 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3140 /* Calculate how many instructions had been executed before the fault
3141 occurred. */
3142 n = n - env->icount_decr.u16.low;
3143 /* Generate a new TB ending on the I/O insn. */
3144 n++;
3145 /* On MIPS and SH, delay slot instructions can only be restarted if
3146 they were already the first instruction in the TB. If this is not
3147 the first instruction in a TB then re-execute the preceding
3148 branch. */
3149 #if defined(TARGET_MIPS)
3150 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3151 env->active_tc.PC -= 4;
3152 env->icount_decr.u16.low++;
3153 env->hflags &= ~MIPS_HFLAG_BMASK;
3155 #elif defined(TARGET_SH4)
3156 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3157 && n > 1) {
3158 env->pc -= 2;
3159 env->icount_decr.u16.low++;
3160 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3162 #endif
3163 /* This should never happen. */
3164 if (n > CF_COUNT_MASK)
3165 cpu_abort(env, "TB too big during recompile");
3167 cflags = n | CF_LAST_IO;
3168 pc = tb->pc;
3169 cs_base = tb->cs_base;
3170 flags = tb->flags;
3171 tb_phys_invalidate(tb, -1);
3172 /* FIXME: In theory this could raise an exception. In practice
3173 we have already translated the block once so it's probably ok. */
3174 tb_gen_code(env, pc, cs_base, flags, cflags);
3175 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3176 the first in the TB) then we end up generating a whole new TB and
3177 repeating the fault, which is horribly inefficient.
3178 Better would be to execute just this insn uncached, or generate a
3179 second new TB. */
3180 cpu_resume_from_signal(env, NULL);
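/* print translation-buffer and TB statistics (code size, TB counts and
   sizes, cross-page TBs, direct jumps, flush counts) to the given stream */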
3183 void dump_exec_info(FILE *f,
3184 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3186 int i, target_code_size, max_target_code_size;
3187 int direct_jmp_count, direct_jmp2_count, cross_page;
3188 TranslationBlock *tb;
3190 target_code_size = 0;
3191 max_target_code_size = 0;
3192 cross_page = 0;
3193 direct_jmp_count = 0;
3194 direct_jmp2_count = 0;
3195 for(i = 0; i < nb_tbs; i++) {
3196 tb = &tbs[i];
3197 target_code_size += tb->size;
3198 if (tb->size > max_target_code_size)
3199 max_target_code_size = tb->size;
3200 if (tb->page_addr[1] != -1)
3201 cross_page++;
3202 if (tb->tb_next_offset[0] != 0xffff) {
3203 direct_jmp_count++;
3204 if (tb->tb_next_offset[1] != 0xffff) {
3205 direct_jmp2_count++;
3209 /* XXX: avoid using doubles ? */
3210 cpu_fprintf(f, "Translation buffer state:\n");
3211 cpu_fprintf(f, "gen code size %ld/%ld\n",
3212 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3213 cpu_fprintf(f, "TB count %d/%d\n",
3214 nb_tbs, code_gen_max_blocks);
3215 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3216 nb_tbs ? target_code_size / nb_tbs : 0,
3217 max_target_code_size);
3218 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3219 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3220 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3221 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3222 cross_page,
3223 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3224 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3225 direct_jmp_count,
3226 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3227 direct_jmp2_count,
3228 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3229 cpu_fprintf(f, "\nStatistics:\n");
3230 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3231 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3232 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3233 tcg_dump_info(f, cpu_fprintf);
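/* instantiate the softmmu helpers used for translated-code accesses
   (the _cmmu variants) for 8/16/32/64-bit widths via softmmu_template.h */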
3236 #if !defined(CONFIG_USER_ONLY)
3238 #define MMUSUFFIX _cmmu
3239 #define GETPC() NULL
3240 #define env cpu_single_env
3241 #define SOFTMMU_CODE_ACCESS
3243 #define SHIFT 0
3244 #include "softmmu_template.h"
3246 #define SHIFT 1
3247 #include "softmmu_template.h"
3249 #define SHIFT 2
3250 #include "softmmu_template.h"
3252 #define SHIFT 3
3253 #include "softmmu_template.h"
3255 #undef env
3257 #endif