[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #include "osdep.h"
47 #include "kvm.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #endif
52 //#define DEBUG_TB_INVALIDATE
53 //#define DEBUG_FLUSH
54 //#define DEBUG_TLB
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
66 #undef DEBUG_TB_CHECK
67 #endif
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #define MMAP_AREA_START 0x00000000
72 #define MMAP_AREA_END 0xa8000000
74 #if defined(TARGET_SPARC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 41
76 #elif defined(TARGET_SPARC)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #elif defined(TARGET_ALPHA)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #define TARGET_VIRT_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_PPC64)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
89 #else
90 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
91 #define TARGET_PHYS_ADDR_SPACE_BITS 32
92 #endif
94 static TranslationBlock *tbs;
95 int code_gen_max_blocks;
96 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
97 static int nb_tbs;
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
101 #if defined(__arm__) || defined(__sparc_v9__)
102 /* The prologue must be reachable with a direct jump. ARM and Sparc64
103 have limited branch ranges (possibly also PPC) so place it in a
104 section close to code segment. */
105 #define code_gen_section \
106 __attribute__((__section__(".gen_code"))) \
107 __attribute__((aligned (32)))
108 #else
109 #define code_gen_section \
110 __attribute__((aligned (32)))
111 #endif
113 uint8_t code_gen_prologue[1024] code_gen_section;
114 static uint8_t *code_gen_buffer;
115 static unsigned long code_gen_buffer_size;
116 /* threshold to flush the translated code buffer */
117 static unsigned long code_gen_buffer_max_size;
118 uint8_t *code_gen_ptr;
120 #if !defined(CONFIG_USER_ONLY)
121 ram_addr_t phys_ram_size;
122 int phys_ram_fd;
123 uint8_t *phys_ram_base;
124 uint8_t *phys_ram_dirty;
125 uint8_t *bios_mem;
126 static int in_migration;
127 static ram_addr_t phys_ram_alloc_offset = 0;
128 #endif
130 CPUState *first_cpu;
131 /* current CPU in the current thread. It is only valid inside
132 cpu_exec() */
133 CPUState *cpu_single_env;
134 /* 0 = Do not count executed instructions.
135 1 = Precise instruction counting.
136 2 = Adaptive rate instruction counting. */
137 int use_icount = 0;
138 /* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
140 int64_t qemu_icount;
142 typedef struct PageDesc {
143 /* list of TBs intersecting this ram page */
144 TranslationBlock *first_tb;
145 /* in order to optimize self modifying code, we count the number
146 of lookups we do to a given page to use a bitmap */
147 unsigned int code_write_count;
148 uint8_t *code_bitmap;
149 #if defined(CONFIG_USER_ONLY)
150 unsigned long flags;
151 #endif
152 } PageDesc;
154 typedef struct PhysPageDesc {
155 /* offset in host memory of the page + io_index in the low bits */
156 ram_addr_t phys_offset;
157 ram_addr_t region_offset;
158 } PhysPageDesc;
160 #define L2_BITS 10
161 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
162 /* XXX: this is a temporary hack for alpha target.
163 * In the future, this is to be replaced by a multi-level table
164 * to actually be able to handle the complete 64-bit address space.
166 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167 #else
168 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169 #endif
171 #define L1_SIZE (1 << L1_BITS)
172 #define L2_SIZE (1 << L2_BITS)
174 unsigned long qemu_real_host_page_size;
175 unsigned long qemu_host_page_bits;
176 unsigned long qemu_host_page_size;
177 unsigned long qemu_host_page_mask;
179 /* XXX: for system emulation, it could just be an array */
180 static PageDesc *l1_map[L1_SIZE];
181 static PhysPageDesc **l1_phys_map;
183 #if !defined(CONFIG_USER_ONLY)
184 static void io_mem_init(void);
186 /* io memory support */
187 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
188 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
189 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
190 char io_mem_used[IO_MEM_NB_ENTRIES];
191 static int io_mem_watch;
192 #endif
194 /* log support */
195 static const char *logfilename = "/tmp/qemu.log";
196 FILE *logfile;
197 int loglevel;
198 static int log_append = 0;
200 /* statistics */
201 static int tlb_flush_count;
202 static int tb_flush_count;
203 static int tb_phys_invalidate_count;
205 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
206 typedef struct subpage_t {
207 target_phys_addr_t base;
208 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
209 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
210 void *opaque[TARGET_PAGE_SIZE][2][4];
211 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
212 } subpage_t;
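/* mark a host memory range as executable (the POSIX path rounds the
   range to host page boundaries before calling mprotect) */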
214 #ifdef _WIN32
215 static void map_exec(void *addr, long size)
217 DWORD old_protect;
218 VirtualProtect(addr, size,
219 PAGE_EXECUTE_READWRITE, &old_protect);
222 #else
223 static void map_exec(void *addr, long size)
225 unsigned long start, end, page_size;
227 page_size = getpagesize();
228 start = (unsigned long)addr;
229 start &= ~(page_size - 1);
231 end = (unsigned long)addr + size;
232 end += page_size - 1;
233 end &= ~(page_size - 1);
235 mprotect((void *)start, end - start,
236 PROT_READ | PROT_WRITE | PROT_EXEC);
238 #endif
240 static void page_init(void)
242 /* NOTE: we can always assume that qemu_host_page_size >=
243 TARGET_PAGE_SIZE */
244 #ifdef _WIN32
246 SYSTEM_INFO system_info;
248 GetSystemInfo(&system_info);
249 qemu_real_host_page_size = system_info.dwPageSize;
251 #else
252 qemu_real_host_page_size = getpagesize();
253 #endif
254 if (qemu_host_page_size == 0)
255 qemu_host_page_size = qemu_real_host_page_size;
256 if (qemu_host_page_size < TARGET_PAGE_SIZE)
257 qemu_host_page_size = TARGET_PAGE_SIZE;
258 qemu_host_page_bits = 0;
259 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
260 qemu_host_page_bits++;
261 qemu_host_page_mask = ~(qemu_host_page_size - 1);
262 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
263 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
265 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
267 long long startaddr, endaddr;
268 FILE *f;
269 int n;
271 mmap_lock();
272 last_brk = (unsigned long)sbrk(0);
273 f = fopen("/proc/self/maps", "r");
274 if (f) {
275 do {
276 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
277 if (n == 2) {
278 startaddr = MIN(startaddr,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280 endaddr = MIN(endaddr,
281 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282 page_set_flags(startaddr & TARGET_PAGE_MASK,
283 TARGET_PAGE_ALIGN(endaddr),
284 PAGE_RESERVED);
286 } while (!feof(f));
287 fclose(f);
289 mmap_unlock();
291 #endif
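/* return the slot of the first-level page table covering the given
   target page index, or NULL if the index is out of range */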
294 static inline PageDesc **page_l1_map(target_ulong index)
296 #if TARGET_LONG_BITS > 32
297 /* Host memory outside guest VM. For 32-bit targets we have already
298 excluded high addresses. */
299 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
300 return NULL;
301 #endif
302 return &l1_map[index >> L2_BITS];
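/* find the PageDesc for a target page index, allocating the
   second-level table on demand */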
305 static inline PageDesc *page_find_alloc(target_ulong index)
307 PageDesc **lp, *p;
308 lp = page_l1_map(index);
309 if (!lp)
310 return NULL;
312 p = *lp;
313 if (!p) {
314 /* allocate if not found */
315 #if defined(CONFIG_USER_ONLY)
316 unsigned long addr;
317 size_t len = sizeof(PageDesc) * L2_SIZE;
318 /* Don't use qemu_malloc because it may recurse. */
319 p = mmap(0, len, PROT_READ | PROT_WRITE,
320 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
321 *lp = p;
322 addr = h2g(p);
323 if (addr == (target_ulong)addr) {
324 page_set_flags(addr & TARGET_PAGE_MASK,
325 TARGET_PAGE_ALIGN(addr + len),
326 PAGE_RESERVED);
328 #else
329 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
330 *lp = p;
331 #endif
333 return p + (index & (L2_SIZE - 1));
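/* look up the PageDesc for a target page index without allocating;
   returns NULL if no descriptor exists */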
336 static inline PageDesc *page_find(target_ulong index)
338 PageDesc **lp, *p;
339 lp = page_l1_map(index);
340 if (!lp)
341 return NULL;
343 p = *lp;
344 if (!p)
345 return 0;
346 return p + (index & (L2_SIZE - 1));
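/* look up the PhysPageDesc for a physical page index in l1_phys_map,
   allocating intermediate tables and initializing new entries to
   IO_MEM_UNASSIGNED when 'alloc' is set */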
349 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
351 void **lp, **p;
352 PhysPageDesc *pd;
354 p = (void **)l1_phys_map;
355 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
357 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
358 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
359 #endif
360 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
361 p = *lp;
362 if (!p) {
363 /* allocate if not found */
364 if (!alloc)
365 return NULL;
366 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
367 memset(p, 0, sizeof(void *) * L1_SIZE);
368 *lp = p;
370 #endif
371 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
372 pd = *lp;
373 if (!pd) {
374 int i;
375 /* allocate if not found */
376 if (!alloc)
377 return NULL;
378 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
379 *lp = pd;
380 for (i = 0; i < L2_SIZE; i++)
381 pd[i].phys_offset = IO_MEM_UNASSIGNED;
383 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
386 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
388 return phys_page_find_alloc(index, 0);
391 #if !defined(CONFIG_USER_ONLY)
392 static void tlb_protect_code(ram_addr_t ram_addr);
393 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
394 target_ulong vaddr);
395 #define mmap_lock() do { } while(0)
396 #define mmap_unlock() do { } while(0)
397 #endif
399 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
401 #if defined(CONFIG_USER_ONLY)
402 /* Currently it is not recommended to allocate big chunks of data in
403 user mode. This will change when a dedicated libc is used. */
404 #define USE_STATIC_CODE_GEN_BUFFER
405 #endif
407 #ifdef USE_STATIC_CODE_GEN_BUFFER
408 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
409 #endif
411 static void code_gen_alloc(unsigned long tb_size)
413 if (kvm_enabled())
414 return;
416 #ifdef USE_STATIC_CODE_GEN_BUFFER
417 code_gen_buffer = static_code_gen_buffer;
418 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
419 map_exec(code_gen_buffer, code_gen_buffer_size);
420 #else
421 code_gen_buffer_size = tb_size;
422 if (code_gen_buffer_size == 0) {
423 #if defined(CONFIG_USER_ONLY)
424 /* in user mode, phys_ram_size is not meaningful */
425 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
426 #else
427 /* XXX: needs adjustments */
428 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
429 #endif
431 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
432 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
433 /* The code gen buffer location may have constraints depending on
434 the host cpu and OS */
435 #if defined(__linux__)
437 int flags;
438 void *start = NULL;
440 flags = MAP_PRIVATE | MAP_ANONYMOUS;
441 #if defined(__x86_64__)
442 flags |= MAP_32BIT;
443 /* Cannot map more than that */
444 if (code_gen_buffer_size > (800 * 1024 * 1024))
445 code_gen_buffer_size = (800 * 1024 * 1024);
446 #elif defined(__sparc_v9__)
447 // Map the buffer below 2G, so we can use direct calls and branches
448 flags |= MAP_FIXED;
449 start = (void *) 0x60000000UL;
450 if (code_gen_buffer_size > (512 * 1024 * 1024))
451 code_gen_buffer_size = (512 * 1024 * 1024);
452 #elif defined(__arm__)
453 /* Map the buffer below 32M, so we can use direct calls and branches */
454 flags |= MAP_FIXED;
455 start = (void *) 0x01000000UL;
456 if (code_gen_buffer_size > 16 * 1024 * 1024)
457 code_gen_buffer_size = 16 * 1024 * 1024;
458 #endif
459 code_gen_buffer = mmap(start, code_gen_buffer_size,
460 PROT_WRITE | PROT_READ | PROT_EXEC,
461 flags, -1, 0);
462 if (code_gen_buffer == MAP_FAILED) {
463 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
464 exit(1);
467 #elif defined(__FreeBSD__)
469 int flags;
470 void *addr = NULL;
471 flags = MAP_PRIVATE | MAP_ANONYMOUS;
472 #if defined(__x86_64__)
473 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
474 * 0x40000000 is free */
475 flags |= MAP_FIXED;
476 addr = (void *)0x40000000;
477 /* Cannot map more than that */
478 if (code_gen_buffer_size > (800 * 1024 * 1024))
479 code_gen_buffer_size = (800 * 1024 * 1024);
480 #endif
481 code_gen_buffer = mmap(addr, code_gen_buffer_size,
482 PROT_WRITE | PROT_READ | PROT_EXEC,
483 flags, -1, 0);
484 if (code_gen_buffer == MAP_FAILED) {
485 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
486 exit(1);
489 #else
490 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
491 if (!code_gen_buffer) {
492 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
493 exit(1);
495 map_exec(code_gen_buffer, code_gen_buffer_size);
496 #endif
497 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
498 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
499 code_gen_buffer_max_size = code_gen_buffer_size -
500 code_gen_max_block_size();
501 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
502 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
505 /* Must be called before using the QEMU cpus. 'tb_size' is the size
506 (in bytes) allocated to the translation buffer. Zero means default
507 size. */
508 void cpu_exec_init_all(unsigned long tb_size)
510 cpu_gen_init();
511 code_gen_alloc(tb_size);
512 code_gen_ptr = code_gen_buffer;
513 page_init();
514 #if !defined(CONFIG_USER_ONLY)
515 io_mem_init();
516 #endif
519 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
521 #define CPU_COMMON_SAVE_VERSION 1
523 static void cpu_common_save(QEMUFile *f, void *opaque)
525 CPUState *env = opaque;
527 qemu_put_be32s(f, &env->halted);
528 qemu_put_be32s(f, &env->interrupt_request);
531 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
533 CPUState *env = opaque;
535 if (version_id != CPU_COMMON_SAVE_VERSION)
536 return -EINVAL;
538 qemu_get_be32s(f, &env->halted);
539 qemu_get_be32s(f, &env->interrupt_request);
540 tlb_flush(env, 1);
542 return 0;
544 #endif
546 void cpu_exec_init(CPUState *env)
548 CPUState **penv;
549 int cpu_index;
551 env->next_cpu = NULL;
552 penv = &first_cpu;
553 cpu_index = 0;
554 while (*penv != NULL) {
555 penv = (CPUState **)&(*penv)->next_cpu;
556 cpu_index++;
558 env->cpu_index = cpu_index;
559 TAILQ_INIT(&env->breakpoints);
560 TAILQ_INIT(&env->watchpoints);
561 #ifdef _WIN32
562 env->thread_id = GetCurrentProcessId();
563 #else
564 env->thread_id = getpid();
565 #endif
566 *penv = env;
567 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
568 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
569 cpu_common_save, cpu_common_load, env);
570 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
571 cpu_save, cpu_load, env);
572 #endif
575 static inline void invalidate_page_bitmap(PageDesc *p)
577 if (p->code_bitmap) {
578 qemu_free(p->code_bitmap);
579 p->code_bitmap = NULL;
581 p->code_write_count = 0;
584 /* set to NULL all the 'first_tb' fields in all PageDescs */
585 static void page_flush_tb(void)
587 int i, j;
588 PageDesc *p;
590 for(i = 0; i < L1_SIZE; i++) {
591 p = l1_map[i];
592 if (p) {
593 for(j = 0; j < L2_SIZE; j++) {
594 p->first_tb = NULL;
595 invalidate_page_bitmap(p);
596 p++;
602 /* flush all the translation blocks */
603 /* XXX: tb_flush is currently not thread safe */
604 void tb_flush(CPUState *env1)
606 CPUState *env;
607 #if defined(DEBUG_FLUSH)
608 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
609 (unsigned long)(code_gen_ptr - code_gen_buffer),
610 nb_tbs, nb_tbs > 0 ?
611 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
612 #endif
613 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
614 cpu_abort(env1, "Internal error: code buffer overflow\n");
616 nb_tbs = 0;
618 for(env = first_cpu; env != NULL; env = env->next_cpu) {
619 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
622 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
623 page_flush_tb();
625 code_gen_ptr = code_gen_buffer;
626 /* XXX: flush processor icache at this point if cache flush is
627 expensive */
628 tb_flush_count++;
631 #ifdef DEBUG_TB_CHECK
633 static void tb_invalidate_check(target_ulong address)
635 TranslationBlock *tb;
636 int i;
637 address &= TARGET_PAGE_MASK;
638 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
639 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
640 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
641 address >= tb->pc + tb->size)) {
642 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
643 address, (long)tb->pc, tb->size);
649 /* verify that all the pages have correct rights for code */
650 static void tb_page_check(void)
652 TranslationBlock *tb;
653 int i, flags1, flags2;
655 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
656 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
657 flags1 = page_get_flags(tb->pc);
658 flags2 = page_get_flags(tb->pc + tb->size - 1);
659 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
660 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
661 (long)tb->pc, tb->size, flags1, flags2);
667 static void tb_jmp_check(TranslationBlock *tb)
669 TranslationBlock *tb1;
670 unsigned int n1;
672 /* suppress any remaining jumps to this TB */
673 tb1 = tb->jmp_first;
674 for(;;) {
675 n1 = (long)tb1 & 3;
676 tb1 = (TranslationBlock *)((long)tb1 & ~3);
677 if (n1 == 2)
678 break;
679 tb1 = tb1->jmp_next[n1];
681 /* check end of list */
682 if (tb1 != tb) {
683 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
687 #endif
689 /* invalidate one TB */
690 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
691 int next_offset)
693 TranslationBlock *tb1;
694 for(;;) {
695 tb1 = *ptb;
696 if (tb1 == tb) {
697 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
698 break;
700 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
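/* remove 'tb' from the per-page TB list; pointers in this list carry
   the page slot number in their low two bits */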
704 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
706 TranslationBlock *tb1;
707 unsigned int n1;
709 for(;;) {
710 tb1 = *ptb;
711 n1 = (long)tb1 & 3;
712 tb1 = (TranslationBlock *)((long)tb1 & ~3);
713 if (tb1 == tb) {
714 *ptb = tb1->page_next[n1];
715 break;
717 ptb = &tb1->page_next[n1];
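/* remove the jump entry 'n' of 'tb' from the circular list of TBs
   that jump to its target */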
721 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
723 TranslationBlock *tb1, **ptb;
724 unsigned int n1;
726 ptb = &tb->jmp_next[n];
727 tb1 = *ptb;
728 if (tb1) {
729 /* find tb(n) in circular list */
730 for(;;) {
731 tb1 = *ptb;
732 n1 = (long)tb1 & 3;
733 tb1 = (TranslationBlock *)((long)tb1 & ~3);
734 if (n1 == n && tb1 == tb)
735 break;
736 if (n1 == 2) {
737 ptb = &tb1->jmp_first;
738 } else {
739 ptb = &tb1->jmp_next[n1];
742 /* now we can suppress tb(n) from the list */
743 *ptb = tb->jmp_next[n];
745 tb->jmp_next[n] = NULL;
749 /* reset the jump entry 'n' of a TB so that it is not chained to
750 another TB */
751 static inline void tb_reset_jump(TranslationBlock *tb, int n)
753 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
756 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
758 CPUState *env;
759 PageDesc *p;
760 unsigned int h, n1;
761 target_phys_addr_t phys_pc;
762 TranslationBlock *tb1, *tb2;
764 /* remove the TB from the hash list */
765 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
766 h = tb_phys_hash_func(phys_pc);
767 tb_remove(&tb_phys_hash[h], tb,
768 offsetof(TranslationBlock, phys_hash_next));
770 /* remove the TB from the page list */
771 if (tb->page_addr[0] != page_addr) {
772 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
773 tb_page_remove(&p->first_tb, tb);
774 invalidate_page_bitmap(p);
776 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
777 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
778 tb_page_remove(&p->first_tb, tb);
779 invalidate_page_bitmap(p);
782 tb_invalidated_flag = 1;
784 /* remove the TB from the hash list */
785 h = tb_jmp_cache_hash_func(tb->pc);
786 for(env = first_cpu; env != NULL; env = env->next_cpu) {
787 if (env->tb_jmp_cache[h] == tb)
788 env->tb_jmp_cache[h] = NULL;
791 /* suppress this TB from the two jump lists */
792 tb_jmp_remove(tb, 0);
793 tb_jmp_remove(tb, 1);
795 /* suppress any remaining jumps to this TB */
796 tb1 = tb->jmp_first;
797 for(;;) {
798 n1 = (long)tb1 & 3;
799 if (n1 == 2)
800 break;
801 tb1 = (TranslationBlock *)((long)tb1 & ~3);
802 tb2 = tb1->jmp_next[n1];
803 tb_reset_jump(tb1, n1);
804 tb1->jmp_next[n1] = NULL;
805 tb1 = tb2;
807 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
809 tb_phys_invalidate_count++;
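/* set 'len' bits starting at bit index 'start' in the byte array
   'tab' (helper for the per-page code bitmap) */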
812 static inline void set_bits(uint8_t *tab, int start, int len)
814 int end, mask, end1;
816 end = start + len;
817 tab += start >> 3;
818 mask = 0xff << (start & 7);
819 if ((start & ~7) == (end & ~7)) {
820 if (start < end) {
821 mask &= ~(0xff << (end & 7));
822 *tab |= mask;
824 } else {
825 *tab++ |= mask;
826 start = (start + 8) & ~7;
827 end1 = end & ~7;
828 while (start < end1) {
829 *tab++ = 0xff;
830 start += 8;
832 if (start < end) {
833 mask = ~(0xff << (end & 7));
834 *tab |= mask;
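/* build the bitmap of the bytes of this page that are covered by
   translated code, so only overlapping writes trigger invalidation */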
839 static void build_page_bitmap(PageDesc *p)
841 int n, tb_start, tb_end;
842 TranslationBlock *tb;
844 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
845 if (!p->code_bitmap)
846 return;
848 tb = p->first_tb;
849 while (tb != NULL) {
850 n = (long)tb & 3;
851 tb = (TranslationBlock *)((long)tb & ~3);
852 /* NOTE: this is subtle as a TB may span two physical pages */
853 if (n == 0) {
854 /* NOTE: tb_end may be after the end of the page, but
855 it is not a problem */
856 tb_start = tb->pc & ~TARGET_PAGE_MASK;
857 tb_end = tb_start + tb->size;
858 if (tb_end > TARGET_PAGE_SIZE)
859 tb_end = TARGET_PAGE_SIZE;
860 } else {
861 tb_start = 0;
862 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
864 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
865 tb = tb->page_next[n];
869 TranslationBlock *tb_gen_code(CPUState *env,
870 target_ulong pc, target_ulong cs_base,
871 int flags, int cflags)
873 TranslationBlock *tb;
874 uint8_t *tc_ptr;
875 target_ulong phys_pc, phys_page2, virt_page2;
876 int code_gen_size;
878 phys_pc = get_phys_addr_code(env, pc);
879 tb = tb_alloc(pc);
880 if (!tb) {
881 /* flush must be done */
882 tb_flush(env);
883 /* cannot fail at this point */
884 tb = tb_alloc(pc);
885 /* Don't forget to invalidate previous TB info. */
886 tb_invalidated_flag = 1;
888 tc_ptr = code_gen_ptr;
889 tb->tc_ptr = tc_ptr;
890 tb->cs_base = cs_base;
891 tb->flags = flags;
892 tb->cflags = cflags;
893 cpu_gen_code(env, tb, &code_gen_size);
894 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
896 /* check next page if needed */
897 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
898 phys_page2 = -1;
899 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
900 phys_page2 = get_phys_addr_code(env, virt_page2);
902 tb_link_phys(tb, phys_pc, phys_page2);
903 return tb;
906 /* invalidate all TBs which intersect with the target physical page
907 starting in range [start;end[. NOTE: start and end must refer to
908 the same physical page. 'is_cpu_write_access' should be true if called
909 from a real cpu write access: the virtual CPU will exit the current
910 TB if code is modified inside this TB. */
911 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
912 int is_cpu_write_access)
914 TranslationBlock *tb, *tb_next, *saved_tb;
915 CPUState *env = cpu_single_env;
916 target_ulong tb_start, tb_end;
917 PageDesc *p;
918 int n;
919 #ifdef TARGET_HAS_PRECISE_SMC
920 int current_tb_not_found = is_cpu_write_access;
921 TranslationBlock *current_tb = NULL;
922 int current_tb_modified = 0;
923 target_ulong current_pc = 0;
924 target_ulong current_cs_base = 0;
925 int current_flags = 0;
926 #endif /* TARGET_HAS_PRECISE_SMC */
928 p = page_find(start >> TARGET_PAGE_BITS);
929 if (!p)
930 return;
931 if (!p->code_bitmap &&
932 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
933 is_cpu_write_access) {
934 /* build code bitmap */
935 build_page_bitmap(p);
938 /* we remove all the TBs in the range [start, end[ */
939 /* XXX: see if in some cases it could be faster to invalidate all the code */
940 tb = p->first_tb;
941 while (tb != NULL) {
942 n = (long)tb & 3;
943 tb = (TranslationBlock *)((long)tb & ~3);
944 tb_next = tb->page_next[n];
945 /* NOTE: this is subtle as a TB may span two physical pages */
946 if (n == 0) {
947 /* NOTE: tb_end may be after the end of the page, but
948 it is not a problem */
949 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
950 tb_end = tb_start + tb->size;
951 } else {
952 tb_start = tb->page_addr[1];
953 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
955 if (!(tb_end <= start || tb_start >= end)) {
956 #ifdef TARGET_HAS_PRECISE_SMC
957 if (current_tb_not_found) {
958 current_tb_not_found = 0;
959 current_tb = NULL;
960 if (env->mem_io_pc) {
961 /* now we have a real cpu fault */
962 current_tb = tb_find_pc(env->mem_io_pc);
965 if (current_tb == tb &&
966 (current_tb->cflags & CF_COUNT_MASK) != 1) {
967 /* If we are modifying the current TB, we must stop
968 its execution. We could be more precise by checking
969 that the modification is after the current PC, but it
970 would require a specialized function to partially
971 restore the CPU state */
973 current_tb_modified = 1;
974 cpu_restore_state(current_tb, env,
975 env->mem_io_pc, NULL);
976 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
977 &current_flags);
979 #endif /* TARGET_HAS_PRECISE_SMC */
980 /* we need to do that to handle the case where a signal
981 occurs while doing tb_phys_invalidate() */
982 saved_tb = NULL;
983 if (env) {
984 saved_tb = env->current_tb;
985 env->current_tb = NULL;
987 tb_phys_invalidate(tb, -1);
988 if (env) {
989 env->current_tb = saved_tb;
990 if (env->interrupt_request && env->current_tb)
991 cpu_interrupt(env, env->interrupt_request);
994 tb = tb_next;
996 #if !defined(CONFIG_USER_ONLY)
997 /* if no code remaining, no need to continue to use slow writes */
998 if (!p->first_tb) {
999 invalidate_page_bitmap(p);
1000 if (is_cpu_write_access) {
1001 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1004 #endif
1005 #ifdef TARGET_HAS_PRECISE_SMC
1006 if (current_tb_modified) {
1007 /* we generate a block containing just the instruction
1008 modifying the memory. It will ensure that it cannot modify
1009 itself */
1010 env->current_tb = NULL;
1011 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1012 cpu_resume_from_signal(env, NULL);
1014 #endif
1017 /* len must be <= 8 and start must be a multiple of len */
1018 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1020 PageDesc *p;
1021 int offset, b;
1022 #if 0
1023 if (1) {
1024 if (loglevel) {
1025 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1026 cpu_single_env->mem_io_vaddr, len,
1027 cpu_single_env->eip,
1028 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1031 #endif
1032 p = page_find(start >> TARGET_PAGE_BITS);
1033 if (!p)
1034 return;
1035 if (p->code_bitmap) {
1036 offset = start & ~TARGET_PAGE_MASK;
1037 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1038 if (b & ((1 << len) - 1))
1039 goto do_invalidate;
1040 } else {
1041 do_invalidate:
1042 tb_invalidate_phys_page_range(start, start + len, 1);
1046 #if !defined(CONFIG_SOFTMMU)
1047 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1048 unsigned long pc, void *puc)
1050 TranslationBlock *tb;
1051 PageDesc *p;
1052 int n;
1053 #ifdef TARGET_HAS_PRECISE_SMC
1054 TranslationBlock *current_tb = NULL;
1055 CPUState *env = cpu_single_env;
1056 int current_tb_modified = 0;
1057 target_ulong current_pc = 0;
1058 target_ulong current_cs_base = 0;
1059 int current_flags = 0;
1060 #endif
1062 addr &= TARGET_PAGE_MASK;
1063 p = page_find(addr >> TARGET_PAGE_BITS);
1064 if (!p)
1065 return;
1066 tb = p->first_tb;
1067 #ifdef TARGET_HAS_PRECISE_SMC
1068 if (tb && pc != 0) {
1069 current_tb = tb_find_pc(pc);
1071 #endif
1072 while (tb != NULL) {
1073 n = (long)tb & 3;
1074 tb = (TranslationBlock *)((long)tb & ~3);
1075 #ifdef TARGET_HAS_PRECISE_SMC
1076 if (current_tb == tb &&
1077 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1078 /* If we are modifying the current TB, we must stop
1079 its execution. We could be more precise by checking
1080 that the modification is after the current PC, but it
1081 would require a specialized function to partially
1082 restore the CPU state */
1084 current_tb_modified = 1;
1085 cpu_restore_state(current_tb, env, pc, puc);
1086 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1087 &current_flags);
1089 #endif /* TARGET_HAS_PRECISE_SMC */
1090 tb_phys_invalidate(tb, addr);
1091 tb = tb->page_next[n];
1093 p->first_tb = NULL;
1094 #ifdef TARGET_HAS_PRECISE_SMC
1095 if (current_tb_modified) {
1096 /* we generate a block containing just the instruction
1097 modifying the memory. It will ensure that it cannot modify
1098 itself */
1099 env->current_tb = NULL;
1100 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1101 cpu_resume_from_signal(env, puc);
1103 #endif
1105 #endif
1107 /* add the tb in the target page and protect it if necessary */
1108 static inline void tb_alloc_page(TranslationBlock *tb,
1109 unsigned int n, target_ulong page_addr)
1111 PageDesc *p;
1112 TranslationBlock *last_first_tb;
1114 tb->page_addr[n] = page_addr;
1115 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1116 tb->page_next[n] = p->first_tb;
1117 last_first_tb = p->first_tb;
1118 p->first_tb = (TranslationBlock *)((long)tb | n);
1119 invalidate_page_bitmap(p);
1121 #if defined(TARGET_HAS_SMC) || 1
1123 #if defined(CONFIG_USER_ONLY)
1124 if (p->flags & PAGE_WRITE) {
1125 target_ulong addr;
1126 PageDesc *p2;
1127 int prot;
1129 /* force the host page as non writable (writes will have a
1130 page fault + mprotect overhead) */
1131 page_addr &= qemu_host_page_mask;
1132 prot = 0;
1133 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1134 addr += TARGET_PAGE_SIZE) {
1136 p2 = page_find (addr >> TARGET_PAGE_BITS);
1137 if (!p2)
1138 continue;
1139 prot |= p2->flags;
1140 p2->flags &= ~PAGE_WRITE;
1141 page_get_flags(addr);
1143 mprotect(g2h(page_addr), qemu_host_page_size,
1144 (prot & PAGE_BITS) & ~PAGE_WRITE);
1145 #ifdef DEBUG_TB_INVALIDATE
1146 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1147 page_addr);
1148 #endif
1150 #else
1151 /* if some code is already present, then the pages are already
1152 protected. So we handle the case where only the first TB is
1153 allocated in a physical page */
1154 if (!last_first_tb) {
1155 tlb_protect_code(page_addr);
1157 #endif
1159 #endif /* TARGET_HAS_SMC */
1162 /* Allocate a new translation block. Flush the translation buffer if
1163 too many translation blocks or too much generated code. */
1164 TranslationBlock *tb_alloc(target_ulong pc)
1166 TranslationBlock *tb;
1168 if (nb_tbs >= code_gen_max_blocks ||
1169 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1170 return NULL;
1171 tb = &tbs[nb_tbs++];
1172 tb->pc = pc;
1173 tb->cflags = 0;
1174 return tb;
1177 void tb_free(TranslationBlock *tb)
1179 /* In practice this is mostly used for single-use temporary TBs.
1180 Ignore the hard cases and just back up if this TB happens to
1181 be the last one generated. */
1182 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1183 code_gen_ptr = tb->tc_ptr;
1184 nb_tbs--;
1188 /* add a new TB and link it to the physical page tables. phys_page2 is
1189 (-1) to indicate that only one page contains the TB. */
1190 void tb_link_phys(TranslationBlock *tb,
1191 target_ulong phys_pc, target_ulong phys_page2)
1193 unsigned int h;
1194 TranslationBlock **ptb;
1196 /* Grab the mmap lock to stop another thread invalidating this TB
1197 before we are done. */
1198 mmap_lock();
1199 /* add in the physical hash table */
1200 h = tb_phys_hash_func(phys_pc);
1201 ptb = &tb_phys_hash[h];
1202 tb->phys_hash_next = *ptb;
1203 *ptb = tb;
1205 /* add in the page list */
1206 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1207 if (phys_page2 != -1)
1208 tb_alloc_page(tb, 1, phys_page2);
1209 else
1210 tb->page_addr[1] = -1;
1212 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1213 tb->jmp_next[0] = NULL;
1214 tb->jmp_next[1] = NULL;
1216 /* init original jump addresses */
1217 if (tb->tb_next_offset[0] != 0xffff)
1218 tb_reset_jump(tb, 0);
1219 if (tb->tb_next_offset[1] != 0xffff)
1220 tb_reset_jump(tb, 1);
1222 #ifdef DEBUG_TB_CHECK
1223 tb_page_check();
1224 #endif
1225 mmap_unlock();
1228 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1229 tb[1].tc_ptr. Return NULL if not found */
1230 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1232 int m_min, m_max, m;
1233 unsigned long v;
1234 TranslationBlock *tb;
1236 if (nb_tbs <= 0)
1237 return NULL;
1238 if (tc_ptr < (unsigned long)code_gen_buffer ||
1239 tc_ptr >= (unsigned long)code_gen_ptr)
1240 return NULL;
1241 /* binary search (cf Knuth) */
1242 m_min = 0;
1243 m_max = nb_tbs - 1;
1244 while (m_min <= m_max) {
1245 m = (m_min + m_max) >> 1;
1246 tb = &tbs[m];
1247 v = (unsigned long)tb->tc_ptr;
1248 if (v == tc_ptr)
1249 return tb;
1250 else if (tc_ptr < v) {
1251 m_max = m - 1;
1252 } else {
1253 m_min = m + 1;
1256 return &tbs[m_max];
1259 static void tb_reset_jump_recursive(TranslationBlock *tb);
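/* unchain the jump 'n' of 'tb': remove it from the jump list of the
   TB it points to, reset the generated jump, and recurse into that TB */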
1261 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1263 TranslationBlock *tb1, *tb_next, **ptb;
1264 unsigned int n1;
1266 tb1 = tb->jmp_next[n];
1267 if (tb1 != NULL) {
1268 /* find head of list */
1269 for(;;) {
1270 n1 = (long)tb1 & 3;
1271 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1272 if (n1 == 2)
1273 break;
1274 tb1 = tb1->jmp_next[n1];
1276 /* we are now sure that tb jumps to tb1 */
1277 tb_next = tb1;
1279 /* remove tb from the jmp_first list */
1280 ptb = &tb_next->jmp_first;
1281 for(;;) {
1282 tb1 = *ptb;
1283 n1 = (long)tb1 & 3;
1284 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1285 if (n1 == n && tb1 == tb)
1286 break;
1287 ptb = &tb1->jmp_next[n1];
1289 *ptb = tb->jmp_next[n];
1290 tb->jmp_next[n] = NULL;
1292 /* suppress the jump to next tb in generated code */
1293 tb_reset_jump(tb, n);
1295 /* suppress jumps in the tb on which we could have jumped */
1296 tb_reset_jump_recursive(tb_next);
1300 static void tb_reset_jump_recursive(TranslationBlock *tb)
1302 tb_reset_jump_recursive2(tb, 0);
1303 tb_reset_jump_recursive2(tb, 1);
1306 #if defined(TARGET_HAS_ICE)
1307 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1309 target_phys_addr_t addr;
1310 target_ulong pd;
1311 ram_addr_t ram_addr;
1312 PhysPageDesc *p;
1314 addr = cpu_get_phys_page_debug(env, pc);
1315 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1316 if (!p) {
1317 pd = IO_MEM_UNASSIGNED;
1318 } else {
1319 pd = p->phys_offset;
1321 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1322 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1324 #endif
1326 /* Add a watchpoint. */
1327 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1328 int flags, CPUWatchpoint **watchpoint)
1330 target_ulong len_mask = ~(len - 1);
1331 CPUWatchpoint *wp;
1333 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1334 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1335 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1336 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1337 return -EINVAL;
1339 wp = qemu_malloc(sizeof(*wp));
1340 if (!wp)
1341 return -ENOMEM;
1343 wp->vaddr = addr;
1344 wp->len_mask = len_mask;
1345 wp->flags = flags;
1347 /* keep all GDB-injected watchpoints in front */
1348 if (flags & BP_GDB)
1349 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1350 else
1351 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1353 tlb_flush_page(env, addr);
1355 if (watchpoint)
1356 *watchpoint = wp;
1357 return 0;
1360 /* Remove a specific watchpoint. */
1361 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1362 int flags)
1364 target_ulong len_mask = ~(len - 1);
1365 CPUWatchpoint *wp;
1367 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1368 if (addr == wp->vaddr && len_mask == wp->len_mask
1369 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1370 cpu_watchpoint_remove_by_ref(env, wp);
1371 return 0;
1374 return -ENOENT;
1377 /* Remove a specific watchpoint by reference. */
1378 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1380 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1382 tlb_flush_page(env, watchpoint->vaddr);
1384 qemu_free(watchpoint);
1387 /* Remove all matching watchpoints. */
1388 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1390 CPUWatchpoint *wp, *next;
1392 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1393 if (wp->flags & mask)
1394 cpu_watchpoint_remove_by_ref(env, wp);
1398 /* Add a breakpoint. */
1399 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1400 CPUBreakpoint **breakpoint)
1402 #if defined(TARGET_HAS_ICE)
1403 CPUBreakpoint *bp;
1405 bp = qemu_malloc(sizeof(*bp));
1406 if (!bp)
1407 return -ENOMEM;
1409 bp->pc = pc;
1410 bp->flags = flags;
1412 /* keep all GDB-injected breakpoints in front */
1413 if (flags & BP_GDB)
1414 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1415 else
1416 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1418 if (kvm_enabled())
1419 kvm_update_debugger(env);
1421 breakpoint_invalidate(env, pc);
1423 if (breakpoint)
1424 *breakpoint = bp;
1425 return 0;
1426 #else
1427 return -ENOSYS;
1428 #endif
1431 /* Remove a specific breakpoint. */
1432 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1434 #if defined(TARGET_HAS_ICE)
1435 CPUBreakpoint *bp;
1437 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1438 if (bp->pc == pc && bp->flags == flags) {
1439 cpu_breakpoint_remove_by_ref(env, bp);
1440 return 0;
1443 return -ENOENT;
1444 #else
1445 return -ENOSYS;
1446 #endif
1449 /* Remove a specific breakpoint by reference. */
1450 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1452 #if defined(TARGET_HAS_ICE)
1453 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1455 if (kvm_enabled())
1456 kvm_update_debugger(env);
1458 breakpoint_invalidate(env, breakpoint->pc);
1460 qemu_free(breakpoint);
1461 #endif
1464 /* Remove all matching breakpoints. */
1465 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1467 #if defined(TARGET_HAS_ICE)
1468 CPUBreakpoint *bp, *next;
1470 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1471 if (bp->flags & mask)
1472 cpu_breakpoint_remove_by_ref(env, bp);
1474 #endif
1477 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1478 CPU loop after each instruction */
1479 void cpu_single_step(CPUState *env, int enabled)
1481 #if defined(TARGET_HAS_ICE)
1482 if (env->singlestep_enabled != enabled) {
1483 env->singlestep_enabled = enabled;
1484 /* must flush all the translated code to avoid inconsistencies */
1485 /* XXX: only flush what is necessary */
1486 tb_flush(env);
1488 if (kvm_enabled())
1489 kvm_update_debugger(env);
1490 #endif
1493 /* enable or disable low-level logging */
1494 void cpu_set_log(int log_flags)
1496 loglevel = log_flags;
1497 if (loglevel && !logfile) {
1498 logfile = fopen(logfilename, log_append ? "a" : "w");
1499 if (!logfile) {
1500 perror(logfilename);
1501 _exit(1);
1503 #if !defined(CONFIG_SOFTMMU)
1504 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1506 static char logfile_buf[4096];
1507 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1509 #else
1510 setvbuf(logfile, NULL, _IOLBF, 0);
1511 #endif
1512 log_append = 1;
1514 if (!loglevel && logfile) {
1515 fclose(logfile);
1516 logfile = NULL;
1520 void cpu_set_log_filename(const char *filename)
1522 logfilename = strdup(filename);
1523 if (logfile) {
1524 fclose(logfile);
1525 logfile = NULL;
1527 cpu_set_log(loglevel);
1530 /* mask must never be zero, except for A20 change call */
1531 void cpu_interrupt(CPUState *env, int mask)
1533 #if !defined(USE_NPTL)
1534 TranslationBlock *tb;
1535 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1536 #endif
1537 int old_mask;
1539 old_mask = env->interrupt_request;
1540 /* FIXME: This is probably not threadsafe. A different thread could
1541 be in the middle of a read-modify-write operation. */
1542 env->interrupt_request |= mask;
1543 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1544 kvm_update_interrupt_request(env);
1545 #if defined(USE_NPTL)
1546 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1547 problem and hope the cpu will stop of its own accord. For userspace
1548 emulation this often isn't actually as bad as it sounds. Often
1549 signals are used primarily to interrupt blocking syscalls. */
1550 #else
1551 if (use_icount) {
1552 env->icount_decr.u16.high = 0xffff;
1553 #ifndef CONFIG_USER_ONLY
1554 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1555 an async event happened and we need to process it. */
1556 if (!can_do_io(env)
1557 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1558 cpu_abort(env, "Raised interrupt while not in I/O function");
1560 #endif
1561 } else {
1562 tb = env->current_tb;
1563 /* if the cpu is currently executing code, we must unlink it and
1564 all the potentially executing TB */
1565 if (tb && !testandset(&interrupt_lock)) {
1566 env->current_tb = NULL;
1567 tb_reset_jump_recursive(tb);
1568 resetlock(&interrupt_lock);
1571 #endif
1574 void cpu_reset_interrupt(CPUState *env, int mask)
1576 env->interrupt_request &= ~mask;
1579 const CPULogItem cpu_log_items[] = {
1580 { CPU_LOG_TB_OUT_ASM, "out_asm",
1581 "show generated host assembly code for each compiled TB" },
1582 { CPU_LOG_TB_IN_ASM, "in_asm",
1583 "show target assembly code for each compiled TB" },
1584 { CPU_LOG_TB_OP, "op",
1585 "show micro ops for each compiled TB" },
1586 { CPU_LOG_TB_OP_OPT, "op_opt",
1587 "show micro ops "
1588 #ifdef TARGET_I386
1589 "before eflags optimization and "
1590 #endif
1591 "after liveness analysis" },
1592 { CPU_LOG_INT, "int",
1593 "show interrupts/exceptions in short format" },
1594 { CPU_LOG_EXEC, "exec",
1595 "show trace before each executed TB (lots of logs)" },
1596 { CPU_LOG_TB_CPU, "cpu",
1597 "show CPU state before block translation" },
1598 #ifdef TARGET_I386
1599 { CPU_LOG_PCALL, "pcall",
1600 "show protected mode far calls/returns/exceptions" },
1601 #endif
1602 #ifdef DEBUG_IOPORT
1603 { CPU_LOG_IOPORT, "ioport",
1604 "show all i/o ports accesses" },
1605 #endif
1606 { 0, NULL, NULL },
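/* compare the first 'n' characters of s1 against the NUL-terminated
   string s2; returns non-zero on an exact match */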
1609 static int cmp1(const char *s1, int n, const char *s2)
1611 if (strlen(s2) != n)
1612 return 0;
1613 return memcmp(s1, s2, n) == 0;
1616 /* takes a comma-separated list of log masks. Returns 0 on error. */
1617 int cpu_str_to_log_mask(const char *str)
1619 const CPULogItem *item;
1620 int mask;
1621 const char *p, *p1;
1623 p = str;
1624 mask = 0;
1625 for(;;) {
1626 p1 = strchr(p, ',');
1627 if (!p1)
1628 p1 = p + strlen(p);
1629 if(cmp1(p,p1-p,"all")) {
1630 for(item = cpu_log_items; item->mask != 0; item++) {
1631 mask |= item->mask;
1633 } else {
1634 for(item = cpu_log_items; item->mask != 0; item++) {
1635 if (cmp1(p, p1 - p, item->name))
1636 goto found;
1638 return 0;
1640 found:
1641 mask |= item->mask;
1642 if (*p1 != ',')
1643 break;
1644 p = p1 + 1;
1646 return mask;
1649 void cpu_abort(CPUState *env, const char *fmt, ...)
1651 va_list ap;
1652 va_list ap2;
1654 va_start(ap, fmt);
1655 va_copy(ap2, ap);
1656 fprintf(stderr, "qemu: fatal: ");
1657 vfprintf(stderr, fmt, ap);
1658 fprintf(stderr, "\n");
1659 #ifdef TARGET_I386
1660 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1661 #else
1662 cpu_dump_state(env, stderr, fprintf, 0);
1663 #endif
1664 if (logfile) {
1665 fprintf(logfile, "qemu: fatal: ");
1666 vfprintf(logfile, fmt, ap2);
1667 fprintf(logfile, "\n");
1668 #ifdef TARGET_I386
1669 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1670 #else
1671 cpu_dump_state(env, logfile, fprintf, 0);
1672 #endif
1673 fflush(logfile);
1674 fclose(logfile);
1676 va_end(ap2);
1677 va_end(ap);
1678 abort();
1681 CPUState *cpu_copy(CPUState *env)
1683 CPUState *new_env = cpu_init(env->cpu_model_str);
1684 /* preserve chaining and index */
1685 CPUState *next_cpu = new_env->next_cpu;
1686 int cpu_index = new_env->cpu_index;
1687 memcpy(new_env, env, sizeof(CPUState));
1688 new_env->next_cpu = next_cpu;
1689 new_env->cpu_index = cpu_index;
1690 return new_env;
1693 #if !defined(CONFIG_USER_ONLY)
1695 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1697 unsigned int i;
1699 /* Discard jump cache entries for any tb which might potentially
1700 overlap the flushed page. */
1701 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1702 memset (&env->tb_jmp_cache[i], 0,
1703 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1705 i = tb_jmp_cache_hash_page(addr);
1706 memset (&env->tb_jmp_cache[i], 0,
1707 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1710 /* NOTE: if flush_global is true, also flush global entries (not
1711 implemented yet) */
1712 void tlb_flush(CPUState *env, int flush_global)
1714 int i;
1716 #if defined(DEBUG_TLB)
1717 printf("tlb_flush:\n");
1718 #endif
1719 /* must reset current TB so that interrupts cannot modify the
1720 links while we are modifying them */
1721 env->current_tb = NULL;
1723 for(i = 0; i < CPU_TLB_SIZE; i++) {
1724 env->tlb_table[0][i].addr_read = -1;
1725 env->tlb_table[0][i].addr_write = -1;
1726 env->tlb_table[0][i].addr_code = -1;
1727 env->tlb_table[1][i].addr_read = -1;
1728 env->tlb_table[1][i].addr_write = -1;
1729 env->tlb_table[1][i].addr_code = -1;
1730 #if (NB_MMU_MODES >= 3)
1731 env->tlb_table[2][i].addr_read = -1;
1732 env->tlb_table[2][i].addr_write = -1;
1733 env->tlb_table[2][i].addr_code = -1;
1734 #if (NB_MMU_MODES == 4)
1735 env->tlb_table[3][i].addr_read = -1;
1736 env->tlb_table[3][i].addr_write = -1;
1737 env->tlb_table[3][i].addr_code = -1;
1738 #endif
1739 #endif
1742 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1744 #ifdef USE_KQEMU
1745 if (env->kqemu_enabled) {
1746 kqemu_flush(env, flush_global);
1748 #endif
1749 tlb_flush_count++;
1752 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1754 if (addr == (tlb_entry->addr_read &
1755 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1756 addr == (tlb_entry->addr_write &
1757 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1758 addr == (tlb_entry->addr_code &
1759 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1760 tlb_entry->addr_read = -1;
1761 tlb_entry->addr_write = -1;
1762 tlb_entry->addr_code = -1;
1766 void tlb_flush_page(CPUState *env, target_ulong addr)
1768 int i;
1770 #if defined(DEBUG_TLB)
1771 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1772 #endif
1773 /* must reset current TB so that interrupts cannot modify the
1774 links while we are modifying them */
1775 env->current_tb = NULL;
1777 addr &= TARGET_PAGE_MASK;
1778 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1779 tlb_flush_entry(&env->tlb_table[0][i], addr);
1780 tlb_flush_entry(&env->tlb_table[1][i], addr);
1781 #if (NB_MMU_MODES >= 3)
1782 tlb_flush_entry(&env->tlb_table[2][i], addr);
1783 #if (NB_MMU_MODES == 4)
1784 tlb_flush_entry(&env->tlb_table[3][i], addr);
1785 #endif
1786 #endif
1788 tlb_flush_jmp_cache(env, addr);
1790 #ifdef USE_KQEMU
1791 if (env->kqemu_enabled) {
1792 kqemu_flush_page(env, addr);
1794 #endif
1797 /* update the TLBs so that writes to code in the virtual page 'addr'
1798 can be detected */
1799 static void tlb_protect_code(ram_addr_t ram_addr)
1801 cpu_physical_memory_reset_dirty(ram_addr,
1802 ram_addr + TARGET_PAGE_SIZE,
1803 CODE_DIRTY_FLAG);
1806 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1807 tested for self modifying code */
1808 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1809 target_ulong vaddr)
1811 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
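/* mark the TLB write entry as not dirty if the RAM it maps falls
   inside [start, start + length) */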
1814 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1815 unsigned long start, unsigned long length)
1817 unsigned long addr;
1818 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1819 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1820 if ((addr - start) < length) {
1821 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1826 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1827 int dirty_flags)
1829 CPUState *env;
1830 unsigned long length, start1;
1831 int i, mask, len;
1832 uint8_t *p;
1834 start &= TARGET_PAGE_MASK;
1835 end = TARGET_PAGE_ALIGN(end);
1837 length = end - start;
1838 if (length == 0)
1839 return;
1840 len = length >> TARGET_PAGE_BITS;
1841 #ifdef USE_KQEMU
1842 /* XXX: should not depend on cpu context */
1843 env = first_cpu;
1844 if (env->kqemu_enabled) {
1845 ram_addr_t addr;
1846 addr = start;
1847 for(i = 0; i < len; i++) {
1848 kqemu_set_notdirty(env, addr);
1849 addr += TARGET_PAGE_SIZE;
1852 #endif
1853 mask = ~dirty_flags;
1854 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1855 for(i = 0; i < len; i++)
1856 p[i] &= mask;
1858 /* we modify the TLB cache so that the dirty bit will be set again
1859 when accessing the range */
1860 start1 = start + (unsigned long)phys_ram_base;
1861 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1862 for(i = 0; i < CPU_TLB_SIZE; i++)
1863 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1864 for(i = 0; i < CPU_TLB_SIZE; i++)
1865 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1866 #if (NB_MMU_MODES >= 3)
1867 for(i = 0; i < CPU_TLB_SIZE; i++)
1868 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1869 #if (NB_MMU_MODES == 4)
1870 for(i = 0; i < CPU_TLB_SIZE; i++)
1871 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1872 #endif
1873 #endif
1877 int cpu_physical_memory_set_dirty_tracking(int enable)
1879 int r=0;
1881 if (kvm_enabled())
1882 r = kvm_physical_memory_set_dirty_tracking(enable);
1883 in_migration = enable;
1884 return r;
1887 int cpu_physical_memory_get_dirty_tracking(void)
1889 return in_migration;
1892 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1894 if (kvm_enabled())
1895 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
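/* re-set TLB_NOTDIRTY on a RAM write entry whose underlying page is
   no longer marked dirty */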
1898 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1900 ram_addr_t ram_addr;
1902 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1903 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1904 tlb_entry->addend - (unsigned long)phys_ram_base;
1905 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1906 tlb_entry->addr_write |= TLB_NOTDIRTY;
1911 /* update the TLB according to the current state of the dirty bits */
1912 void cpu_tlb_update_dirty(CPUState *env)
1914 int i;
1915 for(i = 0; i < CPU_TLB_SIZE; i++)
1916 tlb_update_dirty(&env->tlb_table[0][i]);
1917 for(i = 0; i < CPU_TLB_SIZE; i++)
1918 tlb_update_dirty(&env->tlb_table[1][i]);
1919 #if (NB_MMU_MODES >= 3)
1920 for(i = 0; i < CPU_TLB_SIZE; i++)
1921 tlb_update_dirty(&env->tlb_table[2][i]);
1922 #if (NB_MMU_MODES == 4)
1923 for(i = 0; i < CPU_TLB_SIZE; i++)
1924 tlb_update_dirty(&env->tlb_table[3][i]);
1925 #endif
1926 #endif
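/* clear TLB_NOTDIRTY for the entry matching 'vaddr' so that plain RAM
   writes no longer trap */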
1929 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1931 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1932 tlb_entry->addr_write = vaddr;
1935 /* update the TLB corresponding to virtual page vaddr
1936 so that it is no longer dirty */
1937 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1939 int i;
1941 vaddr &= TARGET_PAGE_MASK;
1942 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1943 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1944 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1945 #if (NB_MMU_MODES >= 3)
1946 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1947 #if (NB_MMU_MODES == 4)
1948 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1949 #endif
1950 #endif
1953 /* add a new TLB entry. At most one entry for a given virtual address
1954 is permitted. Return 0 if OK or 2 if the page could not be mapped
1955 (can only happen in non SOFTMMU mode for I/O pages or pages
1956 conflicting with the host address space). */
1957 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1958 target_phys_addr_t paddr, int prot,
1959 int mmu_idx, int is_softmmu)
1961 PhysPageDesc *p;
1962 unsigned long pd;
1963 unsigned int index;
1964 target_ulong address;
1965 target_ulong code_address;
1966 target_phys_addr_t addend;
1967 int ret;
1968 CPUTLBEntry *te;
1969 CPUWatchpoint *wp;
1970 target_phys_addr_t iotlb;
1972 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1973 if (!p) {
1974 pd = IO_MEM_UNASSIGNED;
1975 } else {
1976 pd = p->phys_offset;
1978 #if defined(DEBUG_TLB)
1979 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1980 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1981 #endif
1983 ret = 0;
1984 address = vaddr;
1985 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1986 /* IO memory case (romd handled later) */
1987 address |= TLB_MMIO;
1989 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1990 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1991 /* Normal RAM. */
1992 iotlb = pd & TARGET_PAGE_MASK;
1993 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1994 iotlb |= IO_MEM_NOTDIRTY;
1995 else
1996 iotlb |= IO_MEM_ROM;
1997 } else {
1998 /* IO handlers are currently passed a physical address.
1999 It would be nice to pass an offset from the base address
2000 of that region. This would avoid having to special case RAM,
2001 and avoid full address decoding in every device.
2002 We can't use the high bits of pd for this because
2003 IO_MEM_ROMD uses these as a ram address. */
2004 iotlb = (pd & ~TARGET_PAGE_MASK);
2005 if (p) {
2006 iotlb += p->region_offset;
2007 } else {
2008 iotlb += paddr;
2012 code_address = address;
2013 /* Make accesses to pages with watchpoints go via the
2014 watchpoint trap routines. */
2015 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2016 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2017 iotlb = io_mem_watch + paddr;
2018 /* TODO: The memory case can be optimized by not trapping
2019 reads of pages with a write breakpoint. */
2020 address |= TLB_MMIO;
2024 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2025 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2026 te = &env->tlb_table[mmu_idx][index];
2027 te->addend = addend - vaddr;
2028 if (prot & PAGE_READ) {
2029 te->addr_read = address;
2030 } else {
2031 te->addr_read = -1;
2034 if (prot & PAGE_EXEC) {
2035 te->addr_code = code_address;
2036 } else {
2037 te->addr_code = -1;
2039 if (prot & PAGE_WRITE) {
2040 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2041 (pd & IO_MEM_ROMD)) {
2042 /* Write access calls the I/O callback. */
2043 te->addr_write = address | TLB_MMIO;
2044 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2045 !cpu_physical_memory_is_dirty(pd)) {
2046 te->addr_write = address | TLB_NOTDIRTY;
2047 } else {
2048 te->addr_write = address;
2050 } else {
2051 te->addr_write = -1;
2053 return ret;
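The addend stored above is what lets the softmmu fast path turn a guest virtual address into a host pointer with a single addition: the entry holds host_page_base - vaddr_page, so host = vaddr + addend for any offset inside the page. A self-contained model of that arithmetic, using a malloc'd buffer as stand-in host RAM (all MODEL_* names and addresses are assumptions for the example):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096u
#define MODEL_PAGE_MASK (~(uintptr_t)(MODEL_PAGE_SIZE - 1))

int main(void)
{
    uint8_t *host_ram = malloc(MODEL_PAGE_SIZE);    /* stand-in for phys_ram_base + pd */
    uintptr_t guest_vaddr = 0x40001234;             /* arbitrary guest virtual address */
    uintptr_t addend, offset;
    uint8_t *host_ptr;

    /* What tlb_set_page_exec() stores: the addend is taken relative to
       the start of the guest page, not to the access address. */
    addend = (uintptr_t)host_ram - (guest_vaddr & MODEL_PAGE_MASK);

    /* What the fast path computes on every access: a single addition. */
    host_ptr = (uint8_t *)(guest_vaddr + addend);

    memcpy(host_ptr, "hit", 4);
    offset = guest_vaddr & ~MODEL_PAGE_MASK;
    printf("offset 0x%lx in the page holds: %s\n",
           (unsigned long)offset, (char *)host_ram + offset);
    free(host_ram);
    return 0;
}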
2056 #else
2058 void tlb_flush(CPUState *env, int flush_global)
2062 void tlb_flush_page(CPUState *env, target_ulong addr)
2066 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2067 target_phys_addr_t paddr, int prot,
2068 int mmu_idx, int is_softmmu)
2070 return 0;
2073 /* dump memory mappings */
2074 void page_dump(FILE *f)
2076 unsigned long start, end;
2077 int i, j, prot, prot1;
2078 PageDesc *p;
2080 fprintf(f, "%-8s %-8s %-8s %s\n",
2081 "start", "end", "size", "prot");
2082 start = -1;
2083 end = -1;
2084 prot = 0;
2085 for(i = 0; i <= L1_SIZE; i++) {
2086 if (i < L1_SIZE)
2087 p = l1_map[i];
2088 else
2089 p = NULL;
2090 for(j = 0;j < L2_SIZE; j++) {
2091 if (!p)
2092 prot1 = 0;
2093 else
2094 prot1 = p[j].flags;
2095 if (prot1 != prot) {
2096 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2097 if (start != -1) {
2098 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2099 start, end, end - start,
2100 prot & PAGE_READ ? 'r' : '-',
2101 prot & PAGE_WRITE ? 'w' : '-',
2102 prot & PAGE_EXEC ? 'x' : '-');
2104 if (prot1 != 0)
2105 start = end;
2106 else
2107 start = -1;
2108 prot = prot1;
2110 if (!p)
2111 break;
2116 int page_get_flags(target_ulong address)
2118 PageDesc *p;
2120 p = page_find(address >> TARGET_PAGE_BITS);
2121 if (!p)
2122 return 0;
2123 return p->flags;
2126 /* modify the flags of a page and invalidate the code if
2127 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2128 depending on PAGE_WRITE */
2129 void page_set_flags(target_ulong start, target_ulong end, int flags)
2131 PageDesc *p;
2132 target_ulong addr;
2134 /* mmap_lock should already be held. */
2135 start = start & TARGET_PAGE_MASK;
2136 end = TARGET_PAGE_ALIGN(end);
2137 if (flags & PAGE_WRITE)
2138 flags |= PAGE_WRITE_ORG;
2139 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2140 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2141 /* We may be called for host regions that are outside guest
2142 address space. */
2143 if (!p)
2144 return;
2145 /* if the write protection is set, then we invalidate the code
2146 inside */
2147 if (!(p->flags & PAGE_WRITE) &&
2148 (flags & PAGE_WRITE) &&
2149 p->first_tb) {
2150 tb_invalidate_phys_page(addr, 0, NULL);
2152 p->flags = flags;
2156 int page_check_range(target_ulong start, target_ulong len, int flags)
2158 PageDesc *p;
2159 target_ulong end;
2160 target_ulong addr;
2162 if (start + len < start)
2163 /* we've wrapped around */
2164 return -1;
2166 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2167 start = start & TARGET_PAGE_MASK;
2169 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2170 p = page_find(addr >> TARGET_PAGE_BITS);
2171 if( !p )
2172 return -1;
2173 if( !(p->flags & PAGE_VALID) )
2174 return -1;
2176 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2177 return -1;
2178 if (flags & PAGE_WRITE) {
2179 if (!(p->flags & PAGE_WRITE_ORG))
2180 return -1;
2181 /* unprotect the page if it was put read-only because it
2182 contains translated code */
2183 if (!(p->flags & PAGE_WRITE)) {
2184 if (!page_unprotect(addr, 0, NULL))
2185 return -1;
2187 return 0;
2190 return 0;
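A typical caller of page_check_range() is user-mode syscall emulation, which has to validate a guest buffer before touching it. The helper below is a hypothetical sketch of that pattern, reusing page_check_range() above and the lock_user()/unlock_user() calls seen later in this file:

/* Sketch only: validate, then copy, a guest buffer in user-mode emulation.
   The helper name is made up. */
static int copy_from_guest(void *dst, target_ulong guest_addr, target_ulong len)
{
    void *p;

    if (page_check_range(guest_addr, len, PAGE_READ) < 0)
        return -1;                  /* unmapped, invalid or not readable */
    if (!(p = lock_user(VERIFY_READ, guest_addr, len, 1)))
        return -1;                  /* host-side access failed */
    memcpy(dst, p, len);
    unlock_user(p, guest_addr, 0);
    return 0;
}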
2193 /* called from signal handler: invalidate the code and unprotect the
2194 page. Return TRUE if the fault was successfully handled. */
2195 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2197 unsigned int page_index, prot, pindex;
2198 PageDesc *p, *p1;
2199 target_ulong host_start, host_end, addr;
2201 /* Technically this isn't safe inside a signal handler. However we
2202 know this only ever happens in a synchronous SEGV handler, so in
2203 practice it seems to be ok. */
2204 mmap_lock();
2206 host_start = address & qemu_host_page_mask;
2207 page_index = host_start >> TARGET_PAGE_BITS;
2208 p1 = page_find(page_index);
2209 if (!p1) {
2210 mmap_unlock();
2211 return 0;
2213 host_end = host_start + qemu_host_page_size;
2214 p = p1;
2215 prot = 0;
2216 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2217 prot |= p->flags;
2218 p++;
2220 /* if the page was really writable, then we change its
2221 protection back to writable */
2222 if (prot & PAGE_WRITE_ORG) {
2223 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2224 if (!(p1[pindex].flags & PAGE_WRITE)) {
2225 mprotect((void *)g2h(host_start), qemu_host_page_size,
2226 (prot & PAGE_BITS) | PAGE_WRITE);
2227 p1[pindex].flags |= PAGE_WRITE;
2228 /* and since the content will be modified, we must invalidate
2229 the corresponding translated code. */
2230 tb_invalidate_phys_page(address, pc, puc);
2231 #ifdef DEBUG_TB_CHECK
2232 tb_invalidate_check(address);
2233 #endif
2234 mmap_unlock();
2235 return 1;
2238 mmap_unlock();
2239 return 0;
2242 static inline void tlb_set_dirty(CPUState *env,
2243 unsigned long addr, target_ulong vaddr)
2246 #endif /* defined(CONFIG_USER_ONLY) */
2248 #if !defined(CONFIG_USER_ONLY)
2250 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2251 ram_addr_t memory, ram_addr_t region_offset);
2252 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2253 ram_addr_t orig_memory, ram_addr_t region_offset);
2254 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2255 need_subpage) \
2256 do { \
2257 if (addr > start_addr) \
2258 start_addr2 = 0; \
2259 else { \
2260 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2261 if (start_addr2 > 0) \
2262 need_subpage = 1; \
2265 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2266 end_addr2 = TARGET_PAGE_SIZE - 1; \
2267 else { \
2268 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2269 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2270 need_subpage = 1; \
2272 } while (0)
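The effect of CHECK_SUBPAGE is easiest to see on a concrete, unaligned registration. The standalone program below replays the macro's logic for one page, assuming 4 KiB target pages; the addresses are purely illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 0x1000u
#define MODEL_PAGE_MASK (~(uint64_t)(MODEL_PAGE_SIZE - 1))

int main(void)
{
    /* Hypothetical registration: 0x200 bytes starting 0x800 into the page
       at 0x10000, so neither edge of the region is page aligned. */
    uint64_t start_addr = 0x10800, orig_size = 0x200;
    uint64_t addr = start_addr & MODEL_PAGE_MASK;   /* page being processed */
    uint64_t start_addr2, end_addr2;
    int need_subpage = 0;

    if (addr > start_addr) {
        start_addr2 = 0;
    } else {
        start_addr2 = start_addr & ~MODEL_PAGE_MASK;
        if (start_addr2 > 0)
            need_subpage = 1;
    }
    if ((start_addr + orig_size) - addr >= MODEL_PAGE_SIZE) {
        end_addr2 = MODEL_PAGE_SIZE - 1;
    } else {
        end_addr2 = (start_addr + orig_size - 1) & ~MODEL_PAGE_MASK;
        if (end_addr2 < MODEL_PAGE_SIZE - 1)
            need_subpage = 1;
    }

    /* Prints: subrange 0x800..0x9ff, need_subpage=1 */
    printf("subrange 0x%" PRIx64 "..0x%" PRIx64 ", need_subpage=%d\n",
           start_addr2, end_addr2, need_subpage);
    return 0;
}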
2274 /* register physical memory. 'size' must be a multiple of the target
2275 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2276 io memory page. The address used when calling the IO function is
2277 the offset from the start of the region, plus region_offset. Both
2278 start_addr and region_offset are rounded down to a page boundary
2279 before calculating this offset. This should not be a problem unless
2280 the low bits of start_addr and region_offset differ. */
2281 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2282 ram_addr_t size,
2283 ram_addr_t phys_offset,
2284 ram_addr_t region_offset)
2286 target_phys_addr_t addr, end_addr;
2287 PhysPageDesc *p;
2288 CPUState *env;
2289 ram_addr_t orig_size = size;
2290 void *subpage;
2292 #ifdef USE_KQEMU
2293 /* XXX: should not depend on cpu context */
2294 env = first_cpu;
2295 if (env->kqemu_enabled) {
2296 kqemu_set_phys_mem(start_addr, size, phys_offset);
2298 #endif
2299 if (kvm_enabled())
2300 kvm_set_phys_mem(start_addr, size, phys_offset);
2302 region_offset &= TARGET_PAGE_MASK;
2303 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2304 end_addr = start_addr + (target_phys_addr_t)size;
2305 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2306 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2307 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2308 ram_addr_t orig_memory = p->phys_offset;
2309 target_phys_addr_t start_addr2, end_addr2;
2310 int need_subpage = 0;
2312 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2313 need_subpage);
2314 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2315 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2316 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2317 &p->phys_offset, orig_memory,
2318 p->region_offset);
2319 } else {
2320 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2321 >> IO_MEM_SHIFT];
2323 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2324 region_offset);
2325 p->region_offset = 0;
2326 } else {
2327 p->phys_offset = phys_offset;
2328 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2329 (phys_offset & IO_MEM_ROMD))
2330 phys_offset += TARGET_PAGE_SIZE;
2332 } else {
2333 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2334 p->phys_offset = phys_offset;
2335 p->region_offset = region_offset;
2336 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2337 (phys_offset & IO_MEM_ROMD)) {
2338 phys_offset += TARGET_PAGE_SIZE;
2339 } else {
2340 target_phys_addr_t start_addr2, end_addr2;
2341 int need_subpage = 0;
2343 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2344 end_addr2, need_subpage);
2346 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2347 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2348 &p->phys_offset, IO_MEM_UNASSIGNED,
2350 subpage_register(subpage, start_addr2, end_addr2,
2351 phys_offset, region_offset);
2352 p->region_offset = 0;
2356 region_offset += TARGET_PAGE_SIZE;
2359 /* since each CPU stores ram addresses in its TLB cache, we must
2360 reset the modified entries */
2361 /* XXX: slow ! */
2362 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2363 tlb_flush(env, 1);
2367 /* XXX: temporary until new memory mapping API */
2368 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2370 PhysPageDesc *p;
2372 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2373 if (!p)
2374 return IO_MEM_UNASSIGNED;
2375 return p->phys_offset;
2378 /* XXX: better than nothing */
2379 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2381 ram_addr_t addr;
2382 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2383 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2384 (uint64_t)size, (uint64_t)phys_ram_size);
2385 abort();
2387 addr = phys_ram_alloc_offset;
2388 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2389 return addr;
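Board code of this era normally pairs qemu_ram_alloc() with the registration function above, so the returned offset becomes the phys_offset later consulted by the TLB code. A minimal sketch, with a hypothetical guest-physical base and size:

/* Sketch only: back a 1 MiB guest-physical window with allocated RAM.
   Uses qemu_ram_alloc() and cpu_register_physical_memory_offset() from
   this file; the 0x10000000 base is an arbitrary example address. */
static void map_example_ram(void)
{
    ram_addr_t size = 0x100000;
    ram_addr_t offset = qemu_ram_alloc(size);

    /* Plain RAM is registered with its ram offset as the phys_offset;
       IO_MEM_RAM occupies the low bits. */
    cpu_register_physical_memory_offset(0x10000000, size,
                                        offset | IO_MEM_RAM, 0);
}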
2392 void qemu_ram_free(ram_addr_t addr)
2396 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2398 #ifdef DEBUG_UNASSIGNED
2399 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2400 #endif
2401 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2402 do_unassigned_access(addr, 0, 0, 0, 1);
2403 #endif
2404 return 0;
2407 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2409 #ifdef DEBUG_UNASSIGNED
2410 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2411 #endif
2412 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2413 do_unassigned_access(addr, 0, 0, 0, 2);
2414 #endif
2415 return 0;
2418 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2420 #ifdef DEBUG_UNASSIGNED
2421 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2422 #endif
2423 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2424 do_unassigned_access(addr, 0, 0, 0, 4);
2425 #endif
2426 return 0;
2429 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2431 #ifdef DEBUG_UNASSIGNED
2432 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2433 #endif
2434 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2435 do_unassigned_access(addr, 1, 0, 0, 1);
2436 #endif
2439 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2441 #ifdef DEBUG_UNASSIGNED
2442 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2443 #endif
2444 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2445 do_unassigned_access(addr, 1, 0, 0, 2);
2446 #endif
2449 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2451 #ifdef DEBUG_UNASSIGNED
2452 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2453 #endif
2454 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2455 do_unassigned_access(addr, 1, 0, 0, 4);
2456 #endif
2459 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2460 unassigned_mem_readb,
2461 unassigned_mem_readw,
2462 unassigned_mem_readl,
2465 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2466 unassigned_mem_writeb,
2467 unassigned_mem_writew,
2468 unassigned_mem_writel,
2471 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2472 uint32_t val)
2474 int dirty_flags;
2475 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2476 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2477 #if !defined(CONFIG_USER_ONLY)
2478 tb_invalidate_phys_page_fast(ram_addr, 1);
2479 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2480 #endif
2482 stb_p(phys_ram_base + ram_addr, val);
2483 #ifdef USE_KQEMU
2484 if (cpu_single_env->kqemu_enabled &&
2485 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2486 kqemu_modify_page(cpu_single_env, ram_addr);
2487 #endif
2488 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2489 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2490 /* we remove the notdirty callback only if the code has been
2491 flushed */
2492 if (dirty_flags == 0xff)
2493 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2496 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2497 uint32_t val)
2499 int dirty_flags;
2500 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2501 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2502 #if !defined(CONFIG_USER_ONLY)
2503 tb_invalidate_phys_page_fast(ram_addr, 2);
2504 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2505 #endif
2507 stw_p(phys_ram_base + ram_addr, val);
2508 #ifdef USE_KQEMU
2509 if (cpu_single_env->kqemu_enabled &&
2510 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2511 kqemu_modify_page(cpu_single_env, ram_addr);
2512 #endif
2513 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2514 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2515 /* we remove the notdirty callback only if the code has been
2516 flushed */
2517 if (dirty_flags == 0xff)
2518 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2521 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2522 uint32_t val)
2524 int dirty_flags;
2525 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2526 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2527 #if !defined(CONFIG_USER_ONLY)
2528 tb_invalidate_phys_page_fast(ram_addr, 4);
2529 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2530 #endif
2532 stl_p(phys_ram_base + ram_addr, val);
2533 #ifdef USE_KQEMU
2534 if (cpu_single_env->kqemu_enabled &&
2535 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2536 kqemu_modify_page(cpu_single_env, ram_addr);
2537 #endif
2538 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2539 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2540 /* we remove the notdirty callback only if the code has been
2541 flushed */
2542 if (dirty_flags == 0xff)
2543 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
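The three notdirty handlers above share one per-page dirty-byte protocol. The toy model below demonstrates just the flag arithmetic; MODEL_CODE_DIRTY_FLAG is a placeholder bit chosen for the example, not necessarily the real CODE_DIRTY_FLAG value, and the code-invalidation step is deliberately left out:

#include <stdint.h>
#include <stdio.h>

#define MODEL_CODE_DIRTY_FLAG 0x02   /* placeholder bit for this example */

/* One byte of dirty flags per guest RAM page, mirroring phys_ram_dirty[]. */
static uint8_t model_dirty[2];

static void model_notdirty_write(unsigned page)
{
    uint8_t flags = model_dirty[page];

    /* In exec.c a clear CODE_DIRTY_FLAG means translated code still derives
       from this page, so the write first invalidates it and re-reads the
       flag byte; that step is intentionally not modelled here. */
    flags |= 0xff & ~MODEL_CODE_DIRTY_FLAG;   /* mark every other client dirty */
    model_dirty[page] = flags;

    /* Only a fully dirty byte (0xff) lets tlb_set_dirty() drop the notdirty
       callback and restore the fast write path. */
    printf("page %u: flags 0x%02x, fast path restored: %s\n",
           page, flags, flags == 0xff ? "yes" : "no");
}

int main(void)
{
    model_dirty[0] = 0x00;                    /* page still holding translated code */
    model_dirty[1] = MODEL_CODE_DIRTY_FLAG;   /* page with no code derived from it  */
    model_notdirty_write(0);                  /* 0xfd: callback stays installed     */
    model_notdirty_write(1);                  /* 0xff: back to the fast path        */
    return 0;
}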
2546 static CPUReadMemoryFunc *error_mem_read[3] = {
2547 NULL, /* never used */
2548 NULL, /* never used */
2549 NULL, /* never used */
2552 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2553 notdirty_mem_writeb,
2554 notdirty_mem_writew,
2555 notdirty_mem_writel,
2558 /* Generate a debug exception if a watchpoint has been hit. */
2559 static void check_watchpoint(int offset, int len_mask, int flags)
2561 CPUState *env = cpu_single_env;
2562 target_ulong pc, cs_base;
2563 TranslationBlock *tb;
2564 target_ulong vaddr;
2565 CPUWatchpoint *wp;
2566 int cpu_flags;
2568 if (env->watchpoint_hit) {
2569 /* We re-entered the check after replacing the TB. Now raise
2570 * the debug interrupt so that it will trigger after the
2571 * current instruction. */
2572 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2573 return;
2575 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2576 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2577 if ((vaddr == (wp->vaddr & len_mask) ||
2578 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2579 wp->flags |= BP_WATCHPOINT_HIT;
2580 if (!env->watchpoint_hit) {
2581 env->watchpoint_hit = wp;
2582 tb = tb_find_pc(env->mem_io_pc);
2583 if (!tb) {
2584 cpu_abort(env, "check_watchpoint: could not find TB for "
2585 "pc=%p", (void *)env->mem_io_pc);
2587 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2588 tb_phys_invalidate(tb, -1);
2589 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2590 env->exception_index = EXCP_DEBUG;
2591 } else {
2592 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2593 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2595 cpu_resume_from_signal(env, NULL);
2597 } else {
2598 wp->flags &= ~BP_WATCHPOINT_HIT;
2603 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2604 so these check for a hit then pass through to the normal out-of-line
2605 phys routines. */
2606 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2608 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2609 return ldub_phys(addr);
2612 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2614 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2615 return lduw_phys(addr);
2618 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2620 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2621 return ldl_phys(addr);
2624 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2625 uint32_t val)
2627 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2628 stb_phys(addr, val);
2631 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2632 uint32_t val)
2634 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2635 stw_phys(addr, val);
2638 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2639 uint32_t val)
2641 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2642 stl_phys(addr, val);
2645 static CPUReadMemoryFunc *watch_mem_read[3] = {
2646 watch_mem_readb,
2647 watch_mem_readw,
2648 watch_mem_readl,
2651 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2652 watch_mem_writeb,
2653 watch_mem_writew,
2654 watch_mem_writel,
2657 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2658 unsigned int len)
2660 uint32_t ret;
2661 unsigned int idx;
2663 idx = SUBPAGE_IDX(addr);
2664 #if defined(DEBUG_SUBPAGE)
2665 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2666 mmio, len, addr, idx);
2667 #endif
2668 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2669 addr + mmio->region_offset[idx][0][len]);
2671 return ret;
2674 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2675 uint32_t value, unsigned int len)
2677 unsigned int idx;
2679 idx = SUBPAGE_IDX(addr);
2680 #if defined(DEBUG_SUBPAGE)
2681 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2682 mmio, len, addr, idx, value);
2683 #endif
2684 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2685 addr + mmio->region_offset[idx][1][len],
2686 value);
2689 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2691 #if defined(DEBUG_SUBPAGE)
2692 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2693 #endif
2695 return subpage_readlen(opaque, addr, 0);
2698 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2699 uint32_t value)
2701 #if defined(DEBUG_SUBPAGE)
2702 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2703 #endif
2704 subpage_writelen(opaque, addr, value, 0);
2707 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2709 #if defined(DEBUG_SUBPAGE)
2710 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2711 #endif
2713 return subpage_readlen(opaque, addr, 1);
2716 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2717 uint32_t value)
2719 #if defined(DEBUG_SUBPAGE)
2720 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2721 #endif
2722 subpage_writelen(opaque, addr, value, 1);
2725 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2727 #if defined(DEBUG_SUBPAGE)
2728 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2729 #endif
2731 return subpage_readlen(opaque, addr, 2);
2734 static void subpage_writel (void *opaque,
2735 target_phys_addr_t addr, uint32_t value)
2737 #if defined(DEBUG_SUBPAGE)
2738 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2739 #endif
2740 subpage_writelen(opaque, addr, value, 2);
2743 static CPUReadMemoryFunc *subpage_read[] = {
2744 &subpage_readb,
2745 &subpage_readw,
2746 &subpage_readl,
2749 static CPUWriteMemoryFunc *subpage_write[] = {
2750 &subpage_writeb,
2751 &subpage_writew,
2752 &subpage_writel,
2755 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2756 ram_addr_t memory, ram_addr_t region_offset)
2758 int idx, eidx;
2759 unsigned int i;
2761 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2762 return -1;
2763 idx = SUBPAGE_IDX(start);
2764 eidx = SUBPAGE_IDX(end);
2765 #if defined(DEBUG_SUBPAGE)
2766 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2767 mmio, start, end, idx, eidx, memory);
2768 #endif
2769 memory >>= IO_MEM_SHIFT;
2770 for (; idx <= eidx; idx++) {
2771 for (i = 0; i < 4; i++) {
2772 if (io_mem_read[memory][i]) {
2773 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2774 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2775 mmio->region_offset[idx][0][i] = region_offset;
2777 if (io_mem_write[memory][i]) {
2778 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2779 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2780 mmio->region_offset[idx][1][i] = region_offset;
2785 return 0;
2788 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2789 ram_addr_t orig_memory, ram_addr_t region_offset)
2791 subpage_t *mmio;
2792 int subpage_memory;
2794 mmio = qemu_mallocz(sizeof(subpage_t));
2795 if (mmio != NULL) {
2796 mmio->base = base;
2797 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2798 #if defined(DEBUG_SUBPAGE)
2799 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2800 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2801 #endif
2802 *phys = subpage_memory | IO_MEM_SUBPAGE;
2803 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2804 region_offset);
2807 return mmio;
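Conceptually, a subpage lets two or more regions share a single target page: subpage_register() records, per offset, which handler to call, and subpage_readlen()/subpage_writelen() dispatch on that offset. The standalone toy below mirrors only that dispatch idea; the page size, split point and handlers are all invented for the example:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 0x1000

typedef uint32_t (*model_read_fn)(uint32_t offset);

static uint32_t region_a_read(uint32_t offset) { return 0xa0000000u | offset; }
static uint32_t region_b_read(uint32_t offset) { return 0xb0000000u | offset; }

static model_read_fn model_subpage[MODEL_PAGE_SIZE];

static void model_register(uint32_t start, uint32_t end, model_read_fn fn)
{
    uint32_t off;

    for (off = start; off <= end; off++)
        model_subpage[off] = fn;      /* what subpage_register() records */
}

int main(void)
{
    model_register(0x000, 0x7ff, region_a_read);
    model_register(0x800, 0xfff, region_b_read);

    /* What subpage_readlen() does: index by the offset inside the page. */
    printf("0x%08" PRIx32 "\n", model_subpage[0x010](0x010));   /* region A */
    printf("0x%08" PRIx32 "\n", model_subpage[0x810](0x810));   /* region B */
    return 0;
}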
2810 static int get_free_io_mem_idx(void)
2812 int i;
2814 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2815 if (!io_mem_used[i]) {
2816 io_mem_used[i] = 1;
2817 return i;
2820 return -1;
2823 static void io_mem_init(void)
2825 int i;
2827 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2828 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2829 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2830 for (i=0; i<5; i++)
2831 io_mem_used[i] = 1;
2833 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2834 watch_mem_write, NULL);
2835 /* alloc dirty bits array */
2836 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2837 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
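For scale, the dirty array allocated here is one flag byte per target page. A small standalone calculation with assumed numbers (512 MiB of guest RAM, 4 KiB pages):

#include <stdint.h>
#include <stdio.h>

/* Illustrative sizing only: one dirty byte per target page. */
int main(void)
{
    uint64_t ram_bytes = 512ull << 20;   /* hypothetical 512 MiB of RAM */
    unsigned page_bits = 12;             /* 4 KiB pages (assumption)    */

    printf("dirty array: %llu bytes\n",
           (unsigned long long)(ram_bytes >> page_bits));   /* 131072 */
    return 0;
}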
2840 /* mem_read and mem_write are arrays of functions containing the
2841 function to access byte (index 0), word (index 1) and dword (index
2842 2). Functions can be omitted with a NULL function pointer. The
2843 registered functions may be modified dynamically later.
2844 If io_index is non zero, the corresponding io zone is
2845 modified. If it is zero, a new io zone is allocated. The return
2846 value can be used with cpu_register_physical_memory(). -1 is
2847 returned on error. */
2848 int cpu_register_io_memory(int io_index,
2849 CPUReadMemoryFunc **mem_read,
2850 CPUWriteMemoryFunc **mem_write,
2851 void *opaque)
2853 int i, subwidth = 0;
2855 if (io_index <= 0) {
2856 io_index = get_free_io_mem_idx();
2857 if (io_index == -1)
2858 return io_index;
2859 } else {
2860 if (io_index >= IO_MEM_NB_ENTRIES)
2861 return -1;
2864 for(i = 0;i < 3; i++) {
2865 if (!mem_read[i] || !mem_write[i])
2866 subwidth = IO_MEM_SUBWIDTH;
2867 io_mem_read[io_index][i] = mem_read[i];
2868 io_mem_write[io_index][i] = mem_write[i];
2870 io_mem_opaque[io_index] = opaque;
2871 return (io_index << IO_MEM_SHIFT) | subwidth;
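The usual client of cpu_register_io_memory() is a device model: it supplies handlers with the CPUReadMemoryFunc/CPUWriteMemoryFunc signatures used throughout this file and maps the returned token over guest-physical space. Everything below, including the device itself and the 0xfe000000 base, is a hypothetical sketch:

/* Sketch only: a one-register MMIO device.  The handler signatures match
   the unassigned_mem_* functions above. */
typedef struct ExampleDevState {
    uint32_t reg;
} ExampleDevState;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->reg;                        /* every offset reads the register */
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDevState *s = opaque;
    s->reg = val;
}

static CPUReadMemoryFunc *example_read[3] = {
    example_readl, example_readl, example_readl,
};

static CPUWriteMemoryFunc *example_write[3] = {
    example_writel, example_writel, example_writel,
};

static void example_dev_map(ExampleDevState *s)
{
    /* io_index 0 asks for a fresh slot; the return value already carries
       the IO_MEM_SHIFT encoding and is used directly as a phys_offset. */
    int io = cpu_register_io_memory(0, example_read, example_write, s);

    cpu_register_physical_memory_offset(0xfe000000, TARGET_PAGE_SIZE, io, 0);
}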
2874 void cpu_unregister_io_memory(int io_table_address)
2876 int i;
2877 int io_index = io_table_address >> IO_MEM_SHIFT;
2879 for (i=0;i < 3; i++) {
2880 io_mem_read[io_index][i] = unassigned_mem_read[i];
2881 io_mem_write[io_index][i] = unassigned_mem_write[i];
2883 io_mem_opaque[io_index] = NULL;
2884 io_mem_used[io_index] = 0;
2887 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2889 return io_mem_write[io_index >> IO_MEM_SHIFT];
2892 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2894 return io_mem_read[io_index >> IO_MEM_SHIFT];
2897 #endif /* !defined(CONFIG_USER_ONLY) */
2899 /* physical memory access (slow version, mainly for debug) */
2900 #if defined(CONFIG_USER_ONLY)
2901 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2902 int len, int is_write)
2904 int l, flags;
2905 target_ulong page;
2906 void * p;
2908 while (len > 0) {
2909 page = addr & TARGET_PAGE_MASK;
2910 l = (page + TARGET_PAGE_SIZE) - addr;
2911 if (l > len)
2912 l = len;
2913 flags = page_get_flags(page);
2914 if (!(flags & PAGE_VALID))
2915 return;
2916 if (is_write) {
2917 if (!(flags & PAGE_WRITE))
2918 return;
2919 /* XXX: this code should not depend on lock_user */
2920 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2921 /* FIXME - should this return an error rather than just fail? */
2922 return;
2923 memcpy(p, buf, l);
2924 unlock_user(p, addr, l);
2925 } else {
2926 if (!(flags & PAGE_READ))
2927 return;
2928 /* XXX: this code should not depend on lock_user */
2929 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2930 /* FIXME - should this return an error rather than just fail? */
2931 return;
2932 memcpy(buf, p, l);
2933 unlock_user(p, addr, 0);
2935 len -= l;
2936 buf += l;
2937 addr += l;
2941 #else
2942 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2943 int len, int is_write)
2945 int l, io_index;
2946 uint8_t *ptr;
2947 uint32_t val;
2948 target_phys_addr_t page;
2949 unsigned long pd;
2950 PhysPageDesc *p;
2952 while (len > 0) {
2953 page = addr & TARGET_PAGE_MASK;
2954 l = (page + TARGET_PAGE_SIZE) - addr;
2955 if (l > len)
2956 l = len;
2957 p = phys_page_find(page >> TARGET_PAGE_BITS);
2958 if (!p) {
2959 pd = IO_MEM_UNASSIGNED;
2960 } else {
2961 pd = p->phys_offset;
2964 if (is_write) {
2965 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2966 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2967 if (p)
2968 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2969 /* XXX: could force cpu_single_env to NULL to avoid
2970 potential bugs */
2971 if (l >= 4 && ((addr & 3) == 0)) {
2972 /* 32 bit write access */
2973 val = ldl_p(buf);
2974 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2975 l = 4;
2976 } else if (l >= 2 && ((addr & 1) == 0)) {
2977 /* 16 bit write access */
2978 val = lduw_p(buf);
2979 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2980 l = 2;
2981 } else {
2982 /* 8 bit write access */
2983 val = ldub_p(buf);
2984 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2985 l = 1;
2987 } else {
2988 unsigned long addr1;
2989 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2990 /* RAM case */
2991 ptr = phys_ram_base + addr1;
2992 memcpy(ptr, buf, l);
2993 if (!cpu_physical_memory_is_dirty(addr1)) {
2994 /* invalidate code */
2995 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2996 /* set dirty bit */
2997 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2998 (0xff & ~CODE_DIRTY_FLAG);
3000 /* qemu doesn't execute guest code directly, but kvm does,
3001 therefore flush the instruction caches */
3002 if (kvm_enabled())
3003 flush_icache_range((unsigned long)ptr,
3004 ((unsigned long)ptr)+l);
3006 } else {
3007 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3008 !(pd & IO_MEM_ROMD)) {
3009 /* I/O case */
3010 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3011 if (p)
3012 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3013 if (l >= 4 && ((addr & 3) == 0)) {
3014 /* 32 bit read access */
3015 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3016 stl_p(buf, val);
3017 l = 4;
3018 } else if (l >= 2 && ((addr & 1) == 0)) {
3019 /* 16 bit read access */
3020 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3021 stw_p(buf, val);
3022 l = 2;
3023 } else {
3024 /* 8 bit read access */
3025 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3026 stb_p(buf, val);
3027 l = 1;
3029 } else {
3030 /* RAM case */
3031 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3032 (addr & ~TARGET_PAGE_MASK);
3033 memcpy(buf, ptr, l);
3036 len -= l;
3037 buf += l;
3038 addr += l;
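Most callers reach this function through the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, as the ld*/st* helpers further down do. A small hypothetical sketch of writing a two-word descriptor into guest RAM:

/* Sketch only: write an 8-byte, two-field descriptor to a hypothetical
   guest-physical address through the cpu_physical_memory_write() wrapper
   around the function above. */
static void write_example_descriptor(target_phys_addr_t dest)
{
    uint8_t desc[8];

    stl_p(desc, 0x12345678);        /* fields stored in target byte order */
    stl_p(desc + 4, 0xcafebabe);
    cpu_physical_memory_write(dest, desc, sizeof(desc));
}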
3042 /* used for ROM loading : can write in RAM and ROM */
3043 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3044 const uint8_t *buf, int len)
3046 int l;
3047 uint8_t *ptr;
3048 target_phys_addr_t page;
3049 unsigned long pd;
3050 PhysPageDesc *p;
3052 while (len > 0) {
3053 page = addr & TARGET_PAGE_MASK;
3054 l = (page + TARGET_PAGE_SIZE) - addr;
3055 if (l > len)
3056 l = len;
3057 p = phys_page_find(page >> TARGET_PAGE_BITS);
3058 if (!p) {
3059 pd = IO_MEM_UNASSIGNED;
3060 } else {
3061 pd = p->phys_offset;
3064 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3065 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3066 !(pd & IO_MEM_ROMD)) {
3067 /* do nothing */
3068 } else {
3069 unsigned long addr1;
3070 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3071 /* ROM/RAM case */
3072 ptr = phys_ram_base + addr1;
3073 memcpy(ptr, buf, l);
3075 len -= l;
3076 buf += l;
3077 addr += l;
3082 /* warning: addr must be aligned */
3083 uint32_t ldl_phys(target_phys_addr_t addr)
3085 int io_index;
3086 uint8_t *ptr;
3087 uint32_t val;
3088 unsigned long pd;
3089 PhysPageDesc *p;
3091 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3092 if (!p) {
3093 pd = IO_MEM_UNASSIGNED;
3094 } else {
3095 pd = p->phys_offset;
3098 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3099 !(pd & IO_MEM_ROMD)) {
3100 /* I/O case */
3101 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3102 if (p)
3103 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3104 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3105 } else {
3106 /* RAM case */
3107 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3108 (addr & ~TARGET_PAGE_MASK);
3109 val = ldl_p(ptr);
3111 return val;
3114 /* warning: addr must be aligned */
3115 uint64_t ldq_phys(target_phys_addr_t addr)
3117 int io_index;
3118 uint8_t *ptr;
3119 uint64_t val;
3120 unsigned long pd;
3121 PhysPageDesc *p;
3123 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3124 if (!p) {
3125 pd = IO_MEM_UNASSIGNED;
3126 } else {
3127 pd = p->phys_offset;
3130 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3131 !(pd & IO_MEM_ROMD)) {
3132 /* I/O case */
3133 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3134 if (p)
3135 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3136 #ifdef TARGET_WORDS_BIGENDIAN
3137 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3138 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3139 #else
3140 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3141 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3142 #endif
3143 } else {
3144 /* RAM case */
3145 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3146 (addr & ~TARGET_PAGE_MASK);
3147 val = ldq_p(ptr);
3149 return val;
3152 /* XXX: optimize */
3153 uint32_t ldub_phys(target_phys_addr_t addr)
3155 uint8_t val;
3156 cpu_physical_memory_read(addr, &val, 1);
3157 return val;
3160 /* XXX: optimize */
3161 uint32_t lduw_phys(target_phys_addr_t addr)
3163 uint16_t val;
3164 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3165 return tswap16(val);
3168 #ifdef __GNUC__
3169 #define likely(x) __builtin_expect(!!(x), 1)
3170 #define unlikely(x) __builtin_expect(!!(x), 0)
3171 #else
3172 #define likely(x) x
3173 #define unlikely(x) x
3174 #endif
3176 /* warning: addr must be aligned. The ram page is not marked as dirty
3177 and the code inside is not invalidated. It is useful if the dirty
3178 bits are used to track modified PTEs */
3179 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3181 int io_index;
3182 uint8_t *ptr;
3183 unsigned long pd;
3184 PhysPageDesc *p;
3186 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3187 if (!p) {
3188 pd = IO_MEM_UNASSIGNED;
3189 } else {
3190 pd = p->phys_offset;
3193 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3194 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3195 if (p)
3196 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3197 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3198 } else {
3199 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3200 ptr = phys_ram_base + addr1;
3201 stl_p(ptr, val);
3203 if (unlikely(in_migration)) {
3204 if (!cpu_physical_memory_is_dirty(addr1)) {
3205 /* invalidate code */
3206 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3207 /* set dirty bit */
3208 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3209 (0xff & ~CODE_DIRTY_FLAG);
3215 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3217 int io_index;
3218 uint8_t *ptr;
3219 unsigned long pd;
3220 PhysPageDesc *p;
3222 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3223 if (!p) {
3224 pd = IO_MEM_UNASSIGNED;
3225 } else {
3226 pd = p->phys_offset;
3229 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3230 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3231 if (p)
3232 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3233 #ifdef TARGET_WORDS_BIGENDIAN
3234 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3235 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3236 #else
3237 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3238 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3239 #endif
3240 } else {
3241 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3242 (addr & ~TARGET_PAGE_MASK);
3243 stq_p(ptr, val);
3247 /* warning: addr must be aligned */
3248 void stl_phys(target_phys_addr_t addr, uint32_t val)
3250 int io_index;
3251 uint8_t *ptr;
3252 unsigned long pd;
3253 PhysPageDesc *p;
3255 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3256 if (!p) {
3257 pd = IO_MEM_UNASSIGNED;
3258 } else {
3259 pd = p->phys_offset;
3262 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3263 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3264 if (p)
3265 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3266 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3267 } else {
3268 unsigned long addr1;
3269 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3270 /* RAM case */
3271 ptr = phys_ram_base + addr1;
3272 stl_p(ptr, val);
3273 if (!cpu_physical_memory_is_dirty(addr1)) {
3274 /* invalidate code */
3275 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3276 /* set dirty bit */
3277 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3278 (0xff & ~CODE_DIRTY_FLAG);
3283 /* XXX: optimize */
3284 void stb_phys(target_phys_addr_t addr, uint32_t val)
3286 uint8_t v = val;
3287 cpu_physical_memory_write(addr, &v, 1);
3290 /* XXX: optimize */
3291 void stw_phys(target_phys_addr_t addr, uint32_t val)
3293 uint16_t v = tswap16(val);
3294 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3297 /* XXX: optimize */
3298 void stq_phys(target_phys_addr_t addr, uint64_t val)
3300 val = tswap64(val);
3301 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
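Device code typically uses these fixed-width helpers to walk guest-physical structures such as DMA descriptors. The layout in this sketch is hypothetical:

/* Sketch only: read a hypothetical two-word DMA descriptor (buffer address,
   byte count) with the fixed-width helpers defined above. */
static void read_example_descriptor(target_phys_addr_t desc,
                                    uint32_t *buf_addr, uint32_t *len)
{
    *buf_addr = ldl_phys(desc);        /* word 0: guest-physical buffer */
    *len      = ldl_phys(desc + 4);    /* word 1: length in bytes       */
}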
3304 #endif
3306 /* virtual memory access for debug */
3307 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3308 uint8_t *buf, int len, int is_write)
3310 int l;
3311 target_phys_addr_t phys_addr;
3312 target_ulong page;
3314 while (len > 0) {
3315 page = addr & TARGET_PAGE_MASK;
3316 phys_addr = cpu_get_phys_page_debug(env, page);
3317 /* if no physical page mapped, return an error */
3318 if (phys_addr == -1)
3319 return -1;
3320 l = (page + TARGET_PAGE_SIZE) - addr;
3321 if (l > len)
3322 l = len;
3323 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3324 buf, l, is_write);
3325 len -= l;
3326 buf += l;
3327 addr += l;
3329 return 0;
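This is the entry point used by debugger-style consumers (the gdb stub, the monitor) for guest virtual addresses. A hedged sketch of reading a NUL-terminated guest string, with a hypothetical helper name:

/* Sketch only: fetch up to max-1 bytes of a NUL-terminated guest-virtual
   string for debugging, one byte at a time, via cpu_memory_rw_debug(). */
static int debug_read_string(CPUState *env, target_ulong addr,
                             char *buf, int max)
{
    int i;

    for (i = 0; i < max - 1; i++) {
        if (cpu_memory_rw_debug(env, addr + i, (uint8_t *)&buf[i], 1, 0) != 0)
            return -1;                 /* page not mapped */
        if (buf[i] == '\0')
            return 0;
    }
    buf[i] = '\0';                     /* truncate overly long strings */
    return 0;
}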
3332 /* in deterministic execution mode, instructions doing device I/Os
3333 must be at the end of the TB */
3334 void cpu_io_recompile(CPUState *env, void *retaddr)
3336 TranslationBlock *tb;
3337 uint32_t n, cflags;
3338 target_ulong pc, cs_base;
3339 uint64_t flags;
3341 tb = tb_find_pc((unsigned long)retaddr);
3342 if (!tb) {
3343 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3344 retaddr);
3346 n = env->icount_decr.u16.low + tb->icount;
3347 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3348 /* Calculate how many instructions had been executed before the fault
3349 occurred. */
3350 n = n - env->icount_decr.u16.low;
3351 /* Generate a new TB ending on the I/O insn. */
3352 n++;
3353 /* On MIPS and SH, delay slot instructions can only be restarted if
3354 they were already the first instruction in the TB. If this is not
3355 the first instruction in a TB then re-execute the preceding
3356 branch. */
3357 #if defined(TARGET_MIPS)
3358 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3359 env->active_tc.PC -= 4;
3360 env->icount_decr.u16.low++;
3361 env->hflags &= ~MIPS_HFLAG_BMASK;
3363 #elif defined(TARGET_SH4)
3364 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3365 && n > 1) {
3366 env->pc -= 2;
3367 env->icount_decr.u16.low++;
3368 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3370 #endif
3371 /* This should never happen. */
3372 if (n > CF_COUNT_MASK)
3373 cpu_abort(env, "TB too big during recompile");
3375 cflags = n | CF_LAST_IO;
3376 pc = tb->pc;
3377 cs_base = tb->cs_base;
3378 flags = tb->flags;
3379 tb_phys_invalidate(tb, -1);
3380 /* FIXME: In theory this could raise an exception. In practice
3381 we have already translated the block once so it's probably ok. */
3382 tb_gen_code(env, pc, cs_base, flags, cflags);
3383 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3384 the first in the TB) then we end up generating a whole new TB and
3385 repeating the fault, which is horribly inefficient.
3386 Better would be to execute just this insn uncached, or generate a
3387 second new TB. */
3388 cpu_resume_from_signal(env, NULL);
3391 void dump_exec_info(FILE *f,
3392 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3394 int i, target_code_size, max_target_code_size;
3395 int direct_jmp_count, direct_jmp2_count, cross_page;
3396 TranslationBlock *tb;
3398 target_code_size = 0;
3399 max_target_code_size = 0;
3400 cross_page = 0;
3401 direct_jmp_count = 0;
3402 direct_jmp2_count = 0;
3403 for(i = 0; i < nb_tbs; i++) {
3404 tb = &tbs[i];
3405 target_code_size += tb->size;
3406 if (tb->size > max_target_code_size)
3407 max_target_code_size = tb->size;
3408 if (tb->page_addr[1] != -1)
3409 cross_page++;
3410 if (tb->tb_next_offset[0] != 0xffff) {
3411 direct_jmp_count++;
3412 if (tb->tb_next_offset[1] != 0xffff) {
3413 direct_jmp2_count++;
3417 /* XXX: avoid using doubles ? */
3418 cpu_fprintf(f, "Translation buffer state:\n");
3419 cpu_fprintf(f, "gen code size %ld/%ld\n",
3420 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3421 cpu_fprintf(f, "TB count %d/%d\n",
3422 nb_tbs, code_gen_max_blocks);
3423 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3424 nb_tbs ? target_code_size / nb_tbs : 0,
3425 max_target_code_size);
3426 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3427 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3428 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3429 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3430 cross_page,
3431 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3432 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3433 direct_jmp_count,
3434 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3435 direct_jmp2_count,
3436 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3437 cpu_fprintf(f, "\nStatistics:\n");
3438 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3439 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3440 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3441 tcg_dump_info(f, cpu_fprintf);
3444 #if !defined(CONFIG_USER_ONLY)
3446 #define MMUSUFFIX _cmmu
3447 #define GETPC() NULL
3448 #define env cpu_single_env
3449 #define SOFTMMU_CODE_ACCESS
3451 #define SHIFT 0
3452 #include "softmmu_template.h"
3454 #define SHIFT 1
3455 #include "softmmu_template.h"
3457 #define SHIFT 2
3458 #include "softmmu_template.h"
3460 #define SHIFT 3
3461 #include "softmmu_template.h"
3463 #undef env
3465 #endif