[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #include "osdep.h"
47 #include "kvm.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #endif
52 //#define DEBUG_TB_INVALIDATE
53 //#define DEBUG_FLUSH
54 //#define DEBUG_TLB
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
66 #undef DEBUG_TB_CHECK
67 #endif
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #define MMAP_AREA_START 0x00000000
72 #define MMAP_AREA_END 0xa8000000
74 #if defined(TARGET_SPARC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 41
76 #elif defined(TARGET_SPARC)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #elif defined(TARGET_ALPHA)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #define TARGET_VIRT_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_PPC64)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
89 #else
90 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
91 #define TARGET_PHYS_ADDR_SPACE_BITS 32
92 #endif
94 static TranslationBlock *tbs;
95 int code_gen_max_blocks;
96 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
97 static int nb_tbs;
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
101 #if defined(__arm__) || defined(__sparc_v9__)
102 /* The prologue must be reachable with a direct jump. ARM and Sparc64
103 have limited branch ranges (possibly also PPC), so place it in a
104 section close to the code segment. */
105 #define code_gen_section \
106 __attribute__((__section__(".gen_code"))) \
107 __attribute__((aligned (32)))
108 #else
109 #define code_gen_section \
110 __attribute__((aligned (32)))
111 #endif
113 uint8_t code_gen_prologue[1024] code_gen_section;
114 static uint8_t *code_gen_buffer;
115 static unsigned long code_gen_buffer_size;
116 /* threshold to flush the translated code buffer */
117 static unsigned long code_gen_buffer_max_size;
118 uint8_t *code_gen_ptr;
120 #if !defined(CONFIG_USER_ONLY)
121 ram_addr_t phys_ram_size;
122 int phys_ram_fd;
123 uint8_t *phys_ram_base;
124 uint8_t *phys_ram_dirty;
125 uint8_t *bios_mem;
126 static int in_migration;
127 static ram_addr_t phys_ram_alloc_offset = 0;
128 #endif
130 CPUState *first_cpu;
131 /* current CPU in the current thread. It is only valid inside
132 cpu_exec() */
133 CPUState *cpu_single_env;
134 /* 0 = Do not count executed instructions.
135 1 = Precise instruction counting.
136 2 = Adaptive rate instruction counting. */
137 int use_icount = 0;
138 /* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
140 int64_t qemu_icount;
142 typedef struct PageDesc {
143 /* list of TBs intersecting this ram page */
144 TranslationBlock *first_tb;
145 /* in order to optimize self-modifying code, we count the number
146 of write accesses to a given page and switch to a bitmap past a threshold */
147 unsigned int code_write_count;
148 uint8_t *code_bitmap;
149 #if defined(CONFIG_USER_ONLY)
150 unsigned long flags;
151 #endif
152 } PageDesc;
154 typedef struct PhysPageDesc {
155 /* offset in host memory of the page + io_index in the low bits */
156 ram_addr_t phys_offset;
157 ram_addr_t region_offset;
158 } PhysPageDesc;
160 #define L2_BITS 10
161 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
162 /* XXX: this is a temporary hack for the alpha target.
163 * In the future, this is to be replaced by a multi-level table
164 * to actually be able to handle the complete 64-bit address space. */
166 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167 #else
168 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169 #endif
171 #define L1_SIZE (1 << L1_BITS)
172 #define L2_SIZE (1 << L2_BITS)
174 unsigned long qemu_real_host_page_size;
175 unsigned long qemu_host_page_bits;
176 unsigned long qemu_host_page_size;
177 unsigned long qemu_host_page_mask;
179 /* XXX: for system emulation, it could just be an array */
180 static PageDesc *l1_map[L1_SIZE];
181 static PhysPageDesc **l1_phys_map;
183 #if !defined(CONFIG_USER_ONLY)
184 static void io_mem_init(void);
186 /* io memory support */
187 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
188 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
189 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
190 char io_mem_used[IO_MEM_NB_ENTRIES];
191 static int io_mem_watch;
192 #endif
194 /* log support */
195 static const char *logfilename = "/tmp/qemu.log";
196 FILE *logfile;
197 int loglevel;
198 static int log_append = 0;
200 /* statistics */
201 static int tlb_flush_count;
202 static int tb_flush_count;
203 static int tb_phys_invalidate_count;
205 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
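/* dispatch state for a physical page whose contents are split between
   several I/O regions: per-offset read/write handlers, opaque pointers
   and region offsets */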
206 typedef struct subpage_t {
207 target_phys_addr_t base;
208 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
209 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
210 void *opaque[TARGET_PAGE_SIZE][2][4];
211 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
212 } subpage_t;
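/* make the host pages containing [addr, addr + size) executable */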
214 #ifdef _WIN32
215 static void map_exec(void *addr, long size)
217 DWORD old_protect;
218 VirtualProtect(addr, size,
219 PAGE_EXECUTE_READWRITE, &old_protect);
222 #else
223 static void map_exec(void *addr, long size)
225 unsigned long start, end, page_size;
227 page_size = getpagesize();
228 start = (unsigned long)addr;
229 start &= ~(page_size - 1);
231 end = (unsigned long)addr + size;
232 end += page_size - 1;
233 end &= ~(page_size - 1);
235 mprotect((void *)start, end - start,
236 PROT_READ | PROT_WRITE | PROT_EXEC);
238 #endif
240 static void page_init(void)
242 /* NOTE: we can always assume that qemu_host_page_size >=
243 TARGET_PAGE_SIZE */
244 #ifdef _WIN32
246 SYSTEM_INFO system_info;
248 GetSystemInfo(&system_info);
249 qemu_real_host_page_size = system_info.dwPageSize;
251 #else
252 qemu_real_host_page_size = getpagesize();
253 #endif
254 if (qemu_host_page_size == 0)
255 qemu_host_page_size = qemu_real_host_page_size;
256 if (qemu_host_page_size < TARGET_PAGE_SIZE)
257 qemu_host_page_size = TARGET_PAGE_SIZE;
258 qemu_host_page_bits = 0;
259 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
260 qemu_host_page_bits++;
261 qemu_host_page_mask = ~(qemu_host_page_size - 1);
262 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
263 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
265 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
267 long long startaddr, endaddr;
268 FILE *f;
269 int n;
271 mmap_lock();
272 last_brk = (unsigned long)sbrk(0);
273 f = fopen("/proc/self/maps", "r");
274 if (f) {
275 do {
276 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
277 if (n == 2) {
278 startaddr = MIN(startaddr,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280 endaddr = MIN(endaddr,
281 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282 page_set_flags(startaddr & TARGET_PAGE_MASK,
283 TARGET_PAGE_ALIGN(endaddr),
284 PAGE_RESERVED);
286 } while (!feof(f));
287 fclose(f);
289 mmap_unlock();
291 #endif
294 static inline PageDesc **page_l1_map(target_ulong index)
296 #if TARGET_LONG_BITS > 32
297 /* Host memory outside guest VM. For 32-bit targets we have already
298 excluded high addresses. */
299 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
300 return NULL;
301 #endif
302 return &l1_map[index >> L2_BITS];
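/* return the PageDesc for a target page index, allocating the
   second-level table on first use; returns NULL if the index is
   outside the handled address range */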
305 static inline PageDesc *page_find_alloc(target_ulong index)
307 PageDesc **lp, *p;
308 lp = page_l1_map(index);
309 if (!lp)
310 return NULL;
312 p = *lp;
313 if (!p) {
314 /* allocate if not found */
315 #if defined(CONFIG_USER_ONLY)
316 size_t len = sizeof(PageDesc) * L2_SIZE;
317 /* Don't use qemu_malloc because it may recurse. */
318 p = mmap(0, len, PROT_READ | PROT_WRITE,
319 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
320 *lp = p;
321 if (h2g_valid(p)) {
322 unsigned long addr = h2g(p);
323 page_set_flags(addr & TARGET_PAGE_MASK,
324 TARGET_PAGE_ALIGN(addr + len),
325 PAGE_RESERVED);
327 #else
328 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
329 *lp = p;
330 #endif
332 return p + (index & (L2_SIZE - 1));
335 static inline PageDesc *page_find(target_ulong index)
337 PageDesc **lp, *p;
338 lp = page_l1_map(index);
339 if (!lp)
340 return NULL;
342 p = *lp;
343 if (!p)
344 return 0;
345 return p + (index & (L2_SIZE - 1));
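/* look up the PhysPageDesc for a physical page index; if 'alloc' is set,
   missing tables are created and new entries start as IO_MEM_UNASSIGNED,
   otherwise NULL is returned for unmapped pages */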
348 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
350 void **lp, **p;
351 PhysPageDesc *pd;
353 p = (void **)l1_phys_map;
354 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
356 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
357 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
358 #endif
359 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
360 p = *lp;
361 if (!p) {
362 /* allocate if not found */
363 if (!alloc)
364 return NULL;
365 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
366 memset(p, 0, sizeof(void *) * L1_SIZE);
367 *lp = p;
369 #endif
370 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
371 pd = *lp;
372 if (!pd) {
373 int i;
374 /* allocate if not found */
375 if (!alloc)
376 return NULL;
377 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
378 *lp = pd;
379 for (i = 0; i < L2_SIZE; i++)
380 pd[i].phys_offset = IO_MEM_UNASSIGNED;
382 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
385 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
387 return phys_page_find_alloc(index, 0);
390 #if !defined(CONFIG_USER_ONLY)
391 static void tlb_protect_code(ram_addr_t ram_addr);
392 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393 target_ulong vaddr);
394 #define mmap_lock() do { } while(0)
395 #define mmap_unlock() do { } while(0)
396 #endif
398 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
400 #if defined(CONFIG_USER_ONLY)
401 /* Currently it is not recommended to allocate big chunks of data in
402 user mode. This will change when a dedicated libc is used. */
403 #define USE_STATIC_CODE_GEN_BUFFER
404 #endif
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408 #endif
410 static void code_gen_alloc(unsigned long tb_size)
412 if (kvm_enabled())
413 return;
415 #ifdef USE_STATIC_CODE_GEN_BUFFER
416 code_gen_buffer = static_code_gen_buffer;
417 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
418 map_exec(code_gen_buffer, code_gen_buffer_size);
419 #else
420 code_gen_buffer_size = tb_size;
421 if (code_gen_buffer_size == 0) {
422 #if defined(CONFIG_USER_ONLY)
423 /* in user mode, phys_ram_size is not meaningful */
424 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
425 #else
426 /* XXX: needs adjustments */
427 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
428 #endif
430 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
431 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
432 /* The code gen buffer location may have constraints depending on
433 the host cpu and OS */
434 #if defined(__linux__)
436 int flags;
437 void *start = NULL;
439 flags = MAP_PRIVATE | MAP_ANONYMOUS;
440 #if defined(__x86_64__)
441 flags |= MAP_32BIT;
442 /* Cannot map more than that */
443 if (code_gen_buffer_size > (800 * 1024 * 1024))
444 code_gen_buffer_size = (800 * 1024 * 1024);
445 #elif defined(__sparc_v9__)
446 // Map the buffer below 2G, so we can use direct calls and branches
447 flags |= MAP_FIXED;
448 start = (void *) 0x60000000UL;
449 if (code_gen_buffer_size > (512 * 1024 * 1024))
450 code_gen_buffer_size = (512 * 1024 * 1024);
451 #elif defined(__arm__)
452 /* Map the buffer below 32M, so we can use direct calls and branches */
453 flags |= MAP_FIXED;
454 start = (void *) 0x01000000UL;
455 if (code_gen_buffer_size > 16 * 1024 * 1024)
456 code_gen_buffer_size = 16 * 1024 * 1024;
457 #endif
458 code_gen_buffer = mmap(start, code_gen_buffer_size,
459 PROT_WRITE | PROT_READ | PROT_EXEC,
460 flags, -1, 0);
461 if (code_gen_buffer == MAP_FAILED) {
462 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
463 exit(1);
466 #elif defined(__FreeBSD__)
468 int flags;
469 void *addr = NULL;
470 flags = MAP_PRIVATE | MAP_ANONYMOUS;
471 #if defined(__x86_64__)
472 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
473 * 0x40000000 is free */
474 flags |= MAP_FIXED;
475 addr = (void *)0x40000000;
476 /* Cannot map more than that */
477 if (code_gen_buffer_size > (800 * 1024 * 1024))
478 code_gen_buffer_size = (800 * 1024 * 1024);
479 #endif
480 code_gen_buffer = mmap(addr, code_gen_buffer_size,
481 PROT_WRITE | PROT_READ | PROT_EXEC,
482 flags, -1, 0);
483 if (code_gen_buffer == MAP_FAILED) {
484 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485 exit(1);
488 #else
489 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
490 if (!code_gen_buffer) {
491 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
492 exit(1);
494 map_exec(code_gen_buffer, code_gen_buffer_size);
495 #endif
496 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
497 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
498 code_gen_buffer_max_size = code_gen_buffer_size -
499 code_gen_max_block_size();
500 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
501 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
504 /* Must be called before using the QEMU cpus. 'tb_size' is the size
505 (in bytes) allocated to the translation buffer. Zero means default
506 size. */
507 void cpu_exec_init_all(unsigned long tb_size)
509 cpu_gen_init();
510 code_gen_alloc(tb_size);
511 code_gen_ptr = code_gen_buffer;
512 page_init();
513 #if !defined(CONFIG_USER_ONLY)
514 io_mem_init();
515 #endif
518 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
520 #define CPU_COMMON_SAVE_VERSION 1
522 static void cpu_common_save(QEMUFile *f, void *opaque)
524 CPUState *env = opaque;
526 qemu_put_be32s(f, &env->halted);
527 qemu_put_be32s(f, &env->interrupt_request);
530 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
532 CPUState *env = opaque;
534 if (version_id != CPU_COMMON_SAVE_VERSION)
535 return -EINVAL;
537 qemu_get_be32s(f, &env->halted);
538 qemu_get_be32s(f, &env->interrupt_request);
539 tlb_flush(env, 1);
541 return 0;
543 #endif
545 void cpu_exec_init(CPUState *env)
547 CPUState **penv;
548 int cpu_index;
550 env->next_cpu = NULL;
551 penv = &first_cpu;
552 cpu_index = 0;
553 while (*penv != NULL) {
554 penv = (CPUState **)&(*penv)->next_cpu;
555 cpu_index++;
557 env->cpu_index = cpu_index;
558 TAILQ_INIT(&env->breakpoints);
559 TAILQ_INIT(&env->watchpoints);
560 #ifdef _WIN32
561 env->thread_id = GetCurrentProcessId();
562 #else
563 env->thread_id = getpid();
564 #endif
565 *penv = env;
566 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
567 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
568 cpu_common_save, cpu_common_load, env);
569 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
570 cpu_save, cpu_load, env);
571 #endif
574 static inline void invalidate_page_bitmap(PageDesc *p)
576 if (p->code_bitmap) {
577 qemu_free(p->code_bitmap);
578 p->code_bitmap = NULL;
580 p->code_write_count = 0;
583 /* set to NULL all the 'first_tb' fields in all PageDescs */
584 static void page_flush_tb(void)
586 int i, j;
587 PageDesc *p;
589 for(i = 0; i < L1_SIZE; i++) {
590 p = l1_map[i];
591 if (p) {
592 for(j = 0; j < L2_SIZE; j++) {
593 p->first_tb = NULL;
594 invalidate_page_bitmap(p);
595 p++;
601 /* flush all the translation blocks */
602 /* XXX: tb_flush is currently not thread safe */
603 void tb_flush(CPUState *env1)
605 CPUState *env;
606 #if defined(DEBUG_FLUSH)
607 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
608 (unsigned long)(code_gen_ptr - code_gen_buffer),
609 nb_tbs, nb_tbs > 0 ?
610 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
611 #endif
612 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
613 cpu_abort(env1, "Internal error: code buffer overflow\n");
615 nb_tbs = 0;
617 for(env = first_cpu; env != NULL; env = env->next_cpu) {
618 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
621 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
622 page_flush_tb();
624 code_gen_ptr = code_gen_buffer;
625 /* XXX: flush processor icache at this point if cache flush is
626 expensive */
627 tb_flush_count++;
630 #ifdef DEBUG_TB_CHECK
632 static void tb_invalidate_check(target_ulong address)
634 TranslationBlock *tb;
635 int i;
636 address &= TARGET_PAGE_MASK;
637 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
638 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
639 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
640 address >= tb->pc + tb->size)) {
641 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
642 address, (long)tb->pc, tb->size);
648 /* verify that all the pages have correct rights for code */
649 static void tb_page_check(void)
651 TranslationBlock *tb;
652 int i, flags1, flags2;
654 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
655 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
656 flags1 = page_get_flags(tb->pc);
657 flags2 = page_get_flags(tb->pc + tb->size - 1);
658 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
659 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
660 (long)tb->pc, tb->size, flags1, flags2);
666 static void tb_jmp_check(TranslationBlock *tb)
668 TranslationBlock *tb1;
669 unsigned int n1;
671 /* suppress any remaining jumps to this TB */
672 tb1 = tb->jmp_first;
673 for(;;) {
674 n1 = (long)tb1 & 3;
675 tb1 = (TranslationBlock *)((long)tb1 & ~3);
676 if (n1 == 2)
677 break;
678 tb1 = tb1->jmp_next[n1];
680 /* check end of list */
681 if (tb1 != tb) {
682 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
686 #endif
688 /* invalidate one TB */
689 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
690 int next_offset)
692 TranslationBlock *tb1;
693 for(;;) {
694 tb1 = *ptb;
695 if (tb1 == tb) {
696 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
697 break;
699 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
703 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
705 TranslationBlock *tb1;
706 unsigned int n1;
708 for(;;) {
709 tb1 = *ptb;
710 n1 = (long)tb1 & 3;
711 tb1 = (TranslationBlock *)((long)tb1 & ~3);
712 if (tb1 == tb) {
713 *ptb = tb1->page_next[n1];
714 break;
716 ptb = &tb1->page_next[n1];
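/* remove jump slot 'n' of 'tb' from the circular list of jumps to its
   target TB */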
720 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
722 TranslationBlock *tb1, **ptb;
723 unsigned int n1;
725 ptb = &tb->jmp_next[n];
726 tb1 = *ptb;
727 if (tb1) {
728 /* find tb(n) in circular list */
729 for(;;) {
730 tb1 = *ptb;
731 n1 = (long)tb1 & 3;
732 tb1 = (TranslationBlock *)((long)tb1 & ~3);
733 if (n1 == n && tb1 == tb)
734 break;
735 if (n1 == 2) {
736 ptb = &tb1->jmp_first;
737 } else {
738 ptb = &tb1->jmp_next[n1];
741 /* now we can suppress tb(n) from the list */
742 *ptb = tb->jmp_next[n];
744 tb->jmp_next[n] = NULL;
748 /* reset the jump entry 'n' of a TB so that it is not chained to
749 another TB */
750 static inline void tb_reset_jump(TranslationBlock *tb, int n)
752 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
755 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
757 CPUState *env;
758 PageDesc *p;
759 unsigned int h, n1;
760 target_phys_addr_t phys_pc;
761 TranslationBlock *tb1, *tb2;
763 /* remove the TB from the hash list */
764 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
765 h = tb_phys_hash_func(phys_pc);
766 tb_remove(&tb_phys_hash[h], tb,
767 offsetof(TranslationBlock, phys_hash_next));
769 /* remove the TB from the page list */
770 if (tb->page_addr[0] != page_addr) {
771 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
772 tb_page_remove(&p->first_tb, tb);
773 invalidate_page_bitmap(p);
775 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
776 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
777 tb_page_remove(&p->first_tb, tb);
778 invalidate_page_bitmap(p);
781 tb_invalidated_flag = 1;
783 /* remove the TB from each CPU's tb_jmp_cache */
784 h = tb_jmp_cache_hash_func(tb->pc);
785 for(env = first_cpu; env != NULL; env = env->next_cpu) {
786 if (env->tb_jmp_cache[h] == tb)
787 env->tb_jmp_cache[h] = NULL;
790 /* suppress this TB from the two jump lists */
791 tb_jmp_remove(tb, 0);
792 tb_jmp_remove(tb, 1);
794 /* suppress any remaining jumps to this TB */
795 tb1 = tb->jmp_first;
796 for(;;) {
797 n1 = (long)tb1 & 3;
798 if (n1 == 2)
799 break;
800 tb1 = (TranslationBlock *)((long)tb1 & ~3);
801 tb2 = tb1->jmp_next[n1];
802 tb_reset_jump(tb1, n1);
803 tb1->jmp_next[n1] = NULL;
804 tb1 = tb2;
806 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
808 tb_phys_invalidate_count++;
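/* set bits [start, start + len) in the byte-addressed bitmap 'tab' */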
811 static inline void set_bits(uint8_t *tab, int start, int len)
813 int end, mask, end1;
815 end = start + len;
816 tab += start >> 3;
817 mask = 0xff << (start & 7);
818 if ((start & ~7) == (end & ~7)) {
819 if (start < end) {
820 mask &= ~(0xff << (end & 7));
821 *tab |= mask;
823 } else {
824 *tab++ |= mask;
825 start = (start + 8) & ~7;
826 end1 = end & ~7;
827 while (start < end1) {
828 *tab++ = 0xff;
829 start += 8;
831 if (start < end) {
832 mask = ~(0xff << (end & 7));
833 *tab |= mask;
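/* build a bitmap of the bytes of the page covered by translated code,
   so that later writes can be checked against it cheaply */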
838 static void build_page_bitmap(PageDesc *p)
840 int n, tb_start, tb_end;
841 TranslationBlock *tb;
843 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
844 if (!p->code_bitmap)
845 return;
847 tb = p->first_tb;
848 while (tb != NULL) {
849 n = (long)tb & 3;
850 tb = (TranslationBlock *)((long)tb & ~3);
851 /* NOTE: this is subtle as a TB may span two physical pages */
852 if (n == 0) {
853 /* NOTE: tb_end may be after the end of the page, but
854 it is not a problem */
855 tb_start = tb->pc & ~TARGET_PAGE_MASK;
856 tb_end = tb_start + tb->size;
857 if (tb_end > TARGET_PAGE_SIZE)
858 tb_end = TARGET_PAGE_SIZE;
859 } else {
860 tb_start = 0;
861 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
863 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
864 tb = tb->page_next[n];
868 TranslationBlock *tb_gen_code(CPUState *env,
869 target_ulong pc, target_ulong cs_base,
870 int flags, int cflags)
872 TranslationBlock *tb;
873 uint8_t *tc_ptr;
874 target_ulong phys_pc, phys_page2, virt_page2;
875 int code_gen_size;
877 phys_pc = get_phys_addr_code(env, pc);
878 tb = tb_alloc(pc);
879 if (!tb) {
880 /* flush must be done */
881 tb_flush(env);
882 /* cannot fail at this point */
883 tb = tb_alloc(pc);
884 /* Don't forget to invalidate previous TB info. */
885 tb_invalidated_flag = 1;
887 tc_ptr = code_gen_ptr;
888 tb->tc_ptr = tc_ptr;
889 tb->cs_base = cs_base;
890 tb->flags = flags;
891 tb->cflags = cflags;
892 cpu_gen_code(env, tb, &code_gen_size);
893 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
895 /* check next page if needed */
896 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
897 phys_page2 = -1;
898 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
899 phys_page2 = get_phys_addr_code(env, virt_page2);
901 tb_link_phys(tb, phys_pc, phys_page2);
902 return tb;
905 /* invalidate all TBs which intersect with the target physical page
906 starting in range [start;end[. NOTE: start and end must refer to
907 the same physical page. 'is_cpu_write_access' should be true if called
908 from a real cpu write access: the virtual CPU will exit the current
909 TB if code is modified inside this TB. */
910 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
911 int is_cpu_write_access)
913 TranslationBlock *tb, *tb_next, *saved_tb;
914 CPUState *env = cpu_single_env;
915 target_ulong tb_start, tb_end;
916 PageDesc *p;
917 int n;
918 #ifdef TARGET_HAS_PRECISE_SMC
919 int current_tb_not_found = is_cpu_write_access;
920 TranslationBlock *current_tb = NULL;
921 int current_tb_modified = 0;
922 target_ulong current_pc = 0;
923 target_ulong current_cs_base = 0;
924 int current_flags = 0;
925 #endif /* TARGET_HAS_PRECISE_SMC */
927 p = page_find(start >> TARGET_PAGE_BITS);
928 if (!p)
929 return;
930 if (!p->code_bitmap &&
931 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
932 is_cpu_write_access) {
933 /* build code bitmap */
934 build_page_bitmap(p);
937 /* we remove all the TBs in the range [start, end[ */
938 /* XXX: see if in some cases it could be faster to invalidate all the code */
939 tb = p->first_tb;
940 while (tb != NULL) {
941 n = (long)tb & 3;
942 tb = (TranslationBlock *)((long)tb & ~3);
943 tb_next = tb->page_next[n];
944 /* NOTE: this is subtle as a TB may span two physical pages */
945 if (n == 0) {
946 /* NOTE: tb_end may be after the end of the page, but
947 it is not a problem */
948 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
949 tb_end = tb_start + tb->size;
950 } else {
951 tb_start = tb->page_addr[1];
952 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
954 if (!(tb_end <= start || tb_start >= end)) {
955 #ifdef TARGET_HAS_PRECISE_SMC
956 if (current_tb_not_found) {
957 current_tb_not_found = 0;
958 current_tb = NULL;
959 if (env->mem_io_pc) {
960 /* now we have a real cpu fault */
961 current_tb = tb_find_pc(env->mem_io_pc);
964 if (current_tb == tb &&
965 (current_tb->cflags & CF_COUNT_MASK) != 1) {
966 /* If we are modifying the current TB, we must stop
967 its execution. We could be more precise by checking
968 that the modification is after the current PC, but it
969 would require a specialized function to partially
970 restore the CPU state */
972 current_tb_modified = 1;
973 cpu_restore_state(current_tb, env,
974 env->mem_io_pc, NULL);
975 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
976 &current_flags);
978 #endif /* TARGET_HAS_PRECISE_SMC */
979 /* we need to do that to handle the case where a signal
980 occurs while doing tb_phys_invalidate() */
981 saved_tb = NULL;
982 if (env) {
983 saved_tb = env->current_tb;
984 env->current_tb = NULL;
986 tb_phys_invalidate(tb, -1);
987 if (env) {
988 env->current_tb = saved_tb;
989 if (env->interrupt_request && env->current_tb)
990 cpu_interrupt(env, env->interrupt_request);
993 tb = tb_next;
995 #if !defined(CONFIG_USER_ONLY)
996 /* if no code remaining, no need to continue to use slow writes */
997 if (!p->first_tb) {
998 invalidate_page_bitmap(p);
999 if (is_cpu_write_access) {
1000 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1003 #endif
1004 #ifdef TARGET_HAS_PRECISE_SMC
1005 if (current_tb_modified) {
1006 /* we generate a block containing just the instruction
1007 modifying the memory. It will ensure that it cannot modify
1008 itself */
1009 env->current_tb = NULL;
1010 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1011 cpu_resume_from_signal(env, NULL);
1013 #endif
1016 /* len must be <= 8 and start must be a multiple of len */
1017 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1019 PageDesc *p;
1020 int offset, b;
1021 #if 0
1022 if (1) {
1023 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1024 cpu_single_env->mem_io_vaddr, len,
1025 cpu_single_env->eip,
1026 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1028 #endif
1029 p = page_find(start >> TARGET_PAGE_BITS);
1030 if (!p)
1031 return;
1032 if (p->code_bitmap) {
1033 offset = start & ~TARGET_PAGE_MASK;
1034 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1035 if (b & ((1 << len) - 1))
1036 goto do_invalidate;
1037 } else {
1038 do_invalidate:
1039 tb_invalidate_phys_page_range(start, start + len, 1);
1043 #if !defined(CONFIG_SOFTMMU)
1044 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1045 unsigned long pc, void *puc)
1047 TranslationBlock *tb;
1048 PageDesc *p;
1049 int n;
1050 #ifdef TARGET_HAS_PRECISE_SMC
1051 TranslationBlock *current_tb = NULL;
1052 CPUState *env = cpu_single_env;
1053 int current_tb_modified = 0;
1054 target_ulong current_pc = 0;
1055 target_ulong current_cs_base = 0;
1056 int current_flags = 0;
1057 #endif
1059 addr &= TARGET_PAGE_MASK;
1060 p = page_find(addr >> TARGET_PAGE_BITS);
1061 if (!p)
1062 return;
1063 tb = p->first_tb;
1064 #ifdef TARGET_HAS_PRECISE_SMC
1065 if (tb && pc != 0) {
1066 current_tb = tb_find_pc(pc);
1068 #endif
1069 while (tb != NULL) {
1070 n = (long)tb & 3;
1071 tb = (TranslationBlock *)((long)tb & ~3);
1072 #ifdef TARGET_HAS_PRECISE_SMC
1073 if (current_tb == tb &&
1074 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1075 /* If we are modifying the current TB, we must stop
1076 its execution. We could be more precise by checking
1077 that the modification is after the current PC, but it
1078 would require a specialized function to partially
1079 restore the CPU state */
1081 current_tb_modified = 1;
1082 cpu_restore_state(current_tb, env, pc, puc);
1083 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1084 &current_flags);
1086 #endif /* TARGET_HAS_PRECISE_SMC */
1087 tb_phys_invalidate(tb, addr);
1088 tb = tb->page_next[n];
1090 p->first_tb = NULL;
1091 #ifdef TARGET_HAS_PRECISE_SMC
1092 if (current_tb_modified) {
1093 /* we generate a block containing just the instruction
1094 modifying the memory. It will ensure that it cannot modify
1095 itself */
1096 env->current_tb = NULL;
1097 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1098 cpu_resume_from_signal(env, puc);
1100 #endif
1102 #endif
1104 /* add the tb in the target page and protect it if necessary */
1105 static inline void tb_alloc_page(TranslationBlock *tb,
1106 unsigned int n, target_ulong page_addr)
1108 PageDesc *p;
1109 TranslationBlock *last_first_tb;
1111 tb->page_addr[n] = page_addr;
1112 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1113 tb->page_next[n] = p->first_tb;
1114 last_first_tb = p->first_tb;
1115 p->first_tb = (TranslationBlock *)((long)tb | n);
1116 invalidate_page_bitmap(p);
1118 #if defined(TARGET_HAS_SMC) || 1
1120 #if defined(CONFIG_USER_ONLY)
1121 if (p->flags & PAGE_WRITE) {
1122 target_ulong addr;
1123 PageDesc *p2;
1124 int prot;
1126 /* force the host page as non writable (writes will have a
1127 page fault + mprotect overhead) */
1128 page_addr &= qemu_host_page_mask;
1129 prot = 0;
1130 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1131 addr += TARGET_PAGE_SIZE) {
1133 p2 = page_find (addr >> TARGET_PAGE_BITS);
1134 if (!p2)
1135 continue;
1136 prot |= p2->flags;
1137 p2->flags &= ~PAGE_WRITE;
1138 page_get_flags(addr);
1140 mprotect(g2h(page_addr), qemu_host_page_size,
1141 (prot & PAGE_BITS) & ~PAGE_WRITE);
1142 #ifdef DEBUG_TB_INVALIDATE
1143 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1144 page_addr);
1145 #endif
1147 #else
1148 /* if some code is already present, then the pages are already
1149 protected. So we handle the case where only the first TB is
1150 allocated in a physical page */
1151 if (!last_first_tb) {
1152 tlb_protect_code(page_addr);
1154 #endif
1156 #endif /* TARGET_HAS_SMC */
1159 /* Allocate a new translation block. Flush the translation buffer if
1160 too many translation blocks or too much generated code. */
1161 TranslationBlock *tb_alloc(target_ulong pc)
1163 TranslationBlock *tb;
1165 if (nb_tbs >= code_gen_max_blocks ||
1166 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1167 return NULL;
1168 tb = &tbs[nb_tbs++];
1169 tb->pc = pc;
1170 tb->cflags = 0;
1171 return tb;
1174 void tb_free(TranslationBlock *tb)
1176 /* In practice this is mostly used for single-use temporary TBs.
1177 Ignore the hard cases and just back up if this TB happens to
1178 be the last one generated. */
1179 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1180 code_gen_ptr = tb->tc_ptr;
1181 nb_tbs--;
1185 /* add a new TB and link it to the physical page tables. phys_page2 is
1186 (-1) to indicate that only one page contains the TB. */
1187 void tb_link_phys(TranslationBlock *tb,
1188 target_ulong phys_pc, target_ulong phys_page2)
1190 unsigned int h;
1191 TranslationBlock **ptb;
1193 /* Grab the mmap lock to stop another thread invalidating this TB
1194 before we are done. */
1195 mmap_lock();
1196 /* add in the physical hash table */
1197 h = tb_phys_hash_func(phys_pc);
1198 ptb = &tb_phys_hash[h];
1199 tb->phys_hash_next = *ptb;
1200 *ptb = tb;
1202 /* add in the page list */
1203 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1204 if (phys_page2 != -1)
1205 tb_alloc_page(tb, 1, phys_page2);
1206 else
1207 tb->page_addr[1] = -1;
1209 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1210 tb->jmp_next[0] = NULL;
1211 tb->jmp_next[1] = NULL;
1213 /* init original jump addresses */
1214 if (tb->tb_next_offset[0] != 0xffff)
1215 tb_reset_jump(tb, 0);
1216 if (tb->tb_next_offset[1] != 0xffff)
1217 tb_reset_jump(tb, 1);
1219 #ifdef DEBUG_TB_CHECK
1220 tb_page_check();
1221 #endif
1222 mmap_unlock();
1225 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1226 tb[1].tc_ptr. Return NULL if not found */
1227 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1229 int m_min, m_max, m;
1230 unsigned long v;
1231 TranslationBlock *tb;
1233 if (nb_tbs <= 0)
1234 return NULL;
1235 if (tc_ptr < (unsigned long)code_gen_buffer ||
1236 tc_ptr >= (unsigned long)code_gen_ptr)
1237 return NULL;
1238 /* binary search (cf Knuth) */
1239 m_min = 0;
1240 m_max = nb_tbs - 1;
1241 while (m_min <= m_max) {
1242 m = (m_min + m_max) >> 1;
1243 tb = &tbs[m];
1244 v = (unsigned long)tb->tc_ptr;
1245 if (v == tc_ptr)
1246 return tb;
1247 else if (tc_ptr < v) {
1248 m_max = m - 1;
1249 } else {
1250 m_min = m + 1;
1253 return &tbs[m_max];
1256 static void tb_reset_jump_recursive(TranslationBlock *tb);
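/* if jump slot 'n' of 'tb' is chained, unlink 'tb' from the target TB's
   jump list, reset the jump in the generated code, and recursively
   unchain the target TB as well */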
1258 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1260 TranslationBlock *tb1, *tb_next, **ptb;
1261 unsigned int n1;
1263 tb1 = tb->jmp_next[n];
1264 if (tb1 != NULL) {
1265 /* find head of list */
1266 for(;;) {
1267 n1 = (long)tb1 & 3;
1268 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1269 if (n1 == 2)
1270 break;
1271 tb1 = tb1->jmp_next[n1];
1273 /* we are now sure that tb jumps to tb1 */
1274 tb_next = tb1;
1276 /* remove tb from the jmp_first list */
1277 ptb = &tb_next->jmp_first;
1278 for(;;) {
1279 tb1 = *ptb;
1280 n1 = (long)tb1 & 3;
1281 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1282 if (n1 == n && tb1 == tb)
1283 break;
1284 ptb = &tb1->jmp_next[n1];
1286 *ptb = tb->jmp_next[n];
1287 tb->jmp_next[n] = NULL;
1289 /* suppress the jump to next tb in generated code */
1290 tb_reset_jump(tb, n);
1292 /* suppress jumps in the tb on which we could have jumped */
1293 tb_reset_jump_recursive(tb_next);
1297 static void tb_reset_jump_recursive(TranslationBlock *tb)
1299 tb_reset_jump_recursive2(tb, 0);
1300 tb_reset_jump_recursive2(tb, 1);
1303 #if defined(TARGET_HAS_ICE)
1304 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1306 target_phys_addr_t addr;
1307 target_ulong pd;
1308 ram_addr_t ram_addr;
1309 PhysPageDesc *p;
1311 addr = cpu_get_phys_page_debug(env, pc);
1312 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1313 if (!p) {
1314 pd = IO_MEM_UNASSIGNED;
1315 } else {
1316 pd = p->phys_offset;
1318 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1319 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1321 #endif
1323 /* Add a watchpoint. */
1324 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1325 int flags, CPUWatchpoint **watchpoint)
1327 target_ulong len_mask = ~(len - 1);
1328 CPUWatchpoint *wp;
1330 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1331 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1332 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1333 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1334 return -EINVAL;
1336 wp = qemu_malloc(sizeof(*wp));
1337 if (!wp)
1338 return -ENOMEM;
1340 wp->vaddr = addr;
1341 wp->len_mask = len_mask;
1342 wp->flags = flags;
1344 /* keep all GDB-injected watchpoints in front */
1345 if (flags & BP_GDB)
1346 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1347 else
1348 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1350 tlb_flush_page(env, addr);
1352 if (watchpoint)
1353 *watchpoint = wp;
1354 return 0;
1357 /* Remove a specific watchpoint. */
1358 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1359 int flags)
1361 target_ulong len_mask = ~(len - 1);
1362 CPUWatchpoint *wp;
1364 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1365 if (addr == wp->vaddr && len_mask == wp->len_mask
1366 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1367 cpu_watchpoint_remove_by_ref(env, wp);
1368 return 0;
1371 return -ENOENT;
1374 /* Remove a specific watchpoint by reference. */
1375 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1377 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1379 tlb_flush_page(env, watchpoint->vaddr);
1381 qemu_free(watchpoint);
1384 /* Remove all matching watchpoints. */
1385 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1387 CPUWatchpoint *wp, *next;
1389 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1390 if (wp->flags & mask)
1391 cpu_watchpoint_remove_by_ref(env, wp);
1395 /* Add a breakpoint. */
1396 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1397 CPUBreakpoint **breakpoint)
1399 #if defined(TARGET_HAS_ICE)
1400 CPUBreakpoint *bp;
1402 bp = qemu_malloc(sizeof(*bp));
1403 if (!bp)
1404 return -ENOMEM;
1406 bp->pc = pc;
1407 bp->flags = flags;
1409 /* keep all GDB-injected breakpoints in front */
1410 if (flags & BP_GDB)
1411 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1412 else
1413 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1415 breakpoint_invalidate(env, pc);
1417 if (breakpoint)
1418 *breakpoint = bp;
1419 return 0;
1420 #else
1421 return -ENOSYS;
1422 #endif
1425 /* Remove a specific breakpoint. */
1426 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1428 #if defined(TARGET_HAS_ICE)
1429 CPUBreakpoint *bp;
1431 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1432 if (bp->pc == pc && bp->flags == flags) {
1433 cpu_breakpoint_remove_by_ref(env, bp);
1434 return 0;
1437 return -ENOENT;
1438 #else
1439 return -ENOSYS;
1440 #endif
1443 /* Remove a specific breakpoint by reference. */
1444 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1446 #if defined(TARGET_HAS_ICE)
1447 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1449 breakpoint_invalidate(env, breakpoint->pc);
1451 qemu_free(breakpoint);
1452 #endif
1455 /* Remove all matching breakpoints. */
1456 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1458 #if defined(TARGET_HAS_ICE)
1459 CPUBreakpoint *bp, *next;
1461 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1462 if (bp->flags & mask)
1463 cpu_breakpoint_remove_by_ref(env, bp);
1465 #endif
1468 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1469 CPU loop after each instruction */
1470 void cpu_single_step(CPUState *env, int enabled)
1472 #if defined(TARGET_HAS_ICE)
1473 if (env->singlestep_enabled != enabled) {
1474 env->singlestep_enabled = enabled;
1475 if (kvm_enabled())
1476 kvm_update_guest_debug(env, 0);
1477 else {
1478 /* must flush all the translated code to avoid inconsistencies */
1479 /* XXX: only flush what is necessary */
1480 tb_flush(env);
1483 #endif
1486 /* enable or disable low-level logging */
1487 void cpu_set_log(int log_flags)
1489 loglevel = log_flags;
1490 if (loglevel && !logfile) {
1491 logfile = fopen(logfilename, log_append ? "a" : "w");
1492 if (!logfile) {
1493 perror(logfilename);
1494 _exit(1);
1496 #if !defined(CONFIG_SOFTMMU)
1497 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1499 static char logfile_buf[4096];
1500 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1502 #else
1503 setvbuf(logfile, NULL, _IOLBF, 0);
1504 #endif
1505 log_append = 1;
1507 if (!loglevel && logfile) {
1508 fclose(logfile);
1509 logfile = NULL;
1513 void cpu_set_log_filename(const char *filename)
1515 logfilename = strdup(filename);
1516 if (logfile) {
1517 fclose(logfile);
1518 logfile = NULL;
1520 cpu_set_log(loglevel);
1523 /* mask must never be zero, except for A20 change call */
1524 void cpu_interrupt(CPUState *env, int mask)
1526 #if !defined(USE_NPTL)
1527 TranslationBlock *tb;
1528 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1529 #endif
1530 int old_mask;
1532 old_mask = env->interrupt_request;
1533 /* FIXME: This is probably not thread-safe. A different thread could
1534 be in the middle of a read-modify-write operation. */
1535 env->interrupt_request |= mask;
1536 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1537 kvm_update_interrupt_request(env);
1538 #if defined(USE_NPTL)
1539 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1540 problem and hope the cpu will stop of its own accord. For userspace
1541 emulation this often isn't actually as bad as it sounds. Often
1542 signals are used primarily to interrupt blocking syscalls. */
1543 #else
1544 if (use_icount) {
1545 env->icount_decr.u16.high = 0xffff;
1546 #ifndef CONFIG_USER_ONLY
1547 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1548 an async event happened and we need to process it. */
1549 if (!can_do_io(env)
1550 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1551 cpu_abort(env, "Raised interrupt while not in I/O function");
1553 #endif
1554 } else {
1555 tb = env->current_tb;
1556 /* if the cpu is currently executing code, we must unlink it and
1557 all the potentially executing TB */
1558 if (tb && !testandset(&interrupt_lock)) {
1559 env->current_tb = NULL;
1560 tb_reset_jump_recursive(tb);
1561 resetlock(&interrupt_lock);
1564 #endif
1567 void cpu_reset_interrupt(CPUState *env, int mask)
1569 env->interrupt_request &= ~mask;
1572 const CPULogItem cpu_log_items[] = {
1573 { CPU_LOG_TB_OUT_ASM, "out_asm",
1574 "show generated host assembly code for each compiled TB" },
1575 { CPU_LOG_TB_IN_ASM, "in_asm",
1576 "show target assembly code for each compiled TB" },
1577 { CPU_LOG_TB_OP, "op",
1578 "show micro ops for each compiled TB" },
1579 { CPU_LOG_TB_OP_OPT, "op_opt",
1580 "show micro ops "
1581 #ifdef TARGET_I386
1582 "before eflags optimization and "
1583 #endif
1584 "after liveness analysis" },
1585 { CPU_LOG_INT, "int",
1586 "show interrupts/exceptions in short format" },
1587 { CPU_LOG_EXEC, "exec",
1588 "show trace before each executed TB (lots of logs)" },
1589 { CPU_LOG_TB_CPU, "cpu",
1590 "show CPU state before block translation" },
1591 #ifdef TARGET_I386
1592 { CPU_LOG_PCALL, "pcall",
1593 "show protected mode far calls/returns/exceptions" },
1594 { CPU_LOG_RESET, "cpu_reset",
1595 "show CPU state before CPU resets" },
1596 #endif
1597 #ifdef DEBUG_IOPORT
1598 { CPU_LOG_IOPORT, "ioport",
1599 "show all i/o ports accesses" },
1600 #endif
1601 { 0, NULL, NULL },
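/* return 1 if the first 'n' characters of 's1' exactly match the string 's2' */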
1604 static int cmp1(const char *s1, int n, const char *s2)
1606 if (strlen(s2) != n)
1607 return 0;
1608 return memcmp(s1, s2, n) == 0;
1611 /* takes a comma-separated list of log masks. Returns 0 on error. */
1612 int cpu_str_to_log_mask(const char *str)
1614 const CPULogItem *item;
1615 int mask;
1616 const char *p, *p1;
1618 p = str;
1619 mask = 0;
1620 for(;;) {
1621 p1 = strchr(p, ',');
1622 if (!p1)
1623 p1 = p + strlen(p);
1624 if(cmp1(p,p1-p,"all")) {
1625 for(item = cpu_log_items; item->mask != 0; item++) {
1626 mask |= item->mask;
1628 } else {
1629 for(item = cpu_log_items; item->mask != 0; item++) {
1630 if (cmp1(p, p1 - p, item->name))
1631 goto found;
1633 return 0;
1635 found:
1636 mask |= item->mask;
1637 if (*p1 != ',')
1638 break;
1639 p = p1 + 1;
1641 return mask;
1644 void cpu_abort(CPUState *env, const char *fmt, ...)
1646 va_list ap;
1647 va_list ap2;
1649 va_start(ap, fmt);
1650 va_copy(ap2, ap);
1651 fprintf(stderr, "qemu: fatal: ");
1652 vfprintf(stderr, fmt, ap);
1653 fprintf(stderr, "\n");
1654 #ifdef TARGET_I386
1655 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1656 #else
1657 cpu_dump_state(env, stderr, fprintf, 0);
1658 #endif
1659 if (qemu_log_enabled()) {
1660 qemu_log("qemu: fatal: ");
1661 qemu_log_vprintf(fmt, ap2);
1662 qemu_log("\n");
1663 #ifdef TARGET_I386
1664 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1665 #else
1666 log_cpu_state(env, 0);
1667 #endif
1668 qemu_log_flush();
1669 qemu_log_close();
1671 va_end(ap2);
1672 va_end(ap);
1673 abort();
1676 CPUState *cpu_copy(CPUState *env)
1678 CPUState *new_env = cpu_init(env->cpu_model_str);
1679 CPUState *next_cpu = new_env->next_cpu;
1680 int cpu_index = new_env->cpu_index;
1681 #if defined(TARGET_HAS_ICE)
1682 CPUBreakpoint *bp;
1683 CPUWatchpoint *wp;
1684 #endif
1686 memcpy(new_env, env, sizeof(CPUState));
1688 /* Preserve chaining and index. */
1689 new_env->next_cpu = next_cpu;
1690 new_env->cpu_index = cpu_index;
1692 /* Clone all break/watchpoints.
1693 Note: Once we support ptrace with hw-debug register access, make sure
1694 BP_CPU break/watchpoints are handled correctly on clone. */
1695 TAILQ_INIT(&env->breakpoints);
1696 TAILQ_INIT(&env->watchpoints);
1697 #if defined(TARGET_HAS_ICE)
1698 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1699 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1701 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1702 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1703 wp->flags, NULL);
1705 #endif
1707 return new_env;
1710 #if !defined(CONFIG_USER_ONLY)
1712 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1714 unsigned int i;
1716 /* Discard jump cache entries for any tb which might potentially
1717 overlap the flushed page. */
1718 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1719 memset (&env->tb_jmp_cache[i], 0,
1720 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1722 i = tb_jmp_cache_hash_page(addr);
1723 memset (&env->tb_jmp_cache[i], 0,
1724 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1727 /* NOTE: if flush_global is true, also flush global entries (not
1728 implemented yet) */
1729 void tlb_flush(CPUState *env, int flush_global)
1731 int i;
1733 #if defined(DEBUG_TLB)
1734 printf("tlb_flush:\n");
1735 #endif
1736 /* must reset current TB so that interrupts cannot modify the
1737 links while we are modifying them */
1738 env->current_tb = NULL;
1740 for(i = 0; i < CPU_TLB_SIZE; i++) {
1741 env->tlb_table[0][i].addr_read = -1;
1742 env->tlb_table[0][i].addr_write = -1;
1743 env->tlb_table[0][i].addr_code = -1;
1744 env->tlb_table[1][i].addr_read = -1;
1745 env->tlb_table[1][i].addr_write = -1;
1746 env->tlb_table[1][i].addr_code = -1;
1747 #if (NB_MMU_MODES >= 3)
1748 env->tlb_table[2][i].addr_read = -1;
1749 env->tlb_table[2][i].addr_write = -1;
1750 env->tlb_table[2][i].addr_code = -1;
1751 #if (NB_MMU_MODES == 4)
1752 env->tlb_table[3][i].addr_read = -1;
1753 env->tlb_table[3][i].addr_write = -1;
1754 env->tlb_table[3][i].addr_code = -1;
1755 #endif
1756 #endif
1759 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1761 #ifdef USE_KQEMU
1762 if (env->kqemu_enabled) {
1763 kqemu_flush(env, flush_global);
1765 #endif
1766 tlb_flush_count++;
1769 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1771 if (addr == (tlb_entry->addr_read &
1772 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1773 addr == (tlb_entry->addr_write &
1774 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1775 addr == (tlb_entry->addr_code &
1776 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1777 tlb_entry->addr_read = -1;
1778 tlb_entry->addr_write = -1;
1779 tlb_entry->addr_code = -1;
1783 void tlb_flush_page(CPUState *env, target_ulong addr)
1785 int i;
1787 #if defined(DEBUG_TLB)
1788 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1789 #endif
1790 /* must reset current TB so that interrupts cannot modify the
1791 links while we are modifying them */
1792 env->current_tb = NULL;
1794 addr &= TARGET_PAGE_MASK;
1795 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1796 tlb_flush_entry(&env->tlb_table[0][i], addr);
1797 tlb_flush_entry(&env->tlb_table[1][i], addr);
1798 #if (NB_MMU_MODES >= 3)
1799 tlb_flush_entry(&env->tlb_table[2][i], addr);
1800 #if (NB_MMU_MODES == 4)
1801 tlb_flush_entry(&env->tlb_table[3][i], addr);
1802 #endif
1803 #endif
1805 tlb_flush_jmp_cache(env, addr);
1807 #ifdef USE_KQEMU
1808 if (env->kqemu_enabled) {
1809 kqemu_flush_page(env, addr);
1811 #endif
1814 /* update the TLBs so that writes to code in the virtual page 'addr'
1815 can be detected */
1816 static void tlb_protect_code(ram_addr_t ram_addr)
1818 cpu_physical_memory_reset_dirty(ram_addr,
1819 ram_addr + TARGET_PAGE_SIZE,
1820 CODE_DIRTY_FLAG);
1823 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1824 tested for self-modifying code */
1825 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1826 target_ulong vaddr)
1828 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
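/* if a RAM write entry of the TLB falls inside [start, start + length),
   mark it TLB_NOTDIRTY so that the next write takes the slow path */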
1831 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1832 unsigned long start, unsigned long length)
1834 unsigned long addr;
1835 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1836 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1837 if ((addr - start) < length) {
1838 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1843 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1844 int dirty_flags)
1846 CPUState *env;
1847 unsigned long length, start1;
1848 int i, mask, len;
1849 uint8_t *p;
1851 start &= TARGET_PAGE_MASK;
1852 end = TARGET_PAGE_ALIGN(end);
1854 length = end - start;
1855 if (length == 0)
1856 return;
1857 len = length >> TARGET_PAGE_BITS;
1858 #ifdef USE_KQEMU
1859 /* XXX: should not depend on cpu context */
1860 env = first_cpu;
1861 if (env->kqemu_enabled) {
1862 ram_addr_t addr;
1863 addr = start;
1864 for(i = 0; i < len; i++) {
1865 kqemu_set_notdirty(env, addr);
1866 addr += TARGET_PAGE_SIZE;
1869 #endif
1870 mask = ~dirty_flags;
1871 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1872 for(i = 0; i < len; i++)
1873 p[i] &= mask;
1875 /* we modify the TLB cache so that the dirty bit will be set again
1876 when accessing the range */
1877 start1 = start + (unsigned long)phys_ram_base;
1878 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1879 for(i = 0; i < CPU_TLB_SIZE; i++)
1880 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1881 for(i = 0; i < CPU_TLB_SIZE; i++)
1882 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1883 #if (NB_MMU_MODES >= 3)
1884 for(i = 0; i < CPU_TLB_SIZE; i++)
1885 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1886 #if (NB_MMU_MODES == 4)
1887 for(i = 0; i < CPU_TLB_SIZE; i++)
1888 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1889 #endif
1890 #endif
1894 int cpu_physical_memory_set_dirty_tracking(int enable)
1896 int r=0;
1898 if (kvm_enabled())
1899 r = kvm_physical_memory_set_dirty_tracking(enable);
1900 in_migration = enable;
1901 return r;
1904 int cpu_physical_memory_get_dirty_tracking(void)
1906 return in_migration;
1909 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1911 if (kvm_enabled())
1912 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1915 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1917 ram_addr_t ram_addr;
1919 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1920 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1921 tlb_entry->addend - (unsigned long)phys_ram_base;
1922 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1923 tlb_entry->addr_write |= TLB_NOTDIRTY;
1928 /* update the TLB according to the current state of the dirty bits */
1929 void cpu_tlb_update_dirty(CPUState *env)
1931 int i;
1932 for(i = 0; i < CPU_TLB_SIZE; i++)
1933 tlb_update_dirty(&env->tlb_table[0][i]);
1934 for(i = 0; i < CPU_TLB_SIZE; i++)
1935 tlb_update_dirty(&env->tlb_table[1][i]);
1936 #if (NB_MMU_MODES >= 3)
1937 for(i = 0; i < CPU_TLB_SIZE; i++)
1938 tlb_update_dirty(&env->tlb_table[2][i]);
1939 #if (NB_MMU_MODES == 4)
1940 for(i = 0; i < CPU_TLB_SIZE; i++)
1941 tlb_update_dirty(&env->tlb_table[3][i]);
1942 #endif
1943 #endif
1946 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1948 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1949 tlb_entry->addr_write = vaddr;
1952 /* update the TLB corresponding to virtual page vaddr
1953 so that it is no longer dirty */
1954 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1956 int i;
1958 vaddr &= TARGET_PAGE_MASK;
1959 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1960 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1961 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1962 #if (NB_MMU_MODES >= 3)
1963 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1964 #if (NB_MMU_MODES == 4)
1965 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1966 #endif
1967 #endif
1970 /* add a new TLB entry. At most one entry for a given virtual address
1971 is permitted. Return 0 if OK or 2 if the page could not be mapped
1972 (can only happen in non SOFTMMU mode for I/O pages or pages
1973 conflicting with the host address space). */
1974 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1975 target_phys_addr_t paddr, int prot,
1976 int mmu_idx, int is_softmmu)
1978 PhysPageDesc *p;
1979 unsigned long pd;
1980 unsigned int index;
1981 target_ulong address;
1982 target_ulong code_address;
1983 target_phys_addr_t addend;
1984 int ret;
1985 CPUTLBEntry *te;
1986 CPUWatchpoint *wp;
1987 target_phys_addr_t iotlb;
1989 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1990 if (!p) {
1991 pd = IO_MEM_UNASSIGNED;
1992 } else {
1993 pd = p->phys_offset;
1995 #if defined(DEBUG_TLB)
1996 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1997 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1998 #endif
2000 ret = 0;
2001 address = vaddr;
2002 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2003 /* IO memory case (romd handled later) */
2004 address |= TLB_MMIO;
2006 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2007 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2008 /* Normal RAM. */
2009 iotlb = pd & TARGET_PAGE_MASK;
2010 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2011 iotlb |= IO_MEM_NOTDIRTY;
2012 else
2013 iotlb |= IO_MEM_ROM;
2014 } else {
2015 /* IO handlers are currently passed a physical address.
2016 It would be nice to pass an offset from the base address
2017 of that region. This would avoid having to special case RAM,
2018 and avoid full address decoding in every device.
2019 We can't use the high bits of pd for this because
2020 IO_MEM_ROMD uses these as a ram address. */
2021 iotlb = (pd & ~TARGET_PAGE_MASK);
2022 if (p) {
2023 iotlb += p->region_offset;
2024 } else {
2025 iotlb += paddr;
2029 code_address = address;
2030 /* Make accesses to pages with watchpoints go via the
2031 watchpoint trap routines. */
2032 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2033 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2034 iotlb = io_mem_watch + paddr;
2035 /* TODO: The memory case can be optimized by not trapping
2036 reads of pages with a write breakpoint. */
2037 address |= TLB_MMIO;
2041 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2042 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2043 te = &env->tlb_table[mmu_idx][index];
2044 te->addend = addend - vaddr;
2045 if (prot & PAGE_READ) {
2046 te->addr_read = address;
2047 } else {
2048 te->addr_read = -1;
2051 if (prot & PAGE_EXEC) {
2052 te->addr_code = code_address;
2053 } else {
2054 te->addr_code = -1;
2056 if (prot & PAGE_WRITE) {
2057 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2058 (pd & IO_MEM_ROMD)) {
2059 /* Write access calls the I/O callback. */
2060 te->addr_write = address | TLB_MMIO;
2061 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2062 !cpu_physical_memory_is_dirty(pd)) {
2063 te->addr_write = address | TLB_NOTDIRTY;
2064 } else {
2065 te->addr_write = address;
2067 } else {
2068 te->addr_write = -1;
2070 return ret;
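/* Illustrative sketch, not part of the original file: a target's MMU fault
   handler (the tlb_fill path) typically calls tlb_set_page_exec() after
   translating the faulting address.  example_mmu_translate() and the
   surrounding function are hypothetical. */
#if 0
static int example_tlb_fill(CPUState *env, target_ulong vaddr,
                            int mmu_idx, int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    /* hypothetical target-specific page table walk filling paddr/prot */
    if (example_mmu_translate(env, vaddr, &paddr, &prot) < 0)
        return 1;                      /* let the caller raise a page fault */

    /* one entry per virtual page; prot is a PAGE_READ/WRITE/EXEC mask */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, is_softmmu);
}
#endif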
2073 #else
2075 void tlb_flush(CPUState *env, int flush_global)
2079 void tlb_flush_page(CPUState *env, target_ulong addr)
2083 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2084 target_phys_addr_t paddr, int prot,
2085 int mmu_idx, int is_softmmu)
2087 return 0;
2090 /* dump memory mappings */
2091 void page_dump(FILE *f)
2093 unsigned long start, end;
2094 int i, j, prot, prot1;
2095 PageDesc *p;
2097 fprintf(f, "%-8s %-8s %-8s %s\n",
2098 "start", "end", "size", "prot");
2099 start = -1;
2100 end = -1;
2101 prot = 0;
2102 for(i = 0; i <= L1_SIZE; i++) {
2103 if (i < L1_SIZE)
2104 p = l1_map[i];
2105 else
2106 p = NULL;
2107 for(j = 0;j < L2_SIZE; j++) {
2108 if (!p)
2109 prot1 = 0;
2110 else
2111 prot1 = p[j].flags;
2112 if (prot1 != prot) {
2113 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2114 if (start != -1) {
2115 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2116 start, end, end - start,
2117 prot & PAGE_READ ? 'r' : '-',
2118 prot & PAGE_WRITE ? 'w' : '-',
2119 prot & PAGE_EXEC ? 'x' : '-');
2121 if (prot1 != 0)
2122 start = end;
2123 else
2124 start = -1;
2125 prot = prot1;
2127 if (!p)
2128 break;
2133 int page_get_flags(target_ulong address)
2135 PageDesc *p;
2137 p = page_find(address >> TARGET_PAGE_BITS);
2138 if (!p)
2139 return 0;
2140 return p->flags;
2143 /* modify the flags of a page and invalidate the code if
2144 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2145 depending on PAGE_WRITE */
2146 void page_set_flags(target_ulong start, target_ulong end, int flags)
2148 PageDesc *p;
2149 target_ulong addr;
2151 /* mmap_lock should already be held. */
2152 start = start & TARGET_PAGE_MASK;
2153 end = TARGET_PAGE_ALIGN(end);
2154 if (flags & PAGE_WRITE)
2155 flags |= PAGE_WRITE_ORG;
2156 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2157 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2158 /* We may be called for host regions that are outside guest
2159 address space. */
2160 if (!p)
2161 return;
2162 /* if the write protection is set, then we invalidate the code
2163 inside */
2164 if (!(p->flags & PAGE_WRITE) &&
2165 (flags & PAGE_WRITE) &&
2166 p->first_tb) {
2167 tb_invalidate_phys_page(addr, 0, NULL);
2169 p->flags = flags;
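/* Illustrative sketch, not part of the original file: user-mode emulation's
   mmap/mprotect code records guest page permissions this way.  start, len
   and prot are hypothetical values already validated by the caller. */
#if 0
    /* after mapping [start, start + len) on behalf of the guest */
    page_set_flags(start, start + len, prot | PAGE_VALID);
    /* later, page_check_range(start, len, PAGE_WRITE) verifies access and
       transparently unprotects pages that hold translated code */
#endif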
2173 int page_check_range(target_ulong start, target_ulong len, int flags)
2175 PageDesc *p;
2176 target_ulong end;
2177 target_ulong addr;
2179 if (start + len < start)
2180 /* we've wrapped around */
2181 return -1;
2183 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2184 start = start & TARGET_PAGE_MASK;
2186 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2187 p = page_find(addr >> TARGET_PAGE_BITS);
2188 if( !p )
2189 return -1;
2190 if( !(p->flags & PAGE_VALID) )
2191 return -1;
2193 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2194 return -1;
2195 if (flags & PAGE_WRITE) {
2196 if (!(p->flags & PAGE_WRITE_ORG))
2197 return -1;
2198 /* unprotect the page if it was put read-only because it
2199 contains translated code */
2200 if (!(p->flags & PAGE_WRITE)) {
2201 if (!page_unprotect(addr, 0, NULL))
2202 return -1;
2204 return 0;
2207 return 0;
2210 /* called from signal handler: invalidate the code and unprotect the
2211 page. Return TRUE if the fault was successfully handled. */
2212 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2214 unsigned int page_index, prot, pindex;
2215 PageDesc *p, *p1;
2216 target_ulong host_start, host_end, addr;
2218 /* Technically this isn't safe inside a signal handler. However we
2219 know this only ever happens in a synchronous SEGV handler, so in
2220 practice it seems to be ok. */
2221 mmap_lock();
2223 host_start = address & qemu_host_page_mask;
2224 page_index = host_start >> TARGET_PAGE_BITS;
2225 p1 = page_find(page_index);
2226 if (!p1) {
2227 mmap_unlock();
2228 return 0;
2230 host_end = host_start + qemu_host_page_size;
2231 p = p1;
2232 prot = 0;
2233 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2234 prot |= p->flags;
2235 p++;
2237 /* if the page was really writable, then we change its
2238 protection back to writable */
2239 if (prot & PAGE_WRITE_ORG) {
2240 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2241 if (!(p1[pindex].flags & PAGE_WRITE)) {
2242 mprotect((void *)g2h(host_start), qemu_host_page_size,
2243 (prot & PAGE_BITS) | PAGE_WRITE);
2244 p1[pindex].flags |= PAGE_WRITE;
2245 /* and since the content will be modified, we must invalidate
2246 the corresponding translated code. */
2247 tb_invalidate_phys_page(address, pc, puc);
2248 #ifdef DEBUG_TB_CHECK
2249 tb_invalidate_check(address);
2250 #endif
2251 mmap_unlock();
2252 return 1;
2255 mmap_unlock();
2256 return 0;
2259 static inline void tlb_set_dirty(CPUState *env,
2260 unsigned long addr, target_ulong vaddr)
2263 #endif /* defined(CONFIG_USER_ONLY) */
2265 #if !defined(CONFIG_USER_ONLY)
2267 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2268 ram_addr_t memory, ram_addr_t region_offset);
2269 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2270 ram_addr_t orig_memory, ram_addr_t region_offset);
2271 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2272 need_subpage) \
2273 do { \
2274 if (addr > start_addr) \
2275 start_addr2 = 0; \
2276 else { \
2277 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2278 if (start_addr2 > 0) \
2279 need_subpage = 1; \
2282 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2283 end_addr2 = TARGET_PAGE_SIZE - 1; \
2284 else { \
2285 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2286 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2287 need_subpage = 1; \
2289 } while (0)
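/* Worked example (not part of the original file), assuming
   TARGET_PAGE_SIZE == 0x1000: registering orig_size = 0x200 bytes at
   start_addr = 0x10000100 and evaluating CHECK_SUBPAGE for the page
   addr = 0x10000000 yields
       start_addr2 = 0x100  (region starts mid-page)      -> need_subpage = 1
       end_addr2   = 0x2ff  (region ends before page end) -> need_subpage = 1
   so only offsets [0x100, 0x2ff] of that page are routed through the
   subpage machinery below. */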
2291 /* register physical memory. 'size' must be a multiple of the target
2292 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2293 io memory page. The address used when calling the IO function is
2294 the offset from the start of the region, plus region_offset. Both
2295 start_addr and region_offset are rounded down to a page boundary
2296 before calculating this offset. This should not be a problem unless
2297 the low bits of start_addr and region_offset differ. */
2298 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2299 ram_addr_t size,
2300 ram_addr_t phys_offset,
2301 ram_addr_t region_offset)
2303 target_phys_addr_t addr, end_addr;
2304 PhysPageDesc *p;
2305 CPUState *env;
2306 ram_addr_t orig_size = size;
2307 void *subpage;
2309 #ifdef USE_KQEMU
2310 /* XXX: should not depend on cpu context */
2311 env = first_cpu;
2312 if (env->kqemu_enabled) {
2313 kqemu_set_phys_mem(start_addr, size, phys_offset);
2315 #endif
2316 if (kvm_enabled())
2317 kvm_set_phys_mem(start_addr, size, phys_offset);
2319 region_offset &= TARGET_PAGE_MASK;
2320 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2321 end_addr = start_addr + (target_phys_addr_t)size;
2322 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2323 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2324 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2325 ram_addr_t orig_memory = p->phys_offset;
2326 target_phys_addr_t start_addr2, end_addr2;
2327 int need_subpage = 0;
2329 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2330 need_subpage);
2331 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2332 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2333 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2334 &p->phys_offset, orig_memory,
2335 p->region_offset);
2336 } else {
2337 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2338 >> IO_MEM_SHIFT];
2340 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2341 region_offset);
2342 p->region_offset = 0;
2343 } else {
2344 p->phys_offset = phys_offset;
2345 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2346 (phys_offset & IO_MEM_ROMD))
2347 phys_offset += TARGET_PAGE_SIZE;
2349 } else {
2350 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2351 p->phys_offset = phys_offset;
2352 p->region_offset = region_offset;
2353 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2354 (phys_offset & IO_MEM_ROMD)) {
2355 phys_offset += TARGET_PAGE_SIZE;
2356 } else {
2357 target_phys_addr_t start_addr2, end_addr2;
2358 int need_subpage = 0;
2360 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2361 end_addr2, need_subpage);
2363 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2364 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2365 &p->phys_offset, IO_MEM_UNASSIGNED,
2367 subpage_register(subpage, start_addr2, end_addr2,
2368 phys_offset, region_offset);
2369 p->region_offset = 0;
2373 region_offset += TARGET_PAGE_SIZE;
2376 /* since each CPU stores ram addresses in its TLB cache, we must
2377 reset the modified entries */
2378 /* XXX: slow ! */
2379 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2380 tlb_flush(env, 1);
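/* Illustrative sketch, not part of the original file: boards usually map
   guest RAM (or an IO index from cpu_register_io_memory()) through the
   cpu_register_physical_memory() wrapper, which passes region_offset == 0.
   The base address and size below are arbitrary. */
#if 0
    ram_addr_t ram_off = qemu_ram_alloc(0x800000);        /* 8 MB of RAM */
    cpu_register_physical_memory(0x00000000, 0x800000, ram_off | IO_MEM_RAM);
#endif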
2384 /* XXX: temporary until new memory mapping API */
2385 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2387 PhysPageDesc *p;
2389 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2390 if (!p)
2391 return IO_MEM_UNASSIGNED;
2392 return p->phys_offset;
2395 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2397 if (kvm_enabled())
2398 kvm_coalesce_mmio_region(addr, size);
2401 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2403 if (kvm_enabled())
2404 kvm_uncoalesce_mmio_region(addr, size);
2407 /* XXX: better than nothing */
2408 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2410 ram_addr_t addr;
2411 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2412 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2413 (uint64_t)size, (uint64_t)phys_ram_size);
2414 abort();
2416 addr = phys_ram_alloc_offset;
2417 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2418 return addr;
2421 void qemu_ram_free(ram_addr_t addr)
2425 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2427 #ifdef DEBUG_UNASSIGNED
2428 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2429 #endif
2430 #if defined(TARGET_SPARC)
2431 do_unassigned_access(addr, 0, 0, 0, 1);
2432 #endif
2433 return 0;
2436 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2438 #ifdef DEBUG_UNASSIGNED
2439 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2440 #endif
2441 #if defined(TARGET_SPARC)
2442 do_unassigned_access(addr, 0, 0, 0, 2);
2443 #endif
2444 return 0;
2447 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2449 #ifdef DEBUG_UNASSIGNED
2450 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2451 #endif
2452 #if defined(TARGET_SPARC)
2453 do_unassigned_access(addr, 0, 0, 0, 4);
2454 #endif
2455 return 0;
2458 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2460 #ifdef DEBUG_UNASSIGNED
2461 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2462 #endif
2463 #if defined(TARGET_SPARC)
2464 do_unassigned_access(addr, 1, 0, 0, 1);
2465 #endif
2468 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2470 #ifdef DEBUG_UNASSIGNED
2471 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2472 #endif
2473 #if defined(TARGET_SPARC)
2474 do_unassigned_access(addr, 1, 0, 0, 2);
2475 #endif
2478 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2480 #ifdef DEBUG_UNASSIGNED
2481 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2482 #endif
2483 #if defined(TARGET_SPARC)
2484 do_unassigned_access(addr, 1, 0, 0, 4);
2485 #endif
2488 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2489 unassigned_mem_readb,
2490 unassigned_mem_readw,
2491 unassigned_mem_readl,
2494 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2495 unassigned_mem_writeb,
2496 unassigned_mem_writew,
2497 unassigned_mem_writel,
2500 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2501 uint32_t val)
2503 int dirty_flags;
2504 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2505 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2506 #if !defined(CONFIG_USER_ONLY)
2507 tb_invalidate_phys_page_fast(ram_addr, 1);
2508 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2509 #endif
2511 stb_p(phys_ram_base + ram_addr, val);
2512 #ifdef USE_KQEMU
2513 if (cpu_single_env->kqemu_enabled &&
2514 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2515 kqemu_modify_page(cpu_single_env, ram_addr);
2516 #endif
2517 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2518 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2519 /* we remove the notdirty callback only if the code has been
2520 flushed */
2521 if (dirty_flags == 0xff)
2522 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2525 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2526 uint32_t val)
2528 int dirty_flags;
2529 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2530 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2531 #if !defined(CONFIG_USER_ONLY)
2532 tb_invalidate_phys_page_fast(ram_addr, 2);
2533 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2534 #endif
2536 stw_p(phys_ram_base + ram_addr, val);
2537 #ifdef USE_KQEMU
2538 if (cpu_single_env->kqemu_enabled &&
2539 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2540 kqemu_modify_page(cpu_single_env, ram_addr);
2541 #endif
2542 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2543 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2544 /* we remove the notdirty callback only if the code has been
2545 flushed */
2546 if (dirty_flags == 0xff)
2547 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2550 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2551 uint32_t val)
2553 int dirty_flags;
2554 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2555 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2556 #if !defined(CONFIG_USER_ONLY)
2557 tb_invalidate_phys_page_fast(ram_addr, 4);
2558 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2559 #endif
2561 stl_p(phys_ram_base + ram_addr, val);
2562 #ifdef USE_KQEMU
2563 if (cpu_single_env->kqemu_enabled &&
2564 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2565 kqemu_modify_page(cpu_single_env, ram_addr);
2566 #endif
2567 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2568 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2569 /* we remove the notdirty callback only if the code has been
2570 flushed */
2571 if (dirty_flags == 0xff)
2572 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2575 static CPUReadMemoryFunc *error_mem_read[3] = {
2576 NULL, /* never used */
2577 NULL, /* never used */
2578 NULL, /* never used */
2581 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2582 notdirty_mem_writeb,
2583 notdirty_mem_writew,
2584 notdirty_mem_writel,
2587 /* Generate a debug exception if a watchpoint has been hit. */
2588 static void check_watchpoint(int offset, int len_mask, int flags)
2590 CPUState *env = cpu_single_env;
2591 target_ulong pc, cs_base;
2592 TranslationBlock *tb;
2593 target_ulong vaddr;
2594 CPUWatchpoint *wp;
2595 int cpu_flags;
2597 if (env->watchpoint_hit) {
2598 /* We re-entered the check after replacing the TB. Now raise
2599 * the debug interrupt so that it will trigger after the
2600 * current instruction. */
2601 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2602 return;
2604 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2605 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2606 if ((vaddr == (wp->vaddr & len_mask) ||
2607 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2608 wp->flags |= BP_WATCHPOINT_HIT;
2609 if (!env->watchpoint_hit) {
2610 env->watchpoint_hit = wp;
2611 tb = tb_find_pc(env->mem_io_pc);
2612 if (!tb) {
2613 cpu_abort(env, "check_watchpoint: could not find TB for "
2614 "pc=%p", (void *)env->mem_io_pc);
2616 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2617 tb_phys_invalidate(tb, -1);
2618 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2619 env->exception_index = EXCP_DEBUG;
2620 } else {
2621 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2622 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2624 cpu_resume_from_signal(env, NULL);
2626 } else {
2627 wp->flags &= ~BP_WATCHPOINT_HIT;
2632 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2633 so these check for a hit then pass through to the normal out-of-line
2634 phys routines. */
2635 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2637 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2638 return ldub_phys(addr);
2641 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2643 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2644 return lduw_phys(addr);
2647 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2649 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2650 return ldl_phys(addr);
2653 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2654 uint32_t val)
2656 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2657 stb_phys(addr, val);
2660 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2661 uint32_t val)
2663 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2664 stw_phys(addr, val);
2667 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2668 uint32_t val)
2670 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2671 stl_phys(addr, val);
2674 static CPUReadMemoryFunc *watch_mem_read[3] = {
2675 watch_mem_readb,
2676 watch_mem_readw,
2677 watch_mem_readl,
2680 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2681 watch_mem_writeb,
2682 watch_mem_writew,
2683 watch_mem_writel,
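/* Illustrative sketch, not part of the original file: the watch_mem_*
   handlers above only fire for pages that carry a watchpoint.  This assumes
   the cpu_watchpoint_insert() helper defined earlier in this file; env and
   guest_vaddr are hypothetical. */
#if 0
    /* trap guest writes to one 32-bit word at guest_vaddr */
    cpu_watchpoint_insert(env, guest_vaddr, 4, BP_MEM_WRITE, NULL);
#endif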
2686 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2687 unsigned int len)
2689 uint32_t ret;
2690 unsigned int idx;
2692 idx = SUBPAGE_IDX(addr);
2693 #if defined(DEBUG_SUBPAGE)
2694 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2695 mmio, len, addr, idx);
2696 #endif
2697 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2698 addr + mmio->region_offset[idx][0][len]);
2700 return ret;
2703 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2704 uint32_t value, unsigned int len)
2706 unsigned int idx;
2708 idx = SUBPAGE_IDX(addr);
2709 #if defined(DEBUG_SUBPAGE)
2710 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2711 mmio, len, addr, idx, value);
2712 #endif
2713 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2714 addr + mmio->region_offset[idx][1][len],
2715 value);
2718 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2720 #if defined(DEBUG_SUBPAGE)
2721 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2722 #endif
2724 return subpage_readlen(opaque, addr, 0);
2727 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2728 uint32_t value)
2730 #if defined(DEBUG_SUBPAGE)
2731 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2732 #endif
2733 subpage_writelen(opaque, addr, value, 0);
2736 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2738 #if defined(DEBUG_SUBPAGE)
2739 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2740 #endif
2742 return subpage_readlen(opaque, addr, 1);
2745 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2746 uint32_t value)
2748 #if defined(DEBUG_SUBPAGE)
2749 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2750 #endif
2751 subpage_writelen(opaque, addr, value, 1);
2754 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2756 #if defined(DEBUG_SUBPAGE)
2757 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2758 #endif
2760 return subpage_readlen(opaque, addr, 2);
2763 static void subpage_writel (void *opaque,
2764 target_phys_addr_t addr, uint32_t value)
2766 #if defined(DEBUG_SUBPAGE)
2767 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2768 #endif
2769 subpage_writelen(opaque, addr, value, 2);
2772 static CPUReadMemoryFunc *subpage_read[] = {
2773 &subpage_readb,
2774 &subpage_readw,
2775 &subpage_readl,
2778 static CPUWriteMemoryFunc *subpage_write[] = {
2779 &subpage_writeb,
2780 &subpage_writew,
2781 &subpage_writel,
2784 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2785 ram_addr_t memory, ram_addr_t region_offset)
2787 int idx, eidx;
2788 unsigned int i;
2790 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2791 return -1;
2792 idx = SUBPAGE_IDX(start);
2793 eidx = SUBPAGE_IDX(end);
2794 #if defined(DEBUG_SUBPAGE)
2795 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2796 mmio, start, end, idx, eidx, memory);
2797 #endif
2798 memory >>= IO_MEM_SHIFT;
2799 for (; idx <= eidx; idx++) {
2800 for (i = 0; i < 4; i++) {
2801 if (io_mem_read[memory][i]) {
2802 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2803 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2804 mmio->region_offset[idx][0][i] = region_offset;
2806 if (io_mem_write[memory][i]) {
2807 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2808 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2809 mmio->region_offset[idx][1][i] = region_offset;
2814 return 0;
2817 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2818 ram_addr_t orig_memory, ram_addr_t region_offset)
2820 subpage_t *mmio;
2821 int subpage_memory;
2823 mmio = qemu_mallocz(sizeof(subpage_t));
2824 if (mmio != NULL) {
2825 mmio->base = base;
2826 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2827 #if defined(DEBUG_SUBPAGE)
2828 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2829 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2830 #endif
2831 *phys = subpage_memory | IO_MEM_SUBPAGE;
2832 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2833 region_offset);
2836 return mmio;
2839 static int get_free_io_mem_idx(void)
2841 int i;
2843 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2844 if (!io_mem_used[i]) {
2845 io_mem_used[i] = 1;
2846 return i;
2849 return -1;
2852 static void io_mem_init(void)
2854 int i;
2856 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2857 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2858 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2859 for (i=0; i<5; i++)
2860 io_mem_used[i] = 1;
2862 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2863 watch_mem_write, NULL);
2864 /* alloc dirty bits array */
2865 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2866 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2869 /* mem_read and mem_write are arrays of functions containing the
2870 function to access byte (index 0), word (index 1) and dword (index
2871 2). Functions can be omitted with a NULL function pointer. The
2872 registered functions may be modified dynamically later.
2873 If io_index is non-zero, the corresponding io zone is
2874 modified. If it is zero, a new io zone is allocated. The return
2875 value can be used with cpu_register_physical_memory(). -1 is
2876 returned on error. */
2877 int cpu_register_io_memory(int io_index,
2878 CPUReadMemoryFunc **mem_read,
2879 CPUWriteMemoryFunc **mem_write,
2880 void *opaque)
2882 int i, subwidth = 0;
2884 if (io_index <= 0) {
2885 io_index = get_free_io_mem_idx();
2886 if (io_index == -1)
2887 return io_index;
2888 } else {
2889 if (io_index >= IO_MEM_NB_ENTRIES)
2890 return -1;
2893 for(i = 0;i < 3; i++) {
2894 if (!mem_read[i] || !mem_write[i])
2895 subwidth = IO_MEM_SUBWIDTH;
2896 io_mem_read[io_index][i] = mem_read[i];
2897 io_mem_write[io_index][i] = mem_write[i];
2899 io_mem_opaque[io_index] = opaque;
2900 return (io_index << IO_MEM_SHIFT) | subwidth;
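/* Illustrative sketch, not part of the original file: a device model
   registers its access handlers and then maps the returned IO index as a
   phys_offset.  The my_dev_* functions, opaque pointer and addresses are
   hypothetical. */
#if 0
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void my_dev_map(void *opaque)
{
    /* io_index 0 requests a fresh slot; the result encodes
       (index << IO_MEM_SHIFT) plus IO_MEM_SUBWIDTH if a width is missing */
    int io = cpu_register_io_memory(0, my_dev_read, my_dev_write, opaque);
    cpu_register_physical_memory(0xfe000000, 0x1000, io);
}
#endif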
2903 void cpu_unregister_io_memory(int io_table_address)
2905 int i;
2906 int io_index = io_table_address >> IO_MEM_SHIFT;
2908 for (i=0;i < 3; i++) {
2909 io_mem_read[io_index][i] = unassigned_mem_read[i];
2910 io_mem_write[io_index][i] = unassigned_mem_write[i];
2912 io_mem_opaque[io_index] = NULL;
2913 io_mem_used[io_index] = 0;
2916 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2918 return io_mem_write[io_index >> IO_MEM_SHIFT];
2921 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2923 return io_mem_read[io_index >> IO_MEM_SHIFT];
2926 #endif /* !defined(CONFIG_USER_ONLY) */
2928 /* physical memory access (slow version, mainly for debug) */
2929 #if defined(CONFIG_USER_ONLY)
2930 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2931 int len, int is_write)
2933 int l, flags;
2934 target_ulong page;
2935 void * p;
2937 while (len > 0) {
2938 page = addr & TARGET_PAGE_MASK;
2939 l = (page + TARGET_PAGE_SIZE) - addr;
2940 if (l > len)
2941 l = len;
2942 flags = page_get_flags(page);
2943 if (!(flags & PAGE_VALID))
2944 return;
2945 if (is_write) {
2946 if (!(flags & PAGE_WRITE))
2947 return;
2948 /* XXX: this code should not depend on lock_user */
2949 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2950 /* FIXME - should this return an error rather than just fail? */
2951 return;
2952 memcpy(p, buf, l);
2953 unlock_user(p, addr, l);
2954 } else {
2955 if (!(flags & PAGE_READ))
2956 return;
2957 /* XXX: this code should not depend on lock_user */
2958 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2959 /* FIXME - should this return an error rather than just fail? */
2960 return;
2961 memcpy(buf, p, l);
2962 unlock_user(p, addr, 0);
2964 len -= l;
2965 buf += l;
2966 addr += l;
2970 #else
2971 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2972 int len, int is_write)
2974 int l, io_index;
2975 uint8_t *ptr;
2976 uint32_t val;
2977 target_phys_addr_t page;
2978 unsigned long pd;
2979 PhysPageDesc *p;
2981 while (len > 0) {
2982 page = addr & TARGET_PAGE_MASK;
2983 l = (page + TARGET_PAGE_SIZE) - addr;
2984 if (l > len)
2985 l = len;
2986 p = phys_page_find(page >> TARGET_PAGE_BITS);
2987 if (!p) {
2988 pd = IO_MEM_UNASSIGNED;
2989 } else {
2990 pd = p->phys_offset;
2993 if (is_write) {
2994 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2995 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2996 if (p)
2997 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2998 /* XXX: could force cpu_single_env to NULL to avoid
2999 potential bugs */
3000 if (l >= 4 && ((addr & 3) == 0)) {
3001 /* 32 bit write access */
3002 val = ldl_p(buf);
3003 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3004 l = 4;
3005 } else if (l >= 2 && ((addr & 1) == 0)) {
3006 /* 16 bit write access */
3007 val = lduw_p(buf);
3008 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3009 l = 2;
3010 } else {
3011 /* 8 bit write access */
3012 val = ldub_p(buf);
3013 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3014 l = 1;
3016 } else {
3017 unsigned long addr1;
3018 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3019 /* RAM case */
3020 ptr = phys_ram_base + addr1;
3021 memcpy(ptr, buf, l);
3022 if (!cpu_physical_memory_is_dirty(addr1)) {
3023 /* invalidate code */
3024 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3025 /* set dirty bit */
3026 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3027 (0xff & ~CODE_DIRTY_FLAG);
3029 /* qemu doesn't execute guest code directly, but kvm does,
3030 so flush the instruction caches */
3031 if (kvm_enabled())
3032 flush_icache_range((unsigned long)ptr,
3033 ((unsigned long)ptr)+l);
3035 } else {
3036 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3037 !(pd & IO_MEM_ROMD)) {
3038 /* I/O case */
3039 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3040 if (p)
3041 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3042 if (l >= 4 && ((addr & 3) == 0)) {
3043 /* 32 bit read access */
3044 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3045 stl_p(buf, val);
3046 l = 4;
3047 } else if (l >= 2 && ((addr & 1) == 0)) {
3048 /* 16 bit read access */
3049 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3050 stw_p(buf, val);
3051 l = 2;
3052 } else {
3053 /* 8 bit read access */
3054 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3055 stb_p(buf, val);
3056 l = 1;
3058 } else {
3059 /* RAM case */
3060 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3061 (addr & ~TARGET_PAGE_MASK);
3062 memcpy(buf, ptr, l);
3065 len -= l;
3066 buf += l;
3067 addr += l;
3071 /* used for ROM loading : can write in RAM and ROM */
3072 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3073 const uint8_t *buf, int len)
3075 int l;
3076 uint8_t *ptr;
3077 target_phys_addr_t page;
3078 unsigned long pd;
3079 PhysPageDesc *p;
3081 while (len > 0) {
3082 page = addr & TARGET_PAGE_MASK;
3083 l = (page + TARGET_PAGE_SIZE) - addr;
3084 if (l > len)
3085 l = len;
3086 p = phys_page_find(page >> TARGET_PAGE_BITS);
3087 if (!p) {
3088 pd = IO_MEM_UNASSIGNED;
3089 } else {
3090 pd = p->phys_offset;
3093 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3094 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3095 !(pd & IO_MEM_ROMD)) {
3096 /* do nothing */
3097 } else {
3098 unsigned long addr1;
3099 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3100 /* ROM/RAM case */
3101 ptr = phys_ram_base + addr1;
3102 memcpy(ptr, buf, l);
3104 len -= l;
3105 buf += l;
3106 addr += l;
3110 typedef struct {
3111 void *buffer;
3112 target_phys_addr_t addr;
3113 target_phys_addr_t len;
3114 } BounceBuffer;
3116 static BounceBuffer bounce;
3118 typedef struct MapClient {
3119 void *opaque;
3120 void (*callback)(void *opaque);
3121 LIST_ENTRY(MapClient) link;
3122 } MapClient;
3124 static LIST_HEAD(map_client_list, MapClient) map_client_list
3125 = LIST_HEAD_INITIALIZER(map_client_list);
3127 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3129 MapClient *client = qemu_malloc(sizeof(*client));
3131 client->opaque = opaque;
3132 client->callback = callback;
3133 LIST_INSERT_HEAD(&map_client_list, client, link);
3134 return client;
3137 void cpu_unregister_map_client(void *_client)
3139 MapClient *client = (MapClient *)_client;
3141 LIST_REMOVE(client, link);
3144 static void cpu_notify_map_clients(void)
3146 MapClient *client;
3148 while (!LIST_EMPTY(&map_client_list)) {
3149 client = LIST_FIRST(&map_client_list);
3150 client->callback(client->opaque);
3151 LIST_REMOVE(client, link);
3155 /* Map a physical memory region into a host virtual address.
3156 * May map a subset of the requested range, given by and returned in *plen.
3157 * May return NULL if resources needed to perform the mapping are exhausted.
3158 * Use only for reads OR writes - not for read-modify-write operations.
3159 * Use cpu_register_map_client() to know when retrying the map operation is
3160 * likely to succeed.
3162 void *cpu_physical_memory_map(target_phys_addr_t addr,
3163 target_phys_addr_t *plen,
3164 int is_write)
3166 target_phys_addr_t len = *plen;
3167 target_phys_addr_t done = 0;
3168 int l;
3169 uint8_t *ret = NULL;
3170 uint8_t *ptr;
3171 target_phys_addr_t page;
3172 unsigned long pd;
3173 PhysPageDesc *p;
3174 unsigned long addr1;
3176 while (len > 0) {
3177 page = addr & TARGET_PAGE_MASK;
3178 l = (page + TARGET_PAGE_SIZE) - addr;
3179 if (l > len)
3180 l = len;
3181 p = phys_page_find(page >> TARGET_PAGE_BITS);
3182 if (!p) {
3183 pd = IO_MEM_UNASSIGNED;
3184 } else {
3185 pd = p->phys_offset;
3188 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3189 if (done || bounce.buffer) {
3190 break;
3192 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3193 bounce.addr = addr;
3194 bounce.len = l;
3195 if (!is_write) {
3196 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3198 ptr = bounce.buffer;
3199 } else {
3200 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3201 ptr = phys_ram_base + addr1;
3203 if (!done) {
3204 ret = ptr;
3205 } else if (ret + done != ptr) {
3206 break;
3209 len -= l;
3210 addr += l;
3211 done += l;
3213 *plen = done;
3214 return ret;
3217 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3218 * Will also mark the memory as dirty if is_write == 1. access_len gives
3219 * the amount of memory that was actually read or written by the caller.
3221 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3222 int is_write, target_phys_addr_t access_len)
3224 if (buffer != bounce.buffer) {
3225 if (is_write) {
3226 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3227 while (access_len) {
3228 unsigned l;
3229 l = TARGET_PAGE_SIZE;
3230 if (l > access_len)
3231 l = access_len;
3232 if (!cpu_physical_memory_is_dirty(addr1)) {
3233 /* invalidate code */
3234 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3235 /* set dirty bit */
3236 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3237 (0xff & ~CODE_DIRTY_FLAG);
3239 addr1 += l;
3240 access_len -= l;
3243 return;
3245 if (is_write) {
3246 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3248 qemu_free(bounce.buffer);
3249 bounce.buffer = NULL;
3250 cpu_notify_map_clients();
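/* Illustrative sketch, not part of the original file: the usual zero-copy
   pattern for a device writing into guest memory, with the bounce-buffer
   fallback.  guest_pa, size, data, opaque and retry_cb are hypothetical. */
#if 0
    target_phys_addr_t maplen = size;
    uint8_t *host = cpu_physical_memory_map(guest_pa, &maplen, 1 /* write */);
    if (!host) {
        /* the single bounce buffer is busy: ask to be notified and retry */
        cpu_register_map_client(opaque, retry_cb);
        return;
    }
    /* maplen may be shorter than requested; only that much was mapped */
    memcpy(host, data, maplen);
    cpu_physical_memory_unmap(host, maplen, 1, maplen);
#endif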
3253 /* warning: addr must be aligned */
3254 uint32_t ldl_phys(target_phys_addr_t addr)
3256 int io_index;
3257 uint8_t *ptr;
3258 uint32_t val;
3259 unsigned long pd;
3260 PhysPageDesc *p;
3262 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3263 if (!p) {
3264 pd = IO_MEM_UNASSIGNED;
3265 } else {
3266 pd = p->phys_offset;
3269 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3270 !(pd & IO_MEM_ROMD)) {
3271 /* I/O case */
3272 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3273 if (p)
3274 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3275 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3276 } else {
3277 /* RAM case */
3278 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3279 (addr & ~TARGET_PAGE_MASK);
3280 val = ldl_p(ptr);
3282 return val;
3285 /* warning: addr must be aligned */
3286 uint64_t ldq_phys(target_phys_addr_t addr)
3288 int io_index;
3289 uint8_t *ptr;
3290 uint64_t val;
3291 unsigned long pd;
3292 PhysPageDesc *p;
3294 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3295 if (!p) {
3296 pd = IO_MEM_UNASSIGNED;
3297 } else {
3298 pd = p->phys_offset;
3301 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3302 !(pd & IO_MEM_ROMD)) {
3303 /* I/O case */
3304 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3305 if (p)
3306 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3307 #ifdef TARGET_WORDS_BIGENDIAN
3308 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3309 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3310 #else
3311 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3312 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3313 #endif
3314 } else {
3315 /* RAM case */
3316 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3317 (addr & ~TARGET_PAGE_MASK);
3318 val = ldq_p(ptr);
3320 return val;
3323 /* XXX: optimize */
3324 uint32_t ldub_phys(target_phys_addr_t addr)
3326 uint8_t val;
3327 cpu_physical_memory_read(addr, &val, 1);
3328 return val;
3331 /* XXX: optimize */
3332 uint32_t lduw_phys(target_phys_addr_t addr)
3334 uint16_t val;
3335 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3336 return tswap16(val);
3339 #ifdef __GNUC__
3340 #define likely(x) __builtin_expect(!!(x), 1)
3341 #define unlikely(x) __builtin_expect(!!(x), 0)
3342 #else
3343 #define likely(x) x
3344 #define unlikely(x) x
3345 #endif
3347 /* warning: addr must be aligned. The ram page is not marked as dirty
3348 and the code inside is not invalidated. It is useful if the dirty
3349 bits are used to track modified PTEs */
3350 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3352 int io_index;
3353 uint8_t *ptr;
3354 unsigned long pd;
3355 PhysPageDesc *p;
3357 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3358 if (!p) {
3359 pd = IO_MEM_UNASSIGNED;
3360 } else {
3361 pd = p->phys_offset;
3364 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3365 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3366 if (p)
3367 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3368 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3369 } else {
3370 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3371 ptr = phys_ram_base + addr1;
3372 stl_p(ptr, val);
3374 if (unlikely(in_migration)) {
3375 if (!cpu_physical_memory_is_dirty(addr1)) {
3376 /* invalidate code */
3377 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3378 /* set dirty bit */
3379 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3380 (0xff & ~CODE_DIRTY_FLAG);
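/* Illustrative sketch, not part of the original file: target MMU code
   typically rewrites accessed/dirty bits in a guest PTE with the _notdirty
   variant so that the update itself does not dirty the page.  pte_addr and
   PTE_ACCESSED are hypothetical names. */
#if 0
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & PTE_ACCESSED)) {
        pte |= PTE_ACCESSED;
        stl_phys_notdirty(pte_addr, pte);
    }
#endif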
3386 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3388 int io_index;
3389 uint8_t *ptr;
3390 unsigned long pd;
3391 PhysPageDesc *p;
3393 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3394 if (!p) {
3395 pd = IO_MEM_UNASSIGNED;
3396 } else {
3397 pd = p->phys_offset;
3400 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3401 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3402 if (p)
3403 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3404 #ifdef TARGET_WORDS_BIGENDIAN
3405 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3406 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3407 #else
3408 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3409 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3410 #endif
3411 } else {
3412 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3413 (addr & ~TARGET_PAGE_MASK);
3414 stq_p(ptr, val);
3418 /* warning: addr must be aligned */
3419 void stl_phys(target_phys_addr_t addr, uint32_t val)
3421 int io_index;
3422 uint8_t *ptr;
3423 unsigned long pd;
3424 PhysPageDesc *p;
3426 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3427 if (!p) {
3428 pd = IO_MEM_UNASSIGNED;
3429 } else {
3430 pd = p->phys_offset;
3433 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3434 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3435 if (p)
3436 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3437 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3438 } else {
3439 unsigned long addr1;
3440 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3441 /* RAM case */
3442 ptr = phys_ram_base + addr1;
3443 stl_p(ptr, val);
3444 if (!cpu_physical_memory_is_dirty(addr1)) {
3445 /* invalidate code */
3446 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3447 /* set dirty bit */
3448 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3449 (0xff & ~CODE_DIRTY_FLAG);
3454 /* XXX: optimize */
3455 void stb_phys(target_phys_addr_t addr, uint32_t val)
3457 uint8_t v = val;
3458 cpu_physical_memory_write(addr, &v, 1);
3461 /* XXX: optimize */
3462 void stw_phys(target_phys_addr_t addr, uint32_t val)
3464 uint16_t v = tswap16(val);
3465 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3468 /* XXX: optimize */
3469 void stq_phys(target_phys_addr_t addr, uint64_t val)
3471 val = tswap64(val);
3472 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3475 #endif
3477 /* virtual memory access for debug */
3478 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3479 uint8_t *buf, int len, int is_write)
3481 int l;
3482 target_phys_addr_t phys_addr;
3483 target_ulong page;
3485 while (len > 0) {
3486 page = addr & TARGET_PAGE_MASK;
3487 phys_addr = cpu_get_phys_page_debug(env, page);
3488 /* if no physical page mapped, return an error */
3489 if (phys_addr == -1)
3490 return -1;
3491 l = (page + TARGET_PAGE_SIZE) - addr;
3492 if (l > len)
3493 l = len;
3494 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3495 buf, l, is_write);
3496 len -= l;
3497 buf += l;
3498 addr += l;
3500 return 0;
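/* Illustrative sketch, not part of the original file: debugger front ends
   (gdb stub, monitor) read guest memory through the current page tables this
   way; env and vaddr are hypothetical. */
#if 0
    uint8_t buf[16];
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        /* no physical page mapped at vaddr */
    }
#endif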
3503 /* in deterministic execution mode, instructions doing device I/Os
3504 must be at the end of the TB */
3505 void cpu_io_recompile(CPUState *env, void *retaddr)
3507 TranslationBlock *tb;
3508 uint32_t n, cflags;
3509 target_ulong pc, cs_base;
3510 uint64_t flags;
3512 tb = tb_find_pc((unsigned long)retaddr);
3513 if (!tb) {
3514 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3515 retaddr);
3517 n = env->icount_decr.u16.low + tb->icount;
3518 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3519 /* Calculate how many instructions had been executed before the fault
3520 occurred. */
3521 n = n - env->icount_decr.u16.low;
3522 /* Generate a new TB ending on the I/O insn. */
3523 n++;
3524 /* On MIPS and SH, delay slot instructions can only be restarted if
3525 they were already the first instruction in the TB. If this is not
3526 the first instruction in a TB then re-execute the preceding
3527 branch. */
3528 #if defined(TARGET_MIPS)
3529 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3530 env->active_tc.PC -= 4;
3531 env->icount_decr.u16.low++;
3532 env->hflags &= ~MIPS_HFLAG_BMASK;
3534 #elif defined(TARGET_SH4)
3535 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3536 && n > 1) {
3537 env->pc -= 2;
3538 env->icount_decr.u16.low++;
3539 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3541 #endif
3542 /* This should never happen. */
3543 if (n > CF_COUNT_MASK)
3544 cpu_abort(env, "TB too big during recompile");
3546 cflags = n | CF_LAST_IO;
3547 pc = tb->pc;
3548 cs_base = tb->cs_base;
3549 flags = tb->flags;
3550 tb_phys_invalidate(tb, -1);
3551 /* FIXME: In theory this could raise an exception. In practice
3552 we have already translated the block once so it's probably ok. */
3553 tb_gen_code(env, pc, cs_base, flags, cflags);
3554 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3555 the first in the TB) then we end up generating a whole new TB and
3556 repeating the fault, which is horribly inefficient.
3557 Better would be to execute just this insn uncached, or generate a
3558 second new TB. */
3559 cpu_resume_from_signal(env, NULL);
3562 void dump_exec_info(FILE *f,
3563 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3565 int i, target_code_size, max_target_code_size;
3566 int direct_jmp_count, direct_jmp2_count, cross_page;
3567 TranslationBlock *tb;
3569 target_code_size = 0;
3570 max_target_code_size = 0;
3571 cross_page = 0;
3572 direct_jmp_count = 0;
3573 direct_jmp2_count = 0;
3574 for(i = 0; i < nb_tbs; i++) {
3575 tb = &tbs[i];
3576 target_code_size += tb->size;
3577 if (tb->size > max_target_code_size)
3578 max_target_code_size = tb->size;
3579 if (tb->page_addr[1] != -1)
3580 cross_page++;
3581 if (tb->tb_next_offset[0] != 0xffff) {
3582 direct_jmp_count++;
3583 if (tb->tb_next_offset[1] != 0xffff) {
3584 direct_jmp2_count++;
3588 /* XXX: avoid using doubles ? */
3589 cpu_fprintf(f, "Translation buffer state:\n");
3590 cpu_fprintf(f, "gen code size %ld/%ld\n",
3591 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3592 cpu_fprintf(f, "TB count %d/%d\n",
3593 nb_tbs, code_gen_max_blocks);
3594 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3595 nb_tbs ? target_code_size / nb_tbs : 0,
3596 max_target_code_size);
3597 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3598 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3599 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3600 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3601 cross_page,
3602 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3603 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3604 direct_jmp_count,
3605 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3606 direct_jmp2_count,
3607 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3608 cpu_fprintf(f, "\nStatistics:\n");
3609 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3610 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3611 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3612 tcg_dump_info(f, cpu_fprintf);
3615 #if !defined(CONFIG_USER_ONLY)
3617 #define MMUSUFFIX _cmmu
3618 #define GETPC() NULL
3619 #define env cpu_single_env
3620 #define SOFTMMU_CODE_ACCESS
3622 #define SHIFT 0
3623 #include "softmmu_template.h"
3625 #define SHIFT 1
3626 #include "softmmu_template.h"
3628 #define SHIFT 2
3629 #include "softmmu_template.h"
3631 #define SHIFT 3
3632 #include "softmmu_template.h"
3634 #undef env
3636 #endif