Merge commit '5f87762742923e42114ed49c96251a245f109147' into upstream-merge
[qemu-kvm/fedora.git] / exec.c
blob f6d9ec9d8727b19485d438e1962edc945b422083
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
38 #include "cache-utils.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #include "osdep.h"
47 #include "kvm.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #endif
52 //#define DEBUG_TB_INVALIDATE
53 //#define DEBUG_FLUSH
54 //#define DEBUG_TLB
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
66 #undef DEBUG_TB_CHECK
67 #endif
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #if defined(TARGET_SPARC64)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 41
73 #elif defined(TARGET_SPARC)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 36
75 #elif defined(TARGET_ALPHA)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #define TARGET_VIRT_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_PPC64)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 42
82 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
83 #define TARGET_PHYS_ADDR_SPACE_BITS 36
84 #elif defined(TARGET_IA64)
85 #define TARGET_PHYS_ADDR_SPACE_BITS 36
86 #else
87 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
88 #define TARGET_PHYS_ADDR_SPACE_BITS 32
89 #endif
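/* These per-target limits bound the simulated physical address space: they
   clamp the ranges read from /proc/self/maps in page_init() and decide
   whether phys_page_find_alloc() needs an extra indirection level (see the
   TARGET_PHYS_ADDR_SPACE_BITS > 32 case below).  TARGET_VIRT_ADDR_SPACE_BITS
   is only defined for targets (currently alpha) whose user-mode page table
   must cover more than 32 bits of virtual address space. */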
91 static TranslationBlock *tbs;
92 int code_gen_max_blocks;
93 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
94 static int nb_tbs;
95 /* any access to the tbs or the page table must use this lock */
96 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
98 #if defined(__arm__) || defined(__sparc_v9__)
99 /* The prologue must be reachable with a direct jump. ARM and Sparc64
100 have limited branch ranges (possibly also PPC) so place it in a
 101 section close to the code segment. */
102 #define code_gen_section \
103 __attribute__((__section__(".gen_code"))) \
104 __attribute__((aligned (32)))
105 #elif defined(_WIN32)
106 /* Maximum alignment for Win32 is 16. */
107 #define code_gen_section \
108 __attribute__((aligned (16)))
109 #else
110 #define code_gen_section \
111 __attribute__((aligned (32)))
112 #endif
114 uint8_t code_gen_prologue[1024] code_gen_section;
115 static uint8_t *code_gen_buffer;
116 static unsigned long code_gen_buffer_size;
117 /* threshold to flush the translated code buffer */
118 static unsigned long code_gen_buffer_max_size;
119 uint8_t *code_gen_ptr;
121 #if !defined(CONFIG_USER_ONLY)
122 int phys_ram_fd;
123 uint8_t *phys_ram_dirty;
124 uint8_t *bios_mem;
125 static int in_migration;
127 typedef struct RAMBlock {
128 uint8_t *host;
129 ram_addr_t offset;
130 ram_addr_t length;
131 struct RAMBlock *next;
132 } RAMBlock;
134 static RAMBlock *ram_blocks;
135 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
136 then we can no longer assume contiguous ram offsets, and external uses
137 of this variable will break. */
138 ram_addr_t last_ram_offset;
139 #endif
141 CPUState *first_cpu;
142 /* current CPU in the current thread. It is only valid inside
143 cpu_exec() */
144 CPUState *cpu_single_env;
145 /* 0 = Do not count executed instructions.
146 1 = Precise instruction counting.
147 2 = Adaptive rate instruction counting. */
148 int use_icount = 0;
149 /* Current instruction counter. While executing translated code this may
150 include some instructions that have not yet been executed. */
151 int64_t qemu_icount;
153 typedef struct PageDesc {
154 /* list of TBs intersecting this ram page */
155 TranslationBlock *first_tb;
 156 /* in order to optimize self-modifying code, we count the number
 157 of code-invalidating writes to a given page before switching to a bitmap */
158 unsigned int code_write_count;
159 uint8_t *code_bitmap;
160 #if defined(CONFIG_USER_ONLY)
161 unsigned long flags;
162 #endif
163 } PageDesc;
165 typedef struct PhysPageDesc {
166 /* offset in host memory of the page + io_index in the low bits */
167 ram_addr_t phys_offset;
168 ram_addr_t region_offset;
169 } PhysPageDesc;
171 #define L2_BITS 10
172 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
173 /* XXX: this is a temporary hack for alpha target.
174 * In the future, this is to be replaced by a multi-level table
 175 * to actually be able to handle the complete 64-bit address space. */
177 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
178 #else
179 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
180 #endif
182 #define L1_SIZE (1 << L1_BITS)
183 #define L2_SIZE (1 << L2_BITS)
185 unsigned long qemu_real_host_page_size;
186 unsigned long qemu_host_page_bits;
187 unsigned long qemu_host_page_size;
188 unsigned long qemu_host_page_mask;
190 /* XXX: for system emulation, it could just be an array */
191 static PageDesc *l1_map[L1_SIZE];
192 static PhysPageDesc **l1_phys_map;
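/* Both maps are two-level tables keyed by page index: the top bits select an
   L1 slot and the low L2_BITS select an entry inside a lazily allocated array
   of L2_SIZE descriptors, i.e. roughly

       PageDesc *pd = l1_map[index >> L2_BITS] + (index & (L2_SIZE - 1));

   l1_map is statically sized for 32-bit (or TARGET_VIRT_ADDR_SPACE_BITS wide)
   virtual addresses, while l1_phys_map is allocated in page_init() and gains
   one more level when TARGET_PHYS_ADDR_SPACE_BITS exceeds 32. */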
194 #if !defined(CONFIG_USER_ONLY)
195 static void io_mem_init(void);
197 /* io memory support */
198 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
199 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
200 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
201 static char io_mem_used[IO_MEM_NB_ENTRIES];
202 static int io_mem_watch;
203 #endif
205 /* log support */
206 static const char *logfilename = "/tmp/qemu.log";
207 FILE *logfile;
208 int loglevel;
209 static int log_append = 0;
211 /* statistics */
212 static int tlb_flush_count;
213 static int tb_flush_count;
214 static int tb_phys_invalidate_count;
216 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
217 typedef struct subpage_t {
218 target_phys_addr_t base;
219 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
220 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
221 void *opaque[TARGET_PAGE_SIZE][2][4];
222 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
223 } subpage_t;
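/* A subpage_t backs a target page whose contents are split across several
   I/O regions: handlers and opaque pointers are recorded per byte offset
   within the page (SUBPAGE_IDX) and per access-size index (0/1/2 for
   8/16/32-bit accesses), so a single physical page can dispatch to
   different devices. */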
225 #ifdef _WIN32
226 static void map_exec(void *addr, long size)
228 DWORD old_protect;
229 VirtualProtect(addr, size,
230 PAGE_EXECUTE_READWRITE, &old_protect);
233 #else
234 static void map_exec(void *addr, long size)
236 unsigned long start, end, page_size;
238 page_size = getpagesize();
239 start = (unsigned long)addr;
240 start &= ~(page_size - 1);
242 end = (unsigned long)addr + size;
243 end += page_size - 1;
244 end &= ~(page_size - 1);
246 mprotect((void *)start, end - start,
247 PROT_READ | PROT_WRITE | PROT_EXEC);
249 #endif
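/* Both map_exec() variants above simply make the given range executable
   (while keeping it readable and writable); the POSIX version rounds the
   start down and the end up to host page boundaries, since mprotect()
   only operates on whole pages. */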
251 static void page_init(void)
253 /* NOTE: we can always suppose that qemu_host_page_size >=
254 TARGET_PAGE_SIZE */
255 #ifdef _WIN32
257 SYSTEM_INFO system_info;
259 GetSystemInfo(&system_info);
260 qemu_real_host_page_size = system_info.dwPageSize;
262 #else
263 qemu_real_host_page_size = getpagesize();
264 #endif
265 if (qemu_host_page_size == 0)
266 qemu_host_page_size = qemu_real_host_page_size;
267 if (qemu_host_page_size < TARGET_PAGE_SIZE)
268 qemu_host_page_size = TARGET_PAGE_SIZE;
269 qemu_host_page_bits = 0;
270 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
271 qemu_host_page_bits++;
272 qemu_host_page_mask = ~(qemu_host_page_size - 1);
273 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
274 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
276 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
278 long long startaddr, endaddr;
279 FILE *f;
280 int n;
282 mmap_lock();
283 last_brk = (unsigned long)sbrk(0);
284 f = fopen("/proc/self/maps", "r");
285 if (f) {
286 do {
287 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
288 if (n == 2) {
289 startaddr = MIN(startaddr,
290 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
291 endaddr = MIN(endaddr,
292 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
293 page_set_flags(startaddr & TARGET_PAGE_MASK,
294 TARGET_PAGE_ALIGN(endaddr),
295 PAGE_RESERVED);
297 } while (!feof(f));
298 fclose(f);
300 mmap_unlock();
302 #endif
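/* For user-mode emulation, page_init() walks /proc/self/maps and marks every
   range the host process has already mapped as PAGE_RESERVED (clamped to the
   guest physical address width), so later guest mappings are steered away
   from memory the emulator itself is using. */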
305 static inline PageDesc **page_l1_map(target_ulong index)
307 #if TARGET_LONG_BITS > 32
308 /* Host memory outside guest VM. For 32-bit targets we have already
309 excluded high addresses. */
310 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
311 return NULL;
312 #endif
313 return &l1_map[index >> L2_BITS];
316 static inline PageDesc *page_find_alloc(target_ulong index)
318 PageDesc **lp, *p;
319 lp = page_l1_map(index);
320 if (!lp)
321 return NULL;
323 p = *lp;
324 if (!p) {
325 /* allocate if not found */
326 #if defined(CONFIG_USER_ONLY)
327 size_t len = sizeof(PageDesc) * L2_SIZE;
328 /* Don't use qemu_malloc because it may recurse. */
329 p = mmap(0, len, PROT_READ | PROT_WRITE,
330 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
331 *lp = p;
332 if (h2g_valid(p)) {
333 unsigned long addr = h2g(p);
334 page_set_flags(addr & TARGET_PAGE_MASK,
335 TARGET_PAGE_ALIGN(addr + len),
336 PAGE_RESERVED);
338 #else
339 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
340 *lp = p;
341 #endif
343 return p + (index & (L2_SIZE - 1));
346 static inline PageDesc *page_find(target_ulong index)
348 PageDesc **lp, *p;
349 lp = page_l1_map(index);
350 if (!lp)
351 return NULL;
353 p = *lp;
354 if (!p)
355 return 0;
356 return p + (index & (L2_SIZE - 1));
359 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
361 void **lp, **p;
362 PhysPageDesc *pd;
364 p = (void **)l1_phys_map;
365 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
367 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
368 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
369 #endif
370 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
371 p = *lp;
372 if (!p) {
373 /* allocate if not found */
374 if (!alloc)
375 return NULL;
376 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
377 memset(p, 0, sizeof(void *) * L1_SIZE);
378 *lp = p;
380 #endif
381 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
382 pd = *lp;
383 if (!pd) {
384 int i;
385 /* allocate if not found */
386 if (!alloc)
387 return NULL;
388 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
389 *lp = pd;
390 for (i = 0; i < L2_SIZE; i++) {
391 pd[i].phys_offset = IO_MEM_UNASSIGNED;
392 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
395 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
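/* phys_page_find_alloc() mirrors page_find_alloc() for physical pages: with
   alloc == 0 it degrades to a pure lookup (see phys_page_find() just below),
   and freshly allocated L2 arrays start out with every entry set to
   IO_MEM_UNASSIGNED, so accesses to unmapped physical pages fall through to
   the "unassigned" I/O handlers. */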
398 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
400 return phys_page_find_alloc(index, 0);
403 #if !defined(CONFIG_USER_ONLY)
404 static void tlb_protect_code(ram_addr_t ram_addr);
405 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
406 target_ulong vaddr);
407 #define mmap_lock() do { } while(0)
408 #define mmap_unlock() do { } while(0)
409 #endif
411 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
413 #if defined(CONFIG_USER_ONLY)
414 /* Currently it is not recommended to allocate big chunks of data in
 415 user mode. It will change when a dedicated libc is used */
416 #define USE_STATIC_CODE_GEN_BUFFER
417 #endif
419 #ifdef USE_STATIC_CODE_GEN_BUFFER
420 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
421 #endif
423 static void code_gen_alloc(unsigned long tb_size)
425 if (kvm_enabled())
426 return;
428 #ifdef USE_STATIC_CODE_GEN_BUFFER
429 code_gen_buffer = static_code_gen_buffer;
430 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
431 map_exec(code_gen_buffer, code_gen_buffer_size);
432 #else
433 code_gen_buffer_size = tb_size;
434 if (code_gen_buffer_size == 0) {
435 #if defined(CONFIG_USER_ONLY)
436 /* in user mode, phys_ram_size is not meaningful */
437 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
438 #else
439 /* XXX: needs adjustments */
440 code_gen_buffer_size = (unsigned long)(ram_size / 4);
441 #endif
443 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
444 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
445 /* The code gen buffer location may have constraints depending on
446 the host cpu and OS */
447 #if defined(__linux__)
449 int flags;
450 void *start = NULL;
452 flags = MAP_PRIVATE | MAP_ANONYMOUS;
453 #if defined(__x86_64__)
454 flags |= MAP_32BIT;
455 /* Cannot map more than that */
456 if (code_gen_buffer_size > (800 * 1024 * 1024))
457 code_gen_buffer_size = (800 * 1024 * 1024);
458 #elif defined(__sparc_v9__)
459 // Map the buffer below 2G, so we can use direct calls and branches
460 flags |= MAP_FIXED;
461 start = (void *) 0x60000000UL;
462 if (code_gen_buffer_size > (512 * 1024 * 1024))
463 code_gen_buffer_size = (512 * 1024 * 1024);
464 #elif defined(__arm__)
465 /* Map the buffer below 32M, so we can use direct calls and branches */
466 flags |= MAP_FIXED;
467 start = (void *) 0x01000000UL;
468 if (code_gen_buffer_size > 16 * 1024 * 1024)
469 code_gen_buffer_size = 16 * 1024 * 1024;
470 #endif
471 code_gen_buffer = mmap(start, code_gen_buffer_size,
472 PROT_WRITE | PROT_READ | PROT_EXEC,
473 flags, -1, 0);
474 if (code_gen_buffer == MAP_FAILED) {
475 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 exit(1);
479 #elif defined(__FreeBSD__) || defined(__DragonFly__)
481 int flags;
482 void *addr = NULL;
483 flags = MAP_PRIVATE | MAP_ANONYMOUS;
484 #if defined(__x86_64__)
485 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
486 * 0x40000000 is free */
487 flags |= MAP_FIXED;
488 addr = (void *)0x40000000;
489 /* Cannot map more than that */
490 if (code_gen_buffer_size > (800 * 1024 * 1024))
491 code_gen_buffer_size = (800 * 1024 * 1024);
492 #endif
493 code_gen_buffer = mmap(addr, code_gen_buffer_size,
494 PROT_WRITE | PROT_READ | PROT_EXEC,
495 flags, -1, 0);
496 if (code_gen_buffer == MAP_FAILED) {
497 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
498 exit(1);
501 #else
502 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
503 map_exec(code_gen_buffer, code_gen_buffer_size);
504 #endif
505 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
506 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
507 code_gen_buffer_max_size = code_gen_buffer_size -
508 code_gen_max_block_size();
509 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
510 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
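/* code_gen_buffer_max_size leaves room for one maximum-sized translation
   block at the end of the buffer, so tb_alloc() can test the fill level
   before translating rather than after.  When KVM provides execution (the
   early return above), no TCG code buffer is set up at all. */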
513 /* Must be called before using the QEMU cpus. 'tb_size' is the size
514 (in bytes) allocated to the translation buffer. Zero means default
515 size. */
516 void cpu_exec_init_all(unsigned long tb_size)
518 cpu_gen_init();
519 code_gen_alloc(tb_size);
520 code_gen_ptr = code_gen_buffer;
521 page_init();
522 #if !defined(CONFIG_USER_ONLY)
523 io_mem_init();
524 #endif
527 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
529 #define CPU_COMMON_SAVE_VERSION 1
531 static void cpu_common_save(QEMUFile *f, void *opaque)
533 CPUState *env = opaque;
535 cpu_synchronize_state(env, 0);
537 qemu_put_be32s(f, &env->halted);
538 qemu_put_be32s(f, &env->interrupt_request);
541 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
543 CPUState *env = opaque;
545 if (version_id != CPU_COMMON_SAVE_VERSION)
546 return -EINVAL;
548 qemu_get_be32s(f, &env->halted);
549 qemu_get_be32s(f, &env->interrupt_request);
550 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
551 version_id is increased. */
552 env->interrupt_request &= ~0x01;
553 tlb_flush(env, 1);
554 cpu_synchronize_state(env, 1);
556 return 0;
558 #endif
560 CPUState *qemu_get_cpu(int cpu)
562 CPUState *env = first_cpu;
564 while (env) {
565 if (env->cpu_index == cpu)
566 break;
567 env = env->next_cpu;
570 return env;
573 void cpu_exec_init(CPUState *env)
575 CPUState **penv;
576 int cpu_index;
578 #if defined(CONFIG_USER_ONLY)
579 cpu_list_lock();
580 #endif
581 env->next_cpu = NULL;
582 penv = &first_cpu;
583 cpu_index = 0;
584 while (*penv != NULL) {
585 penv = &(*penv)->next_cpu;
586 cpu_index++;
588 env->cpu_index = cpu_index;
589 env->numa_node = 0;
590 TAILQ_INIT(&env->breakpoints);
591 TAILQ_INIT(&env->watchpoints);
 592 #ifdef _WIN32
593 env->thread_id = GetCurrentProcessId();
594 #else
595 env->thread_id = getpid();
596 #endif
597 *penv = env;
598 #if defined(CONFIG_USER_ONLY)
599 cpu_list_unlock();
600 #endif
601 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
602 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
603 cpu_common_save, cpu_common_load, env);
604 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
605 cpu_save, cpu_load, env);
606 #endif
609 static inline void invalidate_page_bitmap(PageDesc *p)
611 if (p->code_bitmap) {
612 qemu_free(p->code_bitmap);
613 p->code_bitmap = NULL;
615 p->code_write_count = 0;
618 /* set to NULL all the 'first_tb' fields in all PageDescs */
619 static void page_flush_tb(void)
621 int i, j;
622 PageDesc *p;
624 for(i = 0; i < L1_SIZE; i++) {
625 p = l1_map[i];
626 if (p) {
627 for(j = 0; j < L2_SIZE; j++) {
628 p->first_tb = NULL;
629 invalidate_page_bitmap(p);
630 p++;
636 /* flush all the translation blocks */
637 /* XXX: tb_flush is currently not thread safe */
638 void tb_flush(CPUState *env1)
640 CPUState *env;
641 #if defined(DEBUG_FLUSH)
642 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
643 (unsigned long)(code_gen_ptr - code_gen_buffer),
644 nb_tbs, nb_tbs > 0 ?
645 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
646 #endif
647 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
648 cpu_abort(env1, "Internal error: code buffer overflow\n");
650 nb_tbs = 0;
652 for(env = first_cpu; env != NULL; env = env->next_cpu) {
653 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
656 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
657 page_flush_tb();
659 code_gen_ptr = code_gen_buffer;
660 /* XXX: flush processor icache at this point if cache flush is
661 expensive */
662 tb_flush_count++;
665 #ifdef DEBUG_TB_CHECK
667 static void tb_invalidate_check(target_ulong address)
669 TranslationBlock *tb;
670 int i;
671 address &= TARGET_PAGE_MASK;
672 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
673 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
674 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
675 address >= tb->pc + tb->size)) {
676 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
677 address, (long)tb->pc, tb->size);
683 /* verify that all the pages have correct rights for code */
684 static void tb_page_check(void)
686 TranslationBlock *tb;
687 int i, flags1, flags2;
689 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
690 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
691 flags1 = page_get_flags(tb->pc);
692 flags2 = page_get_flags(tb->pc + tb->size - 1);
693 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
694 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
695 (long)tb->pc, tb->size, flags1, flags2);
701 static void tb_jmp_check(TranslationBlock *tb)
703 TranslationBlock *tb1;
704 unsigned int n1;
706 /* suppress any remaining jumps to this TB */
707 tb1 = tb->jmp_first;
708 for(;;) {
709 n1 = (long)tb1 & 3;
710 tb1 = (TranslationBlock *)((long)tb1 & ~3);
711 if (n1 == 2)
712 break;
713 tb1 = tb1->jmp_next[n1];
715 /* check end of list */
716 if (tb1 != tb) {
717 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
721 #endif
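/* The list helpers below rely on tagged pointers: a TranslationBlock pointer
   stored in a page list or jump list carries a small index in its two low
   bits (0 or 1 = which of the TB's two page slots / jump slots the link
   belongs to, 2 = end of list, i.e. the owning TB itself), which is why the
   code keeps masking with "& 3" and "& ~3". */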
723 /* invalidate one TB */
724 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
725 int next_offset)
727 TranslationBlock *tb1;
728 for(;;) {
729 tb1 = *ptb;
730 if (tb1 == tb) {
731 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
732 break;
734 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
738 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
740 TranslationBlock *tb1;
741 unsigned int n1;
743 for(;;) {
744 tb1 = *ptb;
745 n1 = (long)tb1 & 3;
746 tb1 = (TranslationBlock *)((long)tb1 & ~3);
747 if (tb1 == tb) {
748 *ptb = tb1->page_next[n1];
749 break;
751 ptb = &tb1->page_next[n1];
755 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
757 TranslationBlock *tb1, **ptb;
758 unsigned int n1;
760 ptb = &tb->jmp_next[n];
761 tb1 = *ptb;
762 if (tb1) {
763 /* find tb(n) in circular list */
764 for(;;) {
765 tb1 = *ptb;
766 n1 = (long)tb1 & 3;
767 tb1 = (TranslationBlock *)((long)tb1 & ~3);
768 if (n1 == n && tb1 == tb)
769 break;
770 if (n1 == 2) {
771 ptb = &tb1->jmp_first;
772 } else {
773 ptb = &tb1->jmp_next[n1];
776 /* now we can suppress tb(n) from the list */
777 *ptb = tb->jmp_next[n];
779 tb->jmp_next[n] = NULL;
783 /* reset the jump entry 'n' of a TB so that it is not chained to
784 another TB */
785 static inline void tb_reset_jump(TranslationBlock *tb, int n)
787 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
790 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
792 CPUState *env;
793 PageDesc *p;
794 unsigned int h, n1;
795 target_phys_addr_t phys_pc;
796 TranslationBlock *tb1, *tb2;
798 /* remove the TB from the hash list */
799 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
800 h = tb_phys_hash_func(phys_pc);
801 tb_remove(&tb_phys_hash[h], tb,
802 offsetof(TranslationBlock, phys_hash_next));
804 /* remove the TB from the page list */
805 if (tb->page_addr[0] != page_addr) {
806 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
807 tb_page_remove(&p->first_tb, tb);
808 invalidate_page_bitmap(p);
810 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
811 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
812 tb_page_remove(&p->first_tb, tb);
813 invalidate_page_bitmap(p);
816 tb_invalidated_flag = 1;
818 /* remove the TB from the hash list */
819 h = tb_jmp_cache_hash_func(tb->pc);
820 for(env = first_cpu; env != NULL; env = env->next_cpu) {
821 if (env->tb_jmp_cache[h] == tb)
822 env->tb_jmp_cache[h] = NULL;
825 /* suppress this TB from the two jump lists */
826 tb_jmp_remove(tb, 0);
827 tb_jmp_remove(tb, 1);
829 /* suppress any remaining jumps to this TB */
830 tb1 = tb->jmp_first;
831 for(;;) {
832 n1 = (long)tb1 & 3;
833 if (n1 == 2)
834 break;
835 tb1 = (TranslationBlock *)((long)tb1 & ~3);
836 tb2 = tb1->jmp_next[n1];
837 tb_reset_jump(tb1, n1);
838 tb1->jmp_next[n1] = NULL;
839 tb1 = tb2;
841 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
843 tb_phys_invalidate_count++;
846 static inline void set_bits(uint8_t *tab, int start, int len)
848 int end, mask, end1;
850 end = start + len;
851 tab += start >> 3;
852 mask = 0xff << (start & 7);
853 if ((start & ~7) == (end & ~7)) {
854 if (start < end) {
855 mask &= ~(0xff << (end & 7));
856 *tab |= mask;
858 } else {
859 *tab++ |= mask;
860 start = (start + 8) & ~7;
861 end1 = end & ~7;
862 while (start < end1) {
863 *tab++ = 0xff;
864 start += 8;
866 if (start < end) {
867 mask = ~(0xff << (end & 7));
868 *tab |= mask;
873 static void build_page_bitmap(PageDesc *p)
875 int n, tb_start, tb_end;
876 TranslationBlock *tb;
878 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
880 tb = p->first_tb;
881 while (tb != NULL) {
882 n = (long)tb & 3;
883 tb = (TranslationBlock *)((long)tb & ~3);
884 /* NOTE: this is subtle as a TB may span two physical pages */
885 if (n == 0) {
886 /* NOTE: tb_end may be after the end of the page, but
887 it is not a problem */
888 tb_start = tb->pc & ~TARGET_PAGE_MASK;
889 tb_end = tb_start + tb->size;
890 if (tb_end > TARGET_PAGE_SIZE)
891 tb_end = TARGET_PAGE_SIZE;
892 } else {
893 tb_start = 0;
894 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
896 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
897 tb = tb->page_next[n];
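/* The bitmap built above has one bit per byte of the target page, set for
   every byte covered by translated code.  tb_invalidate_phys_page_fast()
   consults it so that small guest writes which miss all translated code can
   skip the expensive invalidation path. */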
901 TranslationBlock *tb_gen_code(CPUState *env,
902 target_ulong pc, target_ulong cs_base,
903 int flags, int cflags)
905 TranslationBlock *tb;
906 uint8_t *tc_ptr;
907 target_ulong phys_pc, phys_page2, virt_page2;
908 int code_gen_size;
910 phys_pc = get_phys_addr_code(env, pc);
911 tb = tb_alloc(pc);
912 if (!tb) {
913 /* flush must be done */
914 tb_flush(env);
915 /* cannot fail at this point */
916 tb = tb_alloc(pc);
917 /* Don't forget to invalidate previous TB info. */
918 tb_invalidated_flag = 1;
920 tc_ptr = code_gen_ptr;
921 tb->tc_ptr = tc_ptr;
922 tb->cs_base = cs_base;
923 tb->flags = flags;
924 tb->cflags = cflags;
925 cpu_gen_code(env, tb, &code_gen_size);
926 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
928 /* check next page if needed */
929 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
930 phys_page2 = -1;
931 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
932 phys_page2 = get_phys_addr_code(env, virt_page2);
934 tb_link_phys(tb, phys_pc, phys_page2);
935 return tb;
938 /* invalidate all TBs which intersect with the target physical page
939 starting in range [start;end[. NOTE: start and end must refer to
940 the same physical page. 'is_cpu_write_access' should be true if called
941 from a real cpu write access: the virtual CPU will exit the current
942 TB if code is modified inside this TB. */
943 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
944 int is_cpu_write_access)
946 TranslationBlock *tb, *tb_next, *saved_tb;
947 CPUState *env = cpu_single_env;
948 target_ulong tb_start, tb_end;
949 PageDesc *p;
950 int n;
951 #ifdef TARGET_HAS_PRECISE_SMC
952 int current_tb_not_found = is_cpu_write_access;
953 TranslationBlock *current_tb = NULL;
954 int current_tb_modified = 0;
955 target_ulong current_pc = 0;
956 target_ulong current_cs_base = 0;
957 int current_flags = 0;
958 #endif /* TARGET_HAS_PRECISE_SMC */
960 p = page_find(start >> TARGET_PAGE_BITS);
961 if (!p)
962 return;
963 if (!p->code_bitmap &&
964 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
965 is_cpu_write_access) {
966 /* build code bitmap */
967 build_page_bitmap(p);
970 /* we remove all the TBs in the range [start, end[ */
971 /* XXX: see if in some cases it could be faster to invalidate all the code */
972 tb = p->first_tb;
973 while (tb != NULL) {
974 n = (long)tb & 3;
975 tb = (TranslationBlock *)((long)tb & ~3);
976 tb_next = tb->page_next[n];
977 /* NOTE: this is subtle as a TB may span two physical pages */
978 if (n == 0) {
979 /* NOTE: tb_end may be after the end of the page, but
980 it is not a problem */
981 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
982 tb_end = tb_start + tb->size;
983 } else {
984 tb_start = tb->page_addr[1];
985 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
987 if (!(tb_end <= start || tb_start >= end)) {
988 #ifdef TARGET_HAS_PRECISE_SMC
989 if (current_tb_not_found) {
990 current_tb_not_found = 0;
991 current_tb = NULL;
992 if (env->mem_io_pc) {
993 /* now we have a real cpu fault */
994 current_tb = tb_find_pc(env->mem_io_pc);
997 if (current_tb == tb &&
998 (current_tb->cflags & CF_COUNT_MASK) != 1) {
999 /* If we are modifying the current TB, we must stop
1000 its execution. We could be more precise by checking
1001 that the modification is after the current PC, but it
1002 would require a specialized function to partially
1003 restore the CPU state */
1005 current_tb_modified = 1;
1006 cpu_restore_state(current_tb, env,
1007 env->mem_io_pc, NULL);
1008 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1009 &current_flags);
1011 #endif /* TARGET_HAS_PRECISE_SMC */
1012 /* we need to do that to handle the case where a signal
1013 occurs while doing tb_phys_invalidate() */
1014 saved_tb = NULL;
1015 if (env) {
1016 saved_tb = env->current_tb;
1017 env->current_tb = NULL;
1019 tb_phys_invalidate(tb, -1);
1020 if (env) {
1021 env->current_tb = saved_tb;
1022 if (env->interrupt_request && env->current_tb)
1023 cpu_interrupt(env, env->interrupt_request);
1026 tb = tb_next;
1028 #if !defined(CONFIG_USER_ONLY)
1029 /* if no code remaining, no need to continue to use slow writes */
1030 if (!p->first_tb) {
1031 invalidate_page_bitmap(p);
1032 if (is_cpu_write_access) {
1033 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1036 #endif
1037 #ifdef TARGET_HAS_PRECISE_SMC
1038 if (current_tb_modified) {
1039 /* we generate a block containing just the instruction
1040 modifying the memory. It will ensure that it cannot modify
1041 itself */
1042 env->current_tb = NULL;
1043 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1044 cpu_resume_from_signal(env, NULL);
1046 #endif
1049 /* len must be <= 8 and start must be a multiple of len */
1050 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1052 PageDesc *p;
1053 int offset, b;
1054 #if 0
1055 if (1) {
1056 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1057 cpu_single_env->mem_io_vaddr, len,
1058 cpu_single_env->eip,
1059 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1061 #endif
1062 p = page_find(start >> TARGET_PAGE_BITS);
1063 if (!p)
1064 return;
1065 if (p->code_bitmap) {
1066 offset = start & ~TARGET_PAGE_MASK;
1067 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1068 if (b & ((1 << len) - 1))
1069 goto do_invalidate;
1070 } else {
1071 do_invalidate:
1072 tb_invalidate_phys_page_range(start, start + len, 1);
1076 #if !defined(CONFIG_SOFTMMU)
1077 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1078 unsigned long pc, void *puc)
1080 TranslationBlock *tb;
1081 PageDesc *p;
1082 int n;
1083 #ifdef TARGET_HAS_PRECISE_SMC
1084 TranslationBlock *current_tb = NULL;
1085 CPUState *env = cpu_single_env;
1086 int current_tb_modified = 0;
1087 target_ulong current_pc = 0;
1088 target_ulong current_cs_base = 0;
1089 int current_flags = 0;
1090 #endif
1092 addr &= TARGET_PAGE_MASK;
1093 p = page_find(addr >> TARGET_PAGE_BITS);
1094 if (!p)
1095 return;
1096 tb = p->first_tb;
1097 #ifdef TARGET_HAS_PRECISE_SMC
1098 if (tb && pc != 0) {
1099 current_tb = tb_find_pc(pc);
1101 #endif
1102 while (tb != NULL) {
1103 n = (long)tb & 3;
1104 tb = (TranslationBlock *)((long)tb & ~3);
1105 #ifdef TARGET_HAS_PRECISE_SMC
1106 if (current_tb == tb &&
1107 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1108 /* If we are modifying the current TB, we must stop
1109 its execution. We could be more precise by checking
1110 that the modification is after the current PC, but it
1111 would require a specialized function to partially
1112 restore the CPU state */
1114 current_tb_modified = 1;
1115 cpu_restore_state(current_tb, env, pc, puc);
1116 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1117 &current_flags);
1119 #endif /* TARGET_HAS_PRECISE_SMC */
1120 tb_phys_invalidate(tb, addr);
1121 tb = tb->page_next[n];
1123 p->first_tb = NULL;
1124 #ifdef TARGET_HAS_PRECISE_SMC
1125 if (current_tb_modified) {
1126 /* we generate a block containing just the instruction
1127 modifying the memory. It will ensure that it cannot modify
1128 itself */
1129 env->current_tb = NULL;
1130 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1131 cpu_resume_from_signal(env, puc);
1133 #endif
1135 #endif
1137 /* add the tb in the target page and protect it if necessary */
1138 static inline void tb_alloc_page(TranslationBlock *tb,
1139 unsigned int n, target_ulong page_addr)
1141 PageDesc *p;
1142 TranslationBlock *last_first_tb;
1144 tb->page_addr[n] = page_addr;
1145 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1146 tb->page_next[n] = p->first_tb;
1147 last_first_tb = p->first_tb;
1148 p->first_tb = (TranslationBlock *)((long)tb | n);
1149 invalidate_page_bitmap(p);
1151 #if defined(TARGET_HAS_SMC) || 1
1153 #if defined(CONFIG_USER_ONLY)
1154 if (p->flags & PAGE_WRITE) {
1155 target_ulong addr;
1156 PageDesc *p2;
1157 int prot;
1159 /* force the host page as non writable (writes will have a
1160 page fault + mprotect overhead) */
1161 page_addr &= qemu_host_page_mask;
1162 prot = 0;
1163 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1164 addr += TARGET_PAGE_SIZE) {
1166 p2 = page_find (addr >> TARGET_PAGE_BITS);
1167 if (!p2)
1168 continue;
1169 prot |= p2->flags;
1170 p2->flags &= ~PAGE_WRITE;
1171 page_get_flags(addr);
1173 mprotect(g2h(page_addr), qemu_host_page_size,
1174 (prot & PAGE_BITS) & ~PAGE_WRITE);
1175 #ifdef DEBUG_TB_INVALIDATE
1176 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1177 page_addr);
1178 #endif
1180 #else
1181 /* if some code is already present, then the pages are already
1182 protected. So we handle the case where only the first TB is
1183 allocated in a physical page */
1184 if (!last_first_tb) {
1185 tlb_protect_code(page_addr);
1187 #endif
1189 #endif /* TARGET_HAS_SMC */
1192 /* Allocate a new translation block. Flush the translation buffer if
1193 too many translation blocks or too much generated code. */
1194 TranslationBlock *tb_alloc(target_ulong pc)
1196 TranslationBlock *tb;
1198 if (nb_tbs >= code_gen_max_blocks ||
1199 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1200 return NULL;
1201 tb = &tbs[nb_tbs++];
1202 tb->pc = pc;
1203 tb->cflags = 0;
1204 return tb;
1207 void tb_free(TranslationBlock *tb)
 1209 /* In practice this is mostly used for single-use temporary TBs.
1210 Ignore the hard cases and just back up if this TB happens to
1211 be the last one generated. */
1212 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1213 code_gen_ptr = tb->tc_ptr;
1214 nb_tbs--;
1218 /* add a new TB and link it to the physical page tables. phys_page2 is
1219 (-1) to indicate that only one page contains the TB. */
1220 void tb_link_phys(TranslationBlock *tb,
1221 target_ulong phys_pc, target_ulong phys_page2)
1223 unsigned int h;
1224 TranslationBlock **ptb;
1226 /* Grab the mmap lock to stop another thread invalidating this TB
1227 before we are done. */
1228 mmap_lock();
1229 /* add in the physical hash table */
1230 h = tb_phys_hash_func(phys_pc);
1231 ptb = &tb_phys_hash[h];
1232 tb->phys_hash_next = *ptb;
1233 *ptb = tb;
1235 /* add in the page list */
1236 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1237 if (phys_page2 != -1)
1238 tb_alloc_page(tb, 1, phys_page2);
1239 else
1240 tb->page_addr[1] = -1;
1242 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1243 tb->jmp_next[0] = NULL;
1244 tb->jmp_next[1] = NULL;
1246 /* init original jump addresses */
1247 if (tb->tb_next_offset[0] != 0xffff)
1248 tb_reset_jump(tb, 0);
1249 if (tb->tb_next_offset[1] != 0xffff)
1250 tb_reset_jump(tb, 1);
1252 #ifdef DEBUG_TB_CHECK
1253 tb_page_check();
1254 #endif
1255 mmap_unlock();
1258 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1259 tb[1].tc_ptr. Return NULL if not found */
1260 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1262 int m_min, m_max, m;
1263 unsigned long v;
1264 TranslationBlock *tb;
1266 if (nb_tbs <= 0)
1267 return NULL;
1268 if (tc_ptr < (unsigned long)code_gen_buffer ||
1269 tc_ptr >= (unsigned long)code_gen_ptr)
1270 return NULL;
1271 /* binary search (cf Knuth) */
1272 m_min = 0;
1273 m_max = nb_tbs - 1;
1274 while (m_min <= m_max) {
1275 m = (m_min + m_max) >> 1;
1276 tb = &tbs[m];
1277 v = (unsigned long)tb->tc_ptr;
1278 if (v == tc_ptr)
1279 return tb;
1280 else if (tc_ptr < v) {
1281 m_max = m - 1;
1282 } else {
1283 m_min = m + 1;
1286 return &tbs[m_max];
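/* The binary search above works because tbs[] entries are handed out in
   translation order and each block's code is carved sequentially from
   code_gen_buffer, so tc_ptr values are monotonically increasing; the search
   therefore lands on the last TB whose code starts at or before tc_ptr. */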
1289 static void tb_reset_jump_recursive(TranslationBlock *tb);
1291 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1293 TranslationBlock *tb1, *tb_next, **ptb;
1294 unsigned int n1;
1296 tb1 = tb->jmp_next[n];
1297 if (tb1 != NULL) {
1298 /* find head of list */
1299 for(;;) {
1300 n1 = (long)tb1 & 3;
1301 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1302 if (n1 == 2)
1303 break;
1304 tb1 = tb1->jmp_next[n1];
 1306 /* we are now sure that tb jumps to tb1 */
1307 tb_next = tb1;
1309 /* remove tb from the jmp_first list */
1310 ptb = &tb_next->jmp_first;
1311 for(;;) {
1312 tb1 = *ptb;
1313 n1 = (long)tb1 & 3;
1314 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1315 if (n1 == n && tb1 == tb)
1316 break;
1317 ptb = &tb1->jmp_next[n1];
1319 *ptb = tb->jmp_next[n];
1320 tb->jmp_next[n] = NULL;
1322 /* suppress the jump to next tb in generated code */
1323 tb_reset_jump(tb, n);
1325 /* suppress jumps in the tb on which we could have jumped */
1326 tb_reset_jump_recursive(tb_next);
1330 static void tb_reset_jump_recursive(TranslationBlock *tb)
1332 tb_reset_jump_recursive2(tb, 0);
1333 tb_reset_jump_recursive2(tb, 1);
1336 #if defined(TARGET_HAS_ICE)
1337 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1339 target_phys_addr_t addr;
1340 target_ulong pd;
1341 ram_addr_t ram_addr;
1342 PhysPageDesc *p;
1344 addr = cpu_get_phys_page_debug(env, pc);
1345 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1346 if (!p) {
1347 pd = IO_MEM_UNASSIGNED;
1348 } else {
1349 pd = p->phys_offset;
1351 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1352 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1354 #endif
1356 /* Add a watchpoint. */
1357 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1358 int flags, CPUWatchpoint **watchpoint)
1360 target_ulong len_mask = ~(len - 1);
1361 CPUWatchpoint *wp;
1363 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1364 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1365 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1366 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1367 return -EINVAL;
1369 wp = qemu_malloc(sizeof(*wp));
1371 wp->vaddr = addr;
1372 wp->len_mask = len_mask;
1373 wp->flags = flags;
1375 /* keep all GDB-injected watchpoints in front */
1376 if (flags & BP_GDB)
1377 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1378 else
1379 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1381 tlb_flush_page(env, addr);
1383 if (watchpoint)
1384 *watchpoint = wp;
1385 return 0;
1388 /* Remove a specific watchpoint. */
1389 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1390 int flags)
1392 target_ulong len_mask = ~(len - 1);
1393 CPUWatchpoint *wp;
1395 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1396 if (addr == wp->vaddr && len_mask == wp->len_mask
1397 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1398 cpu_watchpoint_remove_by_ref(env, wp);
1399 return 0;
1402 return -ENOENT;
1405 /* Remove a specific watchpoint by reference. */
1406 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1408 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1410 tlb_flush_page(env, watchpoint->vaddr);
1412 qemu_free(watchpoint);
1415 /* Remove all matching watchpoints. */
1416 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1418 CPUWatchpoint *wp, *next;
1420 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1421 if (wp->flags & mask)
1422 cpu_watchpoint_remove_by_ref(env, wp);
1426 /* Add a breakpoint. */
1427 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1428 CPUBreakpoint **breakpoint)
1430 #if defined(TARGET_HAS_ICE)
1431 CPUBreakpoint *bp;
1433 bp = qemu_malloc(sizeof(*bp));
1435 bp->pc = pc;
1436 bp->flags = flags;
1438 /* keep all GDB-injected breakpoints in front */
1439 if (flags & BP_GDB)
1440 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1441 else
1442 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1444 breakpoint_invalidate(env, pc);
1446 if (breakpoint)
1447 *breakpoint = bp;
1448 return 0;
1449 #else
1450 return -ENOSYS;
1451 #endif
1454 /* Remove a specific breakpoint. */
1455 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1457 #if defined(TARGET_HAS_ICE)
1458 CPUBreakpoint *bp;
1460 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1461 if (bp->pc == pc && bp->flags == flags) {
1462 cpu_breakpoint_remove_by_ref(env, bp);
1463 return 0;
1466 return -ENOENT;
1467 #else
1468 return -ENOSYS;
1469 #endif
1472 /* Remove a specific breakpoint by reference. */
1473 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1475 #if defined(TARGET_HAS_ICE)
1476 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1478 breakpoint_invalidate(env, breakpoint->pc);
1480 qemu_free(breakpoint);
1481 #endif
1484 /* Remove all matching breakpoints. */
1485 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1487 #if defined(TARGET_HAS_ICE)
1488 CPUBreakpoint *bp, *next;
1490 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1491 if (bp->flags & mask)
1492 cpu_breakpoint_remove_by_ref(env, bp);
1494 #endif
1497 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1498 CPU loop after each instruction */
1499 void cpu_single_step(CPUState *env, int enabled)
1501 #if defined(TARGET_HAS_ICE)
1502 if (env->singlestep_enabled != enabled) {
1503 env->singlestep_enabled = enabled;
1504 if (kvm_enabled())
1505 kvm_update_guest_debug(env, 0);
1506 else {
1507 /* must flush all the translated code to avoid inconsistencies */
1508 /* XXX: only flush what is necessary */
1509 tb_flush(env);
1512 #endif
 1515 /* enable or disable low-level logging */
1516 void cpu_set_log(int log_flags)
1518 loglevel = log_flags;
1519 if (loglevel && !logfile) {
1520 logfile = fopen(logfilename, log_append ? "a" : "w");
1521 if (!logfile) {
1522 perror(logfilename);
1523 _exit(1);
1525 #if !defined(CONFIG_SOFTMMU)
1526 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1528 static char logfile_buf[4096];
1529 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1531 #else
1532 setvbuf(logfile, NULL, _IOLBF, 0);
1533 #endif
1534 log_append = 1;
1536 if (!loglevel && logfile) {
1537 fclose(logfile);
1538 logfile = NULL;
1542 void cpu_set_log_filename(const char *filename)
1544 logfilename = strdup(filename);
1545 if (logfile) {
1546 fclose(logfile);
1547 logfile = NULL;
1549 cpu_set_log(loglevel);
1552 static void cpu_unlink_tb(CPUState *env)
1554 #if defined(USE_NPTL)
1555 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1556 problem and hope the cpu will stop of its own accord. For userspace
1557 emulation this often isn't actually as bad as it sounds. Often
1558 signals are used primarily to interrupt blocking syscalls. */
1559 #else
1560 TranslationBlock *tb;
1561 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1563 tb = env->current_tb;
1564 /* if the cpu is currently executing code, we must unlink it and
1565 all the potentially executing TB */
1566 if (tb && !testandset(&interrupt_lock)) {
1567 env->current_tb = NULL;
1568 tb_reset_jump_recursive(tb);
1569 resetlock(&interrupt_lock);
1571 #endif
1574 /* mask must never be zero, except for A20 change call */
1575 void cpu_interrupt(CPUState *env, int mask)
1577 int old_mask;
1579 old_mask = env->interrupt_request;
1580 env->interrupt_request |= mask;
1581 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1582 kvm_update_interrupt_request(env);
1584 #ifndef CONFIG_USER_ONLY
1586 * If called from iothread context, wake the target cpu in
 1587 * case it's halted.
1589 if (!qemu_cpu_self(env)) {
1590 qemu_cpu_kick(env);
1591 return;
1593 #endif
1595 if (use_icount) {
1596 env->icount_decr.u16.high = 0xffff;
1597 #ifndef CONFIG_USER_ONLY
1598 if (!can_do_io(env)
1599 && (mask & ~old_mask) != 0) {
1600 cpu_abort(env, "Raised interrupt while not in I/O function");
1602 #endif
1603 } else {
1604 cpu_unlink_tb(env);
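/* With icount enabled, the interrupt is delivered by forcing icount_decr
   negative (u16.high = 0xffff) so the generated code takes its exit path at
   the next instruction-count check; otherwise the currently executing TB
   chain is unlinked so the execution loop returns and notices
   interrupt_request. */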
1608 void cpu_reset_interrupt(CPUState *env, int mask)
1610 env->interrupt_request &= ~mask;
1613 void cpu_exit(CPUState *env)
1615 env->exit_request = 1;
1616 cpu_unlink_tb(env);
1619 const CPULogItem cpu_log_items[] = {
1620 { CPU_LOG_TB_OUT_ASM, "out_asm",
1621 "show generated host assembly code for each compiled TB" },
1622 { CPU_LOG_TB_IN_ASM, "in_asm",
1623 "show target assembly code for each compiled TB" },
1624 { CPU_LOG_TB_OP, "op",
1625 "show micro ops for each compiled TB" },
1626 { CPU_LOG_TB_OP_OPT, "op_opt",
1627 "show micro ops "
1628 #ifdef TARGET_I386
1629 "before eflags optimization and "
1630 #endif
1631 "after liveness analysis" },
1632 { CPU_LOG_INT, "int",
1633 "show interrupts/exceptions in short format" },
1634 { CPU_LOG_EXEC, "exec",
1635 "show trace before each executed TB (lots of logs)" },
1636 { CPU_LOG_TB_CPU, "cpu",
1637 "show CPU state before block translation" },
1638 #ifdef TARGET_I386
1639 { CPU_LOG_PCALL, "pcall",
1640 "show protected mode far calls/returns/exceptions" },
1641 { CPU_LOG_RESET, "cpu_reset",
1642 "show CPU state before CPU resets" },
1643 #endif
1644 #ifdef DEBUG_IOPORT
1645 { CPU_LOG_IOPORT, "ioport",
1646 "show all i/o ports accesses" },
1647 #endif
1648 { 0, NULL, NULL },
1651 static int cmp1(const char *s1, int n, const char *s2)
1653 if (strlen(s2) != n)
1654 return 0;
1655 return memcmp(s1, s2, n) == 0;
 1658 /* takes a comma-separated list of log masks. Returns 0 on error. */
1659 int cpu_str_to_log_mask(const char *str)
1661 const CPULogItem *item;
1662 int mask;
1663 const char *p, *p1;
1665 p = str;
1666 mask = 0;
1667 for(;;) {
1668 p1 = strchr(p, ',');
1669 if (!p1)
1670 p1 = p + strlen(p);
1671 if(cmp1(p,p1-p,"all")) {
1672 for(item = cpu_log_items; item->mask != 0; item++) {
1673 mask |= item->mask;
1675 } else {
1676 for(item = cpu_log_items; item->mask != 0; item++) {
1677 if (cmp1(p, p1 - p, item->name))
1678 goto found;
1680 return 0;
1682 found:
1683 mask |= item->mask;
1684 if (*p1 != ',')
1685 break;
1686 p = p1 + 1;
1688 return mask;
1691 void cpu_abort(CPUState *env, const char *fmt, ...)
1693 va_list ap;
1694 va_list ap2;
1696 va_start(ap, fmt);
1697 va_copy(ap2, ap);
1698 fprintf(stderr, "qemu: fatal: ");
1699 vfprintf(stderr, fmt, ap);
1700 fprintf(stderr, "\n");
1701 #ifdef TARGET_I386
1702 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1703 #else
1704 cpu_dump_state(env, stderr, fprintf, 0);
1705 #endif
1706 if (qemu_log_enabled()) {
1707 qemu_log("qemu: fatal: ");
1708 qemu_log_vprintf(fmt, ap2);
1709 qemu_log("\n");
1710 #ifdef TARGET_I386
1711 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1712 #else
1713 log_cpu_state(env, 0);
1714 #endif
1715 qemu_log_flush();
1716 qemu_log_close();
1718 va_end(ap2);
1719 va_end(ap);
1720 abort();
1723 CPUState *cpu_copy(CPUState *env)
1725 CPUState *new_env = cpu_init(env->cpu_model_str);
1726 CPUState *next_cpu = new_env->next_cpu;
1727 int cpu_index = new_env->cpu_index;
1728 #if defined(TARGET_HAS_ICE)
1729 CPUBreakpoint *bp;
1730 CPUWatchpoint *wp;
1731 #endif
1733 memcpy(new_env, env, sizeof(CPUState));
1735 /* Preserve chaining and index. */
1736 new_env->next_cpu = next_cpu;
1737 new_env->cpu_index = cpu_index;
1739 /* Clone all break/watchpoints.
1740 Note: Once we support ptrace with hw-debug register access, make sure
1741 BP_CPU break/watchpoints are handled correctly on clone. */
 1742 TAILQ_INIT(&new_env->breakpoints);  /* init the copy's lists, not the source's */
 1743 TAILQ_INIT(&new_env->watchpoints);
1744 #if defined(TARGET_HAS_ICE)
1745 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1746 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1748 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1749 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1750 wp->flags, NULL);
1752 #endif
1754 return new_env;
1757 #if !defined(CONFIG_USER_ONLY)
1759 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1761 unsigned int i;
1763 /* Discard jump cache entries for any tb which might potentially
1764 overlap the flushed page. */
1765 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1766 memset (&env->tb_jmp_cache[i], 0,
1767 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1769 i = tb_jmp_cache_hash_page(addr);
1770 memset (&env->tb_jmp_cache[i], 0,
1771 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1774 /* NOTE: if flush_global is true, also flush global entries (not
1775 implemented yet) */
1776 void tlb_flush(CPUState *env, int flush_global)
1778 int i;
1780 #if defined(DEBUG_TLB)
1781 printf("tlb_flush:\n");
1782 #endif
1783 /* must reset current TB so that interrupts cannot modify the
1784 links while we are modifying them */
1785 env->current_tb = NULL;
1787 for(i = 0; i < CPU_TLB_SIZE; i++) {
1788 int mmu_idx;
1789 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1790 env->tlb_table[mmu_idx][i].addr_read = -1;
1791 env->tlb_table[mmu_idx][i].addr_write = -1;
1792 env->tlb_table[mmu_idx][i].addr_code = -1;
1796 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1798 #ifdef CONFIG_KQEMU
1799 if (env->kqemu_enabled) {
1800 kqemu_flush(env, flush_global);
1802 #endif
1803 tlb_flush_count++;
1806 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1808 if (addr == (tlb_entry->addr_read &
1809 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1810 addr == (tlb_entry->addr_write &
1811 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1812 addr == (tlb_entry->addr_code &
1813 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1814 tlb_entry->addr_read = -1;
1815 tlb_entry->addr_write = -1;
1816 tlb_entry->addr_code = -1;
1820 void tlb_flush_page(CPUState *env, target_ulong addr)
1822 int i;
1823 int mmu_idx;
1825 #if defined(DEBUG_TLB)
1826 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1827 #endif
1828 /* must reset current TB so that interrupts cannot modify the
1829 links while we are modifying them */
1830 env->current_tb = NULL;
1832 addr &= TARGET_PAGE_MASK;
1833 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1834 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1835 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1837 tlb_flush_jmp_cache(env, addr);
1839 #ifdef CONFIG_KQEMU
1840 if (env->kqemu_enabled) {
1841 kqemu_flush_page(env, addr);
1843 #endif
1846 /* update the TLBs so that writes to code in the virtual page 'addr'
1847 can be detected */
1848 static void tlb_protect_code(ram_addr_t ram_addr)
1850 cpu_physical_memory_reset_dirty(ram_addr,
1851 ram_addr + TARGET_PAGE_SIZE,
1852 CODE_DIRTY_FLAG);
1855 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1856 tested for self modifying code */
1857 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1858 target_ulong vaddr)
1860 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1863 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1864 unsigned long start, unsigned long length)
1866 unsigned long addr;
1867 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1868 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1869 if ((addr - start) < length) {
1870 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1875 /* Note: start and end must be within the same ram block. */
1876 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1877 int dirty_flags)
1879 CPUState *env;
1880 unsigned long length, start1;
1881 int i, mask, len;
1882 uint8_t *p;
1884 start &= TARGET_PAGE_MASK;
1885 end = TARGET_PAGE_ALIGN(end);
1887 length = end - start;
1888 if (length == 0)
1889 return;
1890 len = length >> TARGET_PAGE_BITS;
1891 #ifdef CONFIG_KQEMU
1892 /* XXX: should not depend on cpu context */
1893 env = first_cpu;
1894 if (env->kqemu_enabled) {
1895 ram_addr_t addr;
1896 addr = start;
1897 for(i = 0; i < len; i++) {
1898 kqemu_set_notdirty(env, addr);
1899 addr += TARGET_PAGE_SIZE;
1902 #endif
1903 mask = ~dirty_flags;
1904 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1905 for(i = 0; i < len; i++)
1906 p[i] &= mask;
1908 /* we modify the TLB cache so that the dirty bit will be set again
1909 when accessing the range */
1910 start1 = (unsigned long)qemu_get_ram_ptr(start);
 1911 /* Check that we don't span multiple blocks - this breaks the
1912 address comparisons below. */
1913 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1914 != (end - 1) - start) {
1915 abort();
1918 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1919 int mmu_idx;
1920 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1921 for(i = 0; i < CPU_TLB_SIZE; i++)
1922 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1923 start1, length);
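/* Clearing bits in phys_ram_dirty alone is not enough: the loops above also
   re-tag the matching write TLB entries with TLB_NOTDIRTY, which forces the
   next store to such a page through the slow path, where the dirty flags are
   set again (handled by the not-dirty memory callbacks). */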
1928 int cpu_physical_memory_set_dirty_tracking(int enable)
1930 if (kvm_enabled()) {
1931 return kvm_set_migration_log(enable);
1933 return 0;
1936 int cpu_physical_memory_get_dirty_tracking(void)
1938 return in_migration;
1941 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1942 target_phys_addr_t end_addr)
1944 int ret = 0;
1946 if (kvm_enabled())
1947 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1948 return ret;
1951 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1953 ram_addr_t ram_addr;
1954 void *p;
1956 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1957 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1958 + tlb_entry->addend);
1959 ram_addr = qemu_ram_addr_from_host(p);
1960 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1961 tlb_entry->addr_write |= TLB_NOTDIRTY;
1966 /* update the TLB according to the current state of the dirty bits */
1967 void cpu_tlb_update_dirty(CPUState *env)
1969 int i;
1970 int mmu_idx;
1971 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1972 for(i = 0; i < CPU_TLB_SIZE; i++)
1973 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1977 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1979 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1980 tlb_entry->addr_write = vaddr;
1983 /* update the TLB corresponding to virtual page vaddr
1984 so that it is no longer dirty */
1985 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1987 int i;
1988 int mmu_idx;
1990 vaddr &= TARGET_PAGE_MASK;
1991 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1992 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1993 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1996 /* add a new TLB entry. At most one entry for a given virtual address
1997 is permitted. Return 0 if OK or 2 if the page could not be mapped
1998 (can only happen in non SOFTMMU mode for I/O pages or pages
1999 conflicting with the host address space). */
2000 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2001 target_phys_addr_t paddr, int prot,
2002 int mmu_idx, int is_softmmu)
2004 PhysPageDesc *p;
2005 unsigned long pd;
2006 unsigned int index;
2007 target_ulong address;
2008 target_ulong code_address;
2009 target_phys_addr_t addend;
2010 int ret;
2011 CPUTLBEntry *te;
2012 CPUWatchpoint *wp;
2013 target_phys_addr_t iotlb;
2015 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2016 if (!p) {
2017 pd = IO_MEM_UNASSIGNED;
2018 } else {
2019 pd = p->phys_offset;
2021 #if defined(DEBUG_TLB)
2022 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2023 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2024 #endif
2026 ret = 0;
2027 address = vaddr;
2028 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2029 /* IO memory case (romd handled later) */
2030 address |= TLB_MMIO;
2032 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2033 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2034 /* Normal RAM. */
2035 iotlb = pd & TARGET_PAGE_MASK;
2036 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2037 iotlb |= IO_MEM_NOTDIRTY;
2038 else
2039 iotlb |= IO_MEM_ROM;
2040 } else {
2041 /* IO handlers are currently passed a physical address.
2042 It would be nice to pass an offset from the base address
2043 of that region. This would avoid having to special case RAM,
2044 and avoid full address decoding in every device.
2045 We can't use the high bits of pd for this because
2046 IO_MEM_ROMD uses these as a ram address. */
2047 iotlb = (pd & ~TARGET_PAGE_MASK);
2048 if (p) {
2049 iotlb += p->region_offset;
2050 } else {
2051 iotlb += paddr;
2055 code_address = address;
2056 /* Make accesses to pages with watchpoints go via the
2057 watchpoint trap routines. */
2058 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2059 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2060 iotlb = io_mem_watch + paddr;
2061 /* TODO: The memory case can be optimized by not trapping
2062 reads of pages with a write breakpoint. */
2063 address |= TLB_MMIO;
2067 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2068 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2069 te = &env->tlb_table[mmu_idx][index];
2070 te->addend = addend - vaddr;
2071 if (prot & PAGE_READ) {
2072 te->addr_read = address;
2073 } else {
2074 te->addr_read = -1;
2077 if (prot & PAGE_EXEC) {
2078 te->addr_code = code_address;
2079 } else {
2080 te->addr_code = -1;
2082 if (prot & PAGE_WRITE) {
2083 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2084 (pd & IO_MEM_ROMD)) {
2085 /* Write access calls the I/O callback. */
2086 te->addr_write = address | TLB_MMIO;
2087 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2088 !cpu_physical_memory_is_dirty(pd)) {
2089 te->addr_write = address | TLB_NOTDIRTY;
2090 } else {
2091 te->addr_write = address;
2093 } else {
2094 te->addr_write = -1;
2096 return ret;
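/*
 * Call-site sketch (illustrative only; the real callers are the per-target
 * MMU fault handlers, not this file). Assuming vaddr/paddr/prot/mmu_idx
 * have been resolved by the target's page-table walk:
 *
 *     ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK,
 *                             prot, mmu_idx, is_softmmu);
 */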
2099 #else
2101 void tlb_flush(CPUState *env, int flush_global)
2105 void tlb_flush_page(CPUState *env, target_ulong addr)
2109 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2110 target_phys_addr_t paddr, int prot,
2111 int mmu_idx, int is_softmmu)
2113 return 0;
2117 * Walks guest process memory "regions" one by one
2118 * and calls callback function 'fn' for each region.
2120 int walk_memory_regions(void *priv,
2121 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2123 unsigned long start, end;
2124 PageDesc *p = NULL;
2125 int i, j, prot, prot1;
2126 int rc = 0;
2128 start = end = -1;
2129 prot = 0;
2131 for (i = 0; i <= L1_SIZE; i++) {
2132 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2133 for (j = 0; j < L2_SIZE; j++) {
2134 prot1 = (p == NULL) ? 0 : p[j].flags;
2136      * A "region" is one contiguous chunk of memory
2137      * that has the same protection flags set.
2139 if (prot1 != prot) {
2140 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2141 if (start != -1) {
2142 rc = (*fn)(priv, start, end, prot);
2143 /* callback can stop iteration by returning != 0 */
2144 if (rc != 0)
2145 return (rc);
2147 if (prot1 != 0)
2148 start = end;
2149 else
2150 start = -1;
2151 prot = prot1;
2153 if (p == NULL)
2154 break;
2157 return (rc);
2160 static int dump_region(void *priv, unsigned long start,
2161 unsigned long end, unsigned long prot)
2163 FILE *f = (FILE *)priv;
2165 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2166 start, end, end - start,
2167 ((prot & PAGE_READ) ? 'r' : '-'),
2168 ((prot & PAGE_WRITE) ? 'w' : '-'),
2169 ((prot & PAGE_EXEC) ? 'x' : '-'));
2171 return (0);
2174 /* dump memory mappings */
2175 void page_dump(FILE *f)
2177 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2178 "start", "end", "size", "prot");
2179 walk_memory_regions(f, dump_region);
2182 int page_get_flags(target_ulong address)
2184 PageDesc *p;
2186 p = page_find(address >> TARGET_PAGE_BITS);
2187 if (!p)
2188 return 0;
2189 return p->flags;
2192 /* modify the flags of a page and invalidate the code if
2193 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2194 depending on PAGE_WRITE */
2195 void page_set_flags(target_ulong start, target_ulong end, int flags)
2197 PageDesc *p;
2198 target_ulong addr;
2200 /* mmap_lock should already be held. */
2201 start = start & TARGET_PAGE_MASK;
2202 end = TARGET_PAGE_ALIGN(end);
2203 if (flags & PAGE_WRITE)
2204 flags |= PAGE_WRITE_ORG;
2205 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2206 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2207 /* We may be called for host regions that are outside guest
2208 address space. */
2209 if (!p)
2210 return;
2211 /* if the write protection is set, then we invalidate the code
2212 inside */
2213 if (!(p->flags & PAGE_WRITE) &&
2214 (flags & PAGE_WRITE) &&
2215 p->first_tb) {
2216 tb_invalidate_phys_page(addr, 0, NULL);
2218 p->flags = flags;
2222 int page_check_range(target_ulong start, target_ulong len, int flags)
2224 PageDesc *p;
2225 target_ulong end;
2226 target_ulong addr;
2228 if (start + len < start)
2229 /* we've wrapped around */
2230 return -1;
2232     end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2233 start = start & TARGET_PAGE_MASK;
2235 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2236 p = page_find(addr >> TARGET_PAGE_BITS);
2237 if( !p )
2238 return -1;
2239 if( !(p->flags & PAGE_VALID) )
2240 return -1;
2242 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2243 return -1;
2244 if (flags & PAGE_WRITE) {
2245 if (!(p->flags & PAGE_WRITE_ORG))
2246 return -1;
2247 /* unprotect the page if it was put read-only because it
2248 contains translated code */
2249 if (!(p->flags & PAGE_WRITE)) {
2250 if (!page_unprotect(addr, 0, NULL))
2251 return -1;
2253 return 0;
2256 return 0;
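/*
 * Usage sketch (illustrative; the real callers live in the user-mode
 * emulation code, e.g. the mmap/mprotect emulation):
 *
 *     // validate a guest buffer before copying from it
 *     if (page_check_range(guest_addr, size, PAGE_READ) != 0)
 *         return -1;
 *
 *     // record new protections for a guest mprotect(addr, len, PROT_READ)
 *     page_set_flags(addr, addr + len, PAGE_VALID | PAGE_READ);
 *
 * guest_addr, size, addr and len are hypothetical names.
 */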
2259 /* called from signal handler: invalidate the code and unprotect the
2260 page. Return TRUE if the fault was successfully handled. */
2261 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2263 unsigned int page_index, prot, pindex;
2264 PageDesc *p, *p1;
2265 target_ulong host_start, host_end, addr;
2267 /* Technically this isn't safe inside a signal handler. However we
2268 know this only ever happens in a synchronous SEGV handler, so in
2269 practice it seems to be ok. */
2270 mmap_lock();
2272 host_start = address & qemu_host_page_mask;
2273 page_index = host_start >> TARGET_PAGE_BITS;
2274 p1 = page_find(page_index);
2275 if (!p1) {
2276 mmap_unlock();
2277 return 0;
2279 host_end = host_start + qemu_host_page_size;
2280 p = p1;
2281 prot = 0;
2282 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2283 prot |= p->flags;
2284 p++;
2286 /* if the page was really writable, then we change its
2287 protection back to writable */
2288 if (prot & PAGE_WRITE_ORG) {
2289 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2290 if (!(p1[pindex].flags & PAGE_WRITE)) {
2291 mprotect((void *)g2h(host_start), qemu_host_page_size,
2292 (prot & PAGE_BITS) | PAGE_WRITE);
2293 p1[pindex].flags |= PAGE_WRITE;
2294 /* and since the content will be modified, we must invalidate
2295 the corresponding translated code. */
2296 tb_invalidate_phys_page(address, pc, puc);
2297 #ifdef DEBUG_TB_CHECK
2298 tb_invalidate_check(address);
2299 #endif
2300 mmap_unlock();
2301 return 1;
2304 mmap_unlock();
2305 return 0;
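/*
 * Caller sketch (illustrative; the real caller is the host SEGV handler in
 * the user-mode emulation code): when a write faults on a page that was
 * write-protected to guard translated code, the handler converts the host
 * fault address back to a guest address and retries once page_unprotect()
 * reports success:
 *
 *     if (is_write && page_unprotect(h2g(host_fault_addr), pc, puc))
 *         return 1;   // fault handled, re-execute the faulting instruction
 *
 * host_fault_addr, pc and puc come from the signal context; h2g() is the
 * host-to-guest counterpart of the g2h() conversion used above.
 */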
2308 static inline void tlb_set_dirty(CPUState *env,
2309 unsigned long addr, target_ulong vaddr)
2312 #endif /* defined(CONFIG_USER_ONLY) */
2314 #if !defined(CONFIG_USER_ONLY)
2316 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2317 ram_addr_t memory, ram_addr_t region_offset);
2318 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2319 ram_addr_t orig_memory, ram_addr_t region_offset);
2320 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2321 need_subpage) \
2322 do { \
2323 if (addr > start_addr) \
2324 start_addr2 = 0; \
2325 else { \
2326 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2327 if (start_addr2 > 0) \
2328 need_subpage = 1; \
2331 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2332 end_addr2 = TARGET_PAGE_SIZE - 1; \
2333 else { \
2334 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2335 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2336 need_subpage = 1; \
2338 } while (0)
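/*
 * Worked example (assuming a 4 KB TARGET_PAGE_SIZE): registering a region
 * with start_addr = 0x1100 and orig_size = 0x200, evaluated for the page
 * addr = 0x1000, yields
 *
 *     start_addr2  = 0x1100 & ~TARGET_PAGE_MASK               = 0x100
 *     end_addr2    = (0x1100 + 0x200 - 1) & ~TARGET_PAGE_MASK = 0x2ff
 *     need_subpage = 1
 *
 * i.e. only the sub-range [0x100, 0x2ff] of that page is covered, so a
 * subpage container has to be set up by the code below.
 */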
2340 /* register physical memory. 'size' must be a multiple of the target
2341 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2342 io memory page. The address used when calling the IO function is
2343 the offset from the start of the region, plus region_offset. Both
2344 start_addr and region_offset are rounded down to a page boundary
2345 before calculating this offset. This should not be a problem unless
2346 the low bits of start_addr and region_offset differ. */
2347 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2348 ram_addr_t size,
2349 ram_addr_t phys_offset,
2350 ram_addr_t region_offset)
2352 target_phys_addr_t addr, end_addr;
2353 PhysPageDesc *p;
2354 CPUState *env;
2355 ram_addr_t orig_size = size;
2356 void *subpage;
2358 #ifdef CONFIG_KQEMU
2359 /* XXX: should not depend on cpu context */
2360 env = first_cpu;
2361 if (env->kqemu_enabled) {
2362 kqemu_set_phys_mem(start_addr, size, phys_offset);
2364 #endif
2365 if (kvm_enabled())
2366 kvm_set_phys_mem(start_addr, size, phys_offset);
2368 if (phys_offset == IO_MEM_UNASSIGNED) {
2369 region_offset = start_addr;
2371 region_offset &= TARGET_PAGE_MASK;
2372 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2373 end_addr = start_addr + (target_phys_addr_t)size;
2374 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2375 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2376 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2377 ram_addr_t orig_memory = p->phys_offset;
2378 target_phys_addr_t start_addr2, end_addr2;
2379 int need_subpage = 0;
2381 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2382 need_subpage);
2383 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2384 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2385 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2386 &p->phys_offset, orig_memory,
2387 p->region_offset);
2388 } else {
2389 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2390 >> IO_MEM_SHIFT];
2392 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2393 region_offset);
2394 p->region_offset = 0;
2395 } else {
2396 p->phys_offset = phys_offset;
2397 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2398 (phys_offset & IO_MEM_ROMD))
2399 phys_offset += TARGET_PAGE_SIZE;
2401 } else {
2402 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2403 p->phys_offset = phys_offset;
2404 p->region_offset = region_offset;
2405 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2406 (phys_offset & IO_MEM_ROMD)) {
2407 phys_offset += TARGET_PAGE_SIZE;
2408 } else {
2409 target_phys_addr_t start_addr2, end_addr2;
2410 int need_subpage = 0;
2412 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2413 end_addr2, need_subpage);
2415 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2416 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2417 &p->phys_offset, IO_MEM_UNASSIGNED,
2418 addr & TARGET_PAGE_MASK);
2419 subpage_register(subpage, start_addr2, end_addr2,
2420 phys_offset, region_offset);
2421 p->region_offset = 0;
2425 region_offset += TARGET_PAGE_SIZE;
2428 /* since each CPU stores ram addresses in its TLB cache, we must
2429 reset the modified entries */
2430 /* XXX: slow ! */
2431 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2432 tlb_flush(env, 1);
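/*
 * Usage sketch (illustrative only): a board model typically allocates its
 * guest RAM and registers it through the cpu_register_physical_memory()
 * wrapper (which calls the function above with region_offset == 0):
 *
 *     ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
 *     cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
 *
 * ram_size is a hypothetical name; qemu_ram_alloc() is defined later in
 * this file.
 */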
2436 /* XXX: temporary until new memory mapping API */
2437 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2439 PhysPageDesc *p;
2441 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2442 if (!p)
2443 return IO_MEM_UNASSIGNED;
2444 return p->phys_offset;
2447 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2449 if (kvm_enabled())
2450 kvm_coalesce_mmio_region(addr, size);
2453 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2455 if (kvm_enabled())
2456 kvm_uncoalesce_mmio_region(addr, size);
2459 #ifdef CONFIG_KQEMU
2460 /* XXX: better than nothing */
2461 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2463 ram_addr_t addr;
2464 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2465 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2466 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2467 abort();
2469 addr = last_ram_offset;
2470 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2471 return addr;
2473 #endif
2475 #ifdef __linux__
2477 #include <sys/vfs.h>
2479 #define HUGETLBFS_MAGIC 0x958458f6
2481 static long gethugepagesize(const char *path)
2483 struct statfs fs;
2484 int ret;
2486 do {
2487 ret = statfs(path, &fs);
2488 } while (ret != 0 && errno == EINTR);
2490 if (ret != 0) {
2491 perror("statfs");
2492 return 0;
2495 if (fs.f_type != HUGETLBFS_MAGIC)
2496 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2498 return fs.f_bsize;
2501 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2503 char *filename;
2504 void *area;
2505 int fd;
2506 #ifdef MAP_POPULATE
2507 int flags;
2508 #endif
2509 unsigned long hpagesize;
2510 extern int mem_prealloc;
2512 if (!path) {
2513 return NULL;
2516 hpagesize = gethugepagesize(path);
2517 if (!hpagesize) {
2518 return NULL;
2521 if (memory < hpagesize) {
2522 return NULL;
2525 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2526 fprintf(stderr, "host lacks mmu notifiers, disabling --mem-path\n");
2527 return NULL;
2530 if (asprintf(&filename, "%s/kvm.XXXXXX", path) == -1) {
2531 return NULL;
2534 fd = mkstemp(filename);
2535 if (fd < 0) {
2536 perror("mkstemp");
2537 free(filename);
2538 return NULL;
2540 unlink(filename);
2541 free(filename);
2543 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2546      * ftruncate is not supported by hugetlbfs on older
2547 * hosts, so don't bother checking for errors.
2548 * If anything goes wrong with it under other filesystems,
2549 * mmap will fail.
2551 ftruncate(fd, memory);
2553 #ifdef MAP_POPULATE
2554     /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2555 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2556 * to sidestep this quirk.
2558 flags = mem_prealloc ? MAP_POPULATE|MAP_SHARED : MAP_PRIVATE;
2559 area = mmap(0, memory, PROT_READ|PROT_WRITE, flags, fd, 0);
2560 #else
2561 area = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
2562 #endif
2563 if (area == MAP_FAILED) {
2564 perror("alloc_mem_area: can't mmap hugetlbfs pages");
2565 close(fd);
2566 return (NULL);
2568 return area;
2571 #else
2573 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2575 return NULL;
2578 #endif
2580 extern const char *mem_path;
2582 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2584 RAMBlock *new_block;
2586 #ifdef CONFIG_KQEMU
2587 if (kqemu_phys_ram_base) {
2588 return kqemu_ram_alloc(size);
2590 #endif
2592 size = TARGET_PAGE_ALIGN(size);
2593 new_block = qemu_malloc(sizeof(*new_block));
2595 new_block->host = file_ram_alloc(size, mem_path);
2596 if (!new_block->host) {
2597 new_block->host = qemu_vmalloc(size);
2599 new_block->offset = last_ram_offset;
2600 new_block->length = size;
2602 new_block->next = ram_blocks;
2603 ram_blocks = new_block;
2605 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2606 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2607 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2608 0xff, size >> TARGET_PAGE_BITS);
2610 last_ram_offset += size;
2612 if (kvm_enabled())
2613 kvm_setup_guest_memory(new_block->host, size);
2615 return new_block->offset;
2618 void qemu_ram_free(ram_addr_t addr)
2620 /* TODO: implement this. */
2623 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2624 With the exception of the softmmu code in this file, this should
2625 only be used for local memory (e.g. video ram) that the device owns,
2626 and knows it isn't going to access beyond the end of the block.
2628 It should not be used for general purpose DMA.
2629 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2631 void *qemu_get_ram_ptr(ram_addr_t addr)
2633 RAMBlock *prev;
2634 RAMBlock **prevp;
2635 RAMBlock *block;
2637 #ifdef CONFIG_KQEMU
2638 if (kqemu_phys_ram_base) {
2639 return kqemu_phys_ram_base + addr;
2641 #endif
2643 prev = NULL;
2644 prevp = &ram_blocks;
2645 block = ram_blocks;
2646 while (block && (block->offset > addr
2647 || block->offset + block->length <= addr)) {
2648 if (prev)
2649 prevp = &prev->next;
2650 prev = block;
2651 block = block->next;
2653 if (!block) {
2654 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2655 abort();
2657     /* Move this entry to the start of the list.  */
2658 if (prev) {
2659 prev->next = block->next;
2660 block->next = *prevp;
2661 *prevp = block;
2663 return block->host + (addr - block->offset);
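/*
 * Illustrative sketch: a device that owns local memory (e.g. video RAM)
 * can pair qemu_ram_alloc() with qemu_get_ram_ptr() as described in the
 * comment above:
 *
 *     ram_addr_t vram_offset = qemu_ram_alloc(vram_size);
 *     uint8_t *vram_ptr = qemu_get_ram_ptr(vram_offset);
 *
 * vram_size, vram_offset and vram_ptr are hypothetical names.
 */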
2666 /* Some of the softmmu routines need to translate from a host pointer
2667 (typically a TLB entry) back to a ram offset. */
2668 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2670 RAMBlock *prev;
2671 RAMBlock **prevp;
2672 RAMBlock *block;
2673 uint8_t *host = ptr;
2675 #ifdef CONFIG_KQEMU
2676 if (kqemu_phys_ram_base) {
2677 return host - kqemu_phys_ram_base;
2679 #endif
2681 prev = NULL;
2682 prevp = &ram_blocks;
2683 block = ram_blocks;
2684 while (block && (block->host > host
2685 || block->host + block->length <= host)) {
2686 if (prev)
2687 prevp = &prev->next;
2688 prev = block;
2689 block = block->next;
2691 if (!block) {
2692 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2693 abort();
2695 return block->offset + (host - block->host);
2698 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2700 #ifdef DEBUG_UNASSIGNED
2701 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2702 #endif
2703 #if defined(TARGET_SPARC)
2704 do_unassigned_access(addr, 0, 0, 0, 1);
2705 #endif
2706 return 0;
2709 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2711 #ifdef DEBUG_UNASSIGNED
2712 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2713 #endif
2714 #if defined(TARGET_SPARC)
2715 do_unassigned_access(addr, 0, 0, 0, 2);
2716 #endif
2717 return 0;
2720 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2722 #ifdef DEBUG_UNASSIGNED
2723 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2724 #endif
2725 #if defined(TARGET_SPARC)
2726 do_unassigned_access(addr, 0, 0, 0, 4);
2727 #endif
2728 return 0;
2731 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2733 #ifdef DEBUG_UNASSIGNED
2734 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2735 #endif
2736 #if defined(TARGET_SPARC)
2737 do_unassigned_access(addr, 1, 0, 0, 1);
2738 #endif
2741 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2743 #ifdef DEBUG_UNASSIGNED
2744 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2745 #endif
2746 #if defined(TARGET_SPARC)
2747 do_unassigned_access(addr, 1, 0, 0, 2);
2748 #endif
2751 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2753 #ifdef DEBUG_UNASSIGNED
2754 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2755 #endif
2756 #if defined(TARGET_SPARC)
2757 do_unassigned_access(addr, 1, 0, 0, 4);
2758 #endif
2761 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2762 unassigned_mem_readb,
2763 unassigned_mem_readw,
2764 unassigned_mem_readl,
2767 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2768 unassigned_mem_writeb,
2769 unassigned_mem_writew,
2770 unassigned_mem_writel,
2773 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2774 uint32_t val)
2776 int dirty_flags;
2777 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2778 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2779 #if !defined(CONFIG_USER_ONLY)
2780 tb_invalidate_phys_page_fast(ram_addr, 1);
2781 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2782 #endif
2784 stb_p(qemu_get_ram_ptr(ram_addr), val);
2785 #ifdef CONFIG_KQEMU
2786 if (cpu_single_env->kqemu_enabled &&
2787 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2788 kqemu_modify_page(cpu_single_env, ram_addr);
2789 #endif
2790 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2791 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2792 /* we remove the notdirty callback only if the code has been
2793 flushed */
2794 if (dirty_flags == 0xff)
2795 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2798 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2799 uint32_t val)
2801 int dirty_flags;
2802 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2803 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2804 #if !defined(CONFIG_USER_ONLY)
2805 tb_invalidate_phys_page_fast(ram_addr, 2);
2806 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2807 #endif
2809 stw_p(qemu_get_ram_ptr(ram_addr), val);
2810 #ifdef CONFIG_KQEMU
2811 if (cpu_single_env->kqemu_enabled &&
2812 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2813 kqemu_modify_page(cpu_single_env, ram_addr);
2814 #endif
2815 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2816 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2817 /* we remove the notdirty callback only if the code has been
2818 flushed */
2819 if (dirty_flags == 0xff)
2820 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2823 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2824 uint32_t val)
2826 int dirty_flags;
2827 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2828 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2829 #if !defined(CONFIG_USER_ONLY)
2830 tb_invalidate_phys_page_fast(ram_addr, 4);
2831 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2832 #endif
2834 stl_p(qemu_get_ram_ptr(ram_addr), val);
2835 #ifdef CONFIG_KQEMU
2836 if (cpu_single_env->kqemu_enabled &&
2837 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2838 kqemu_modify_page(cpu_single_env, ram_addr);
2839 #endif
2840 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2841 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2842 /* we remove the notdirty callback only if the code has been
2843 flushed */
2844 if (dirty_flags == 0xff)
2845 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2848 static CPUReadMemoryFunc *error_mem_read[3] = {
2849 NULL, /* never used */
2850 NULL, /* never used */
2851 NULL, /* never used */
2854 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2855 notdirty_mem_writeb,
2856 notdirty_mem_writew,
2857 notdirty_mem_writel,
2860 /* Generate a debug exception if a watchpoint has been hit. */
2861 static void check_watchpoint(int offset, int len_mask, int flags)
2863 CPUState *env = cpu_single_env;
2864 target_ulong pc, cs_base;
2865 TranslationBlock *tb;
2866 target_ulong vaddr;
2867 CPUWatchpoint *wp;
2868 int cpu_flags;
2870 if (env->watchpoint_hit) {
2871 /* We re-entered the check after replacing the TB. Now raise
2872          * the debug interrupt so that it will trigger after the
2873 * current instruction. */
2874 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2875 return;
2877 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2878 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2879 if ((vaddr == (wp->vaddr & len_mask) ||
2880 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2881 wp->flags |= BP_WATCHPOINT_HIT;
2882 if (!env->watchpoint_hit) {
2883 env->watchpoint_hit = wp;
2884 tb = tb_find_pc(env->mem_io_pc);
2885 if (!tb) {
2886 cpu_abort(env, "check_watchpoint: could not find TB for "
2887 "pc=%p", (void *)env->mem_io_pc);
2889 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2890 tb_phys_invalidate(tb, -1);
2891 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2892 env->exception_index = EXCP_DEBUG;
2893 } else {
2894 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2895 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2897 cpu_resume_from_signal(env, NULL);
2899 } else {
2900 wp->flags &= ~BP_WATCHPOINT_HIT;
2905 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2906    so these check for a hit and then pass through to the normal out-of-line
2907 phys routines. */
2908 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2910 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2911 return ldub_phys(addr);
2914 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2916 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2917 return lduw_phys(addr);
2920 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2922 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2923 return ldl_phys(addr);
2926 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2927 uint32_t val)
2929 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2930 stb_phys(addr, val);
2933 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2934 uint32_t val)
2936 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2937 stw_phys(addr, val);
2940 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2941 uint32_t val)
2943 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2944 stl_phys(addr, val);
2947 static CPUReadMemoryFunc *watch_mem_read[3] = {
2948 watch_mem_readb,
2949 watch_mem_readw,
2950 watch_mem_readl,
2953 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2954 watch_mem_writeb,
2955 watch_mem_writew,
2956 watch_mem_writel,
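/*
 * Sketch of how these handlers come into play (illustrative): a debugger
 * front end inserts a watchpoint, which makes tlb_set_page_exec() above
 * route accesses to that page through io_mem_watch:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, watch_addr, 4, BP_MEM_WRITE, &wp);
 *
 * cpu_watchpoint_insert() is defined elsewhere in this file; watch_addr is
 * a hypothetical guest virtual address.
 */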
2959 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2960 unsigned int len)
2962 uint32_t ret;
2963 unsigned int idx;
2965 idx = SUBPAGE_IDX(addr);
2966 #if defined(DEBUG_SUBPAGE)
2967 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2968 mmio, len, addr, idx);
2969 #endif
2970 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2971 addr + mmio->region_offset[idx][0][len]);
2973 return ret;
2976 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2977 uint32_t value, unsigned int len)
2979 unsigned int idx;
2981 idx = SUBPAGE_IDX(addr);
2982 #if defined(DEBUG_SUBPAGE)
2983 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2984 mmio, len, addr, idx, value);
2985 #endif
2986 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2987 addr + mmio->region_offset[idx][1][len],
2988 value);
2991 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2993 #if defined(DEBUG_SUBPAGE)
2994 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2995 #endif
2997 return subpage_readlen(opaque, addr, 0);
3000 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3001 uint32_t value)
3003 #if defined(DEBUG_SUBPAGE)
3004 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3005 #endif
3006 subpage_writelen(opaque, addr, value, 0);
3009 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3011 #if defined(DEBUG_SUBPAGE)
3012 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3013 #endif
3015 return subpage_readlen(opaque, addr, 1);
3018 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3019 uint32_t value)
3021 #if defined(DEBUG_SUBPAGE)
3022 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3023 #endif
3024 subpage_writelen(opaque, addr, value, 1);
3027 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3029 #if defined(DEBUG_SUBPAGE)
3030 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3031 #endif
3033 return subpage_readlen(opaque, addr, 2);
3036 static void subpage_writel (void *opaque,
3037 target_phys_addr_t addr, uint32_t value)
3039 #if defined(DEBUG_SUBPAGE)
3040 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3041 #endif
3042 subpage_writelen(opaque, addr, value, 2);
3045 static CPUReadMemoryFunc *subpage_read[] = {
3046 &subpage_readb,
3047 &subpage_readw,
3048 &subpage_readl,
3051 static CPUWriteMemoryFunc *subpage_write[] = {
3052 &subpage_writeb,
3053 &subpage_writew,
3054 &subpage_writel,
3057 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3058 ram_addr_t memory, ram_addr_t region_offset)
3060 int idx, eidx;
3061 unsigned int i;
3063 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3064 return -1;
3065 idx = SUBPAGE_IDX(start);
3066 eidx = SUBPAGE_IDX(end);
3067 #if defined(DEBUG_SUBPAGE)
3068 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3069 mmio, start, end, idx, eidx, memory);
3070 #endif
3071 memory >>= IO_MEM_SHIFT;
3072 for (; idx <= eidx; idx++) {
3073 for (i = 0; i < 4; i++) {
3074 if (io_mem_read[memory][i]) {
3075 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3076 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3077 mmio->region_offset[idx][0][i] = region_offset;
3079 if (io_mem_write[memory][i]) {
3080 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3081 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3082 mmio->region_offset[idx][1][i] = region_offset;
3087 return 0;
3090 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3091 ram_addr_t orig_memory, ram_addr_t region_offset)
3093 subpage_t *mmio;
3094 int subpage_memory;
3096 mmio = qemu_mallocz(sizeof(subpage_t));
3098 mmio->base = base;
3099 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3100 #if defined(DEBUG_SUBPAGE)
3101 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3102 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3103 #endif
3104 *phys = subpage_memory | IO_MEM_SUBPAGE;
3105 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3106 region_offset);
3108 return mmio;
3111 static int get_free_io_mem_idx(void)
3113 int i;
3115 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3116 if (!io_mem_used[i]) {
3117 io_mem_used[i] = 1;
3118 return i;
3121 return -1;
3124 /* mem_read and mem_write are arrays of functions containing the
3125 function to access byte (index 0), word (index 1) and dword (index
3126 2). Functions can be omitted with a NULL function pointer.
3127    If io_index is non-zero, the corresponding io zone is
3128 modified. If it is zero, a new io zone is allocated. The return
3129 value can be used with cpu_register_physical_memory(). (-1) is
3130    returned on error. */
3131 static int cpu_register_io_memory_fixed(int io_index,
3132 CPUReadMemoryFunc **mem_read,
3133 CPUWriteMemoryFunc **mem_write,
3134 void *opaque)
3136 int i, subwidth = 0;
3138 if (io_index <= 0) {
3139 io_index = get_free_io_mem_idx();
3140 if (io_index == -1)
3141 return io_index;
3142 } else {
3143 io_index >>= IO_MEM_SHIFT;
3144 if (io_index >= IO_MEM_NB_ENTRIES)
3145 return -1;
3148 for(i = 0;i < 3; i++) {
3149 if (!mem_read[i] || !mem_write[i])
3150 subwidth = IO_MEM_SUBWIDTH;
3151 io_mem_read[io_index][i] = mem_read[i];
3152 io_mem_write[io_index][i] = mem_write[i];
3154 io_mem_opaque[io_index] = opaque;
3155 return (io_index << IO_MEM_SHIFT) | subwidth;
3158 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3159 CPUWriteMemoryFunc **mem_write,
3160 void *opaque)
3162 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
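/*
 * Usage sketch (illustrative only): a device model registers its MMIO
 * callbacks and then maps the returned io-memory index into the guest
 * physical address space:
 *
 *     static CPUReadMemoryFunc *mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc *mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(mydev_read, mydev_write, mydev_state);
 *     cpu_register_physical_memory(mmio_base, TARGET_PAGE_SIZE, io);
 *
 * mydev_* and mmio_base are hypothetical; the byte/word/dword callbacks
 * follow the CPUReadMemoryFunc/CPUWriteMemoryFunc prototypes used above.
 */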
3165 void cpu_unregister_io_memory(int io_table_address)
3167 int i;
3168 int io_index = io_table_address >> IO_MEM_SHIFT;
3170 for (i=0;i < 3; i++) {
3171 io_mem_read[io_index][i] = unassigned_mem_read[i];
3172 io_mem_write[io_index][i] = unassigned_mem_write[i];
3174 io_mem_opaque[io_index] = NULL;
3175 io_mem_used[io_index] = 0;
3178 static void io_mem_init(void)
3180 int i;
3182 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3183 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3184 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3185 for (i=0; i<5; i++)
3186 io_mem_used[i] = 1;
3188 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3189 watch_mem_write, NULL);
3190 #ifdef CONFIG_KQEMU
3191 if (kqemu_phys_ram_base) {
3192 /* alloc dirty bits array */
3193 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3194 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3196 #endif
3199 #endif /* !defined(CONFIG_USER_ONLY) */
3201 /* physical memory access (slow version, mainly for debug) */
3202 #if defined(CONFIG_USER_ONLY)
3203 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3204 int len, int is_write)
3206 int l, flags;
3207 target_ulong page;
3208 void * p;
3210 while (len > 0) {
3211 page = addr & TARGET_PAGE_MASK;
3212 l = (page + TARGET_PAGE_SIZE) - addr;
3213 if (l > len)
3214 l = len;
3215 flags = page_get_flags(page);
3216 if (!(flags & PAGE_VALID))
3217 return;
3218 if (is_write) {
3219 if (!(flags & PAGE_WRITE))
3220 return;
3221 /* XXX: this code should not depend on lock_user */
3222 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3223 /* FIXME - should this return an error rather than just fail? */
3224 return;
3225 memcpy(p, buf, l);
3226 unlock_user(p, addr, l);
3227 } else {
3228 if (!(flags & PAGE_READ))
3229 return;
3230 /* XXX: this code should not depend on lock_user */
3231 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3232 /* FIXME - should this return an error rather than just fail? */
3233 return;
3234 memcpy(buf, p, l);
3235 unlock_user(p, addr, 0);
3237 len -= l;
3238 buf += l;
3239 addr += l;
3243 #else
3244 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3245 int len, int is_write)
3247 int l, io_index;
3248 uint8_t *ptr;
3249 uint32_t val;
3250 target_phys_addr_t page;
3251 unsigned long pd;
3252 PhysPageDesc *p;
3254 while (len > 0) {
3255 page = addr & TARGET_PAGE_MASK;
3256 l = (page + TARGET_PAGE_SIZE) - addr;
3257 if (l > len)
3258 l = len;
3259 p = phys_page_find(page >> TARGET_PAGE_BITS);
3260 if (!p) {
3261 pd = IO_MEM_UNASSIGNED;
3262 } else {
3263 pd = p->phys_offset;
3266 if (is_write) {
3267 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3268 target_phys_addr_t addr1 = addr;
3269 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3270 if (p)
3271 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3272 /* XXX: could force cpu_single_env to NULL to avoid
3273 potential bugs */
3274 if (l >= 4 && ((addr1 & 3) == 0)) {
3275 /* 32 bit write access */
3276 val = ldl_p(buf);
3277 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3278 l = 4;
3279 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3280 /* 16 bit write access */
3281 val = lduw_p(buf);
3282 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3283 l = 2;
3284 } else {
3285 /* 8 bit write access */
3286 val = ldub_p(buf);
3287 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3288 l = 1;
3290 } else {
3291 unsigned long addr1;
3292 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3293 /* RAM case */
3294 ptr = qemu_get_ram_ptr(addr1);
3295 memcpy(ptr, buf, l);
3296 if (!cpu_physical_memory_is_dirty(addr1)) {
3297 /* invalidate code */
3298 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3299 /* set dirty bit */
3300 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3301 (0xff & ~CODE_DIRTY_FLAG);
3303                 /* qemu doesn't execute guest code directly, but kvm does,
3304                    so flush the instruction caches */
3305 if (kvm_enabled())
3306 flush_icache_range((unsigned long)ptr,
3307 ((unsigned long)ptr)+l);
3309 } else {
3310 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3311 !(pd & IO_MEM_ROMD)) {
3312 target_phys_addr_t addr1 = addr;
3313 /* I/O case */
3314 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3315 if (p)
3316 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3317 if (l >= 4 && ((addr1 & 3) == 0)) {
3318 /* 32 bit read access */
3319 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3320 stl_p(buf, val);
3321 l = 4;
3322 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3323 /* 16 bit read access */
3324 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3325 stw_p(buf, val);
3326 l = 2;
3327 } else {
3328 /* 8 bit read access */
3329 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3330 stb_p(buf, val);
3331 l = 1;
3333 } else {
3334 /* RAM case */
3335 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3336 (addr & ~TARGET_PAGE_MASK);
3337 memcpy(buf, ptr, l);
3340 len -= l;
3341 buf += l;
3342 addr += l;
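/*
 * Usage sketch (illustrative): reading a 32-bit value from guest physical
 * memory through this function via the cpu_physical_memory_read() wrapper:
 *
 *     uint32_t v;
 *     cpu_physical_memory_read(gpa, (uint8_t *)&v, 4);
 *
 * gpa is a hypothetical guest physical address; ldl_phys() below performs
 * the same kind of aligned load directly.
 */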
3346 /* used for ROM loading: can write in RAM and ROM */
3347 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3348 const uint8_t *buf, int len)
3350 int l;
3351 uint8_t *ptr;
3352 target_phys_addr_t page;
3353 unsigned long pd;
3354 PhysPageDesc *p;
3356 while (len > 0) {
3357 page = addr & TARGET_PAGE_MASK;
3358 l = (page + TARGET_PAGE_SIZE) - addr;
3359 if (l > len)
3360 l = len;
3361 p = phys_page_find(page >> TARGET_PAGE_BITS);
3362 if (!p) {
3363 pd = IO_MEM_UNASSIGNED;
3364 } else {
3365 pd = p->phys_offset;
3368 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3369 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3370 !(pd & IO_MEM_ROMD)) {
3371 /* do nothing */
3372 } else {
3373 unsigned long addr1;
3374 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3375 /* ROM/RAM case */
3376 ptr = qemu_get_ram_ptr(addr1);
3377 memcpy(ptr, buf, l);
3379 len -= l;
3380 buf += l;
3381 addr += l;
3385 typedef struct {
3386 void *buffer;
3387 target_phys_addr_t addr;
3388 target_phys_addr_t len;
3389 } BounceBuffer;
3391 static BounceBuffer bounce;
3393 typedef struct MapClient {
3394 void *opaque;
3395 void (*callback)(void *opaque);
3396 LIST_ENTRY(MapClient) link;
3397 } MapClient;
3399 static LIST_HEAD(map_client_list, MapClient) map_client_list
3400 = LIST_HEAD_INITIALIZER(map_client_list);
3402 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3404 MapClient *client = qemu_malloc(sizeof(*client));
3406 client->opaque = opaque;
3407 client->callback = callback;
3408 LIST_INSERT_HEAD(&map_client_list, client, link);
3409 return client;
3412 void cpu_unregister_map_client(void *_client)
3414 MapClient *client = (MapClient *)_client;
3416 LIST_REMOVE(client, link);
3417 qemu_free(client);
3420 static void cpu_notify_map_clients(void)
3422 MapClient *client;
3424 while (!LIST_EMPTY(&map_client_list)) {
3425 client = LIST_FIRST(&map_client_list);
3426 client->callback(client->opaque);
3427 cpu_unregister_map_client(client);
3431 /* Map a physical memory region into a host virtual address.
3432 * May map a subset of the requested range, given by and returned in *plen.
3433 * May return NULL if resources needed to perform the mapping are exhausted.
3434 * Use only for reads OR writes - not for read-modify-write operations.
3435 * Use cpu_register_map_client() to know when retrying the map operation is
3436 * likely to succeed.
3438 void *cpu_physical_memory_map(target_phys_addr_t addr,
3439 target_phys_addr_t *plen,
3440 int is_write)
3442 target_phys_addr_t len = *plen;
3443 target_phys_addr_t done = 0;
3444 int l;
3445 uint8_t *ret = NULL;
3446 uint8_t *ptr;
3447 target_phys_addr_t page;
3448 unsigned long pd;
3449 PhysPageDesc *p;
3450 unsigned long addr1;
3452 while (len > 0) {
3453 page = addr & TARGET_PAGE_MASK;
3454 l = (page + TARGET_PAGE_SIZE) - addr;
3455 if (l > len)
3456 l = len;
3457 p = phys_page_find(page >> TARGET_PAGE_BITS);
3458 if (!p) {
3459 pd = IO_MEM_UNASSIGNED;
3460 } else {
3461 pd = p->phys_offset;
3464 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3465 if (done || bounce.buffer) {
3466 break;
3468 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3469 bounce.addr = addr;
3470 bounce.len = l;
3471 if (!is_write) {
3472 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3474 ptr = bounce.buffer;
3475 } else {
3476 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3477 ptr = qemu_get_ram_ptr(addr1);
3479 if (!done) {
3480 ret = ptr;
3481 } else if (ret + done != ptr) {
3482 break;
3485 len -= l;
3486 addr += l;
3487 done += l;
3489 *plen = done;
3490 return ret;
3493 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3494 * Will also mark the memory as dirty if is_write == 1. access_len gives
3495 * the amount of memory that was actually read or written by the caller.
3497 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3498 int is_write, target_phys_addr_t access_len)
3500 unsigned long flush_len = (unsigned long)access_len;
3502 if (buffer != bounce.buffer) {
3503 if (is_write) {
3504 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3505 while (access_len) {
3506 unsigned l;
3507 l = TARGET_PAGE_SIZE;
3508 if (l > access_len)
3509 l = access_len;
3510 if (!cpu_physical_memory_is_dirty(addr1)) {
3511 /* invalidate code */
3512 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3513 /* set dirty bit */
3514 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3515 (0xff & ~CODE_DIRTY_FLAG);
3517 addr1 += l;
3518 access_len -= l;
3520 dma_flush_range((unsigned long)buffer,
3521 (unsigned long)buffer + flush_len);
3523 return;
3525 if (is_write) {
3526 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3528 qemu_free(bounce.buffer);
3529 bounce.buffer = NULL;
3530 cpu_notify_map_clients();
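/*
 * Usage sketch (illustrative): a DMA-style transfer maps the guest region,
 * accesses it through the host pointer, then unmaps it, falling back to a
 * MapClient callback when the bounce buffer is busy:
 *
 *     target_phys_addr_t plen = size;
 *     void *p = cpu_physical_memory_map(gpa, &plen, 1);
 *     if (p) {
 *         memset(p, 0, plen);                        // device writes data
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     } else {
 *         cpu_register_map_client(opaque, retry_cb); // retry later
 *     }
 *
 * size, gpa, opaque and retry_cb are hypothetical names.
 */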
3533 /* warning: addr must be aligned */
3534 uint32_t ldl_phys(target_phys_addr_t addr)
3536 int io_index;
3537 uint8_t *ptr;
3538 uint32_t val;
3539 unsigned long pd;
3540 PhysPageDesc *p;
3542 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3543 if (!p) {
3544 pd = IO_MEM_UNASSIGNED;
3545 } else {
3546 pd = p->phys_offset;
3549 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3550 !(pd & IO_MEM_ROMD)) {
3551 /* I/O case */
3552 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3553 if (p)
3554 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3555 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3556 } else {
3557 /* RAM case */
3558 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3559 (addr & ~TARGET_PAGE_MASK);
3560 val = ldl_p(ptr);
3562 return val;
3565 /* warning: addr must be aligned */
3566 uint64_t ldq_phys(target_phys_addr_t addr)
3568 int io_index;
3569 uint8_t *ptr;
3570 uint64_t val;
3571 unsigned long pd;
3572 PhysPageDesc *p;
3574 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3575 if (!p) {
3576 pd = IO_MEM_UNASSIGNED;
3577 } else {
3578 pd = p->phys_offset;
3581 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3582 !(pd & IO_MEM_ROMD)) {
3583 /* I/O case */
3584 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3585 if (p)
3586 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3587 #ifdef TARGET_WORDS_BIGENDIAN
3588 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3589 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3590 #else
3591 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3592 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3593 #endif
3594 } else {
3595 /* RAM case */
3596 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3597 (addr & ~TARGET_PAGE_MASK);
3598 val = ldq_p(ptr);
3600 return val;
3603 /* XXX: optimize */
3604 uint32_t ldub_phys(target_phys_addr_t addr)
3606 uint8_t val;
3607 cpu_physical_memory_read(addr, &val, 1);
3608 return val;
3611 /* XXX: optimize */
3612 uint32_t lduw_phys(target_phys_addr_t addr)
3614 uint16_t val;
3615 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3616 return tswap16(val);
3619 /* warning: addr must be aligned. The ram page is not marked as dirty
3620 and the code inside is not invalidated. It is useful if the dirty
3621 bits are used to track modified PTEs */
3622 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3624 int io_index;
3625 uint8_t *ptr;
3626 unsigned long pd;
3627 PhysPageDesc *p;
3629 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3630 if (!p) {
3631 pd = IO_MEM_UNASSIGNED;
3632 } else {
3633 pd = p->phys_offset;
3636 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3637 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3638 if (p)
3639 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3640 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3641 } else {
3642 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3643 ptr = qemu_get_ram_ptr(addr1);
3644 stl_p(ptr, val);
3646 if (unlikely(in_migration)) {
3647 if (!cpu_physical_memory_is_dirty(addr1)) {
3648 /* invalidate code */
3649 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3650 /* set dirty bit */
3651 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3652 (0xff & ~CODE_DIRTY_FLAG);
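/*
 * Usage sketch (illustrative): a target MMU emulation that sets the
 * accessed/dirty bits in a guest page-table entry can use the _notdirty
 * variant so that the update itself does not mark the RAM page dirty:
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
 *
 * pte_addr and PTE_ACCESSED are hypothetical names for a guest PTE address
 * and a target-specific flag.
 */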
3658 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3660 int io_index;
3661 uint8_t *ptr;
3662 unsigned long pd;
3663 PhysPageDesc *p;
3665 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3666 if (!p) {
3667 pd = IO_MEM_UNASSIGNED;
3668 } else {
3669 pd = p->phys_offset;
3672 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3673 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3674 if (p)
3675 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3676 #ifdef TARGET_WORDS_BIGENDIAN
3677 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3678 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3679 #else
3680 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3681 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3682 #endif
3683 } else {
3684 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3685 (addr & ~TARGET_PAGE_MASK);
3686 stq_p(ptr, val);
3690 /* warning: addr must be aligned */
3691 void stl_phys(target_phys_addr_t addr, uint32_t val)
3693 int io_index;
3694 uint8_t *ptr;
3695 unsigned long pd;
3696 PhysPageDesc *p;
3698 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3699 if (!p) {
3700 pd = IO_MEM_UNASSIGNED;
3701 } else {
3702 pd = p->phys_offset;
3705 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3706 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3707 if (p)
3708 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3709 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3710 } else {
3711 unsigned long addr1;
3712 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3713 /* RAM case */
3714 ptr = qemu_get_ram_ptr(addr1);
3715 stl_p(ptr, val);
3716 if (!cpu_physical_memory_is_dirty(addr1)) {
3717 /* invalidate code */
3718 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3719 /* set dirty bit */
3720 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3721 (0xff & ~CODE_DIRTY_FLAG);
3726 /* XXX: optimize */
3727 void stb_phys(target_phys_addr_t addr, uint32_t val)
3729 uint8_t v = val;
3730 cpu_physical_memory_write(addr, &v, 1);
3733 /* XXX: optimize */
3734 void stw_phys(target_phys_addr_t addr, uint32_t val)
3736 uint16_t v = tswap16(val);
3737 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3740 /* XXX: optimize */
3741 void stq_phys(target_phys_addr_t addr, uint64_t val)
3743 val = tswap64(val);
3744 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3747 #endif
3749 /* virtual memory access for debug (includes writing to ROM) */
3750 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3751 uint8_t *buf, int len, int is_write)
3753 int l;
3754 target_phys_addr_t phys_addr;
3755 target_ulong page;
3757 while (len > 0) {
3758 page = addr & TARGET_PAGE_MASK;
3759 phys_addr = cpu_get_phys_page_debug(env, page);
3760         /* if no physical page is mapped, return an error */
3761 if (phys_addr == -1)
3762 return -1;
3763 l = (page + TARGET_PAGE_SIZE) - addr;
3764 if (l > len)
3765 l = len;
3766 phys_addr += (addr & ~TARGET_PAGE_MASK);
3767 #if !defined(CONFIG_USER_ONLY)
3768 if (is_write)
3769 cpu_physical_memory_write_rom(phys_addr, buf, l);
3770 else
3771 #endif
3772 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3773 len -= l;
3774 buf += l;
3775 addr += l;
3777 return 0;
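/*
 * Usage sketch (illustrative): a debugger stub reading guest memory by
 * virtual address, e.g. to show the bytes at the current program counter:
 *
 *     uint8_t insn[16];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         // no physical page mapped at this virtual address
 *     }
 *
 * env and pc are hypothetical here (a CPUState pointer and a guest virtual
 * address).
 */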
3780 /* in deterministic execution mode, instructions doing device I/Os
3781 must be at the end of the TB */
3782 void cpu_io_recompile(CPUState *env, void *retaddr)
3784 TranslationBlock *tb;
3785 uint32_t n, cflags;
3786 target_ulong pc, cs_base;
3787 uint64_t flags;
3789 tb = tb_find_pc((unsigned long)retaddr);
3790 if (!tb) {
3791 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3792 retaddr);
3794 n = env->icount_decr.u16.low + tb->icount;
3795 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3796 /* Calculate how many instructions had been executed before the fault
3797 occurred. */
3798 n = n - env->icount_decr.u16.low;
3799 /* Generate a new TB ending on the I/O insn. */
3800 n++;
3801 /* On MIPS and SH, delay slot instructions can only be restarted if
3802 they were already the first instruction in the TB. If this is not
3803 the first instruction in a TB then re-execute the preceding
3804 branch. */
3805 #if defined(TARGET_MIPS)
3806 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3807 env->active_tc.PC -= 4;
3808 env->icount_decr.u16.low++;
3809 env->hflags &= ~MIPS_HFLAG_BMASK;
3811 #elif defined(TARGET_SH4)
3812 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3813 && n > 1) {
3814 env->pc -= 2;
3815 env->icount_decr.u16.low++;
3816 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3818 #endif
3819 /* This should never happen. */
3820 if (n > CF_COUNT_MASK)
3821 cpu_abort(env, "TB too big during recompile");
3823 cflags = n | CF_LAST_IO;
3824 pc = tb->pc;
3825 cs_base = tb->cs_base;
3826 flags = tb->flags;
3827 tb_phys_invalidate(tb, -1);
3828 /* FIXME: In theory this could raise an exception. In practice
3829 we have already translated the block once so it's probably ok. */
3830 tb_gen_code(env, pc, cs_base, flags, cflags);
3831 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3832 the first in the TB) then we end up generating a whole new TB and
3833 repeating the fault, which is horribly inefficient.
3834 Better would be to execute just this insn uncached, or generate a
3835 second new TB. */
3836 cpu_resume_from_signal(env, NULL);
3839 void dump_exec_info(FILE *f,
3840 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3842 int i, target_code_size, max_target_code_size;
3843 int direct_jmp_count, direct_jmp2_count, cross_page;
3844 TranslationBlock *tb;
3846 target_code_size = 0;
3847 max_target_code_size = 0;
3848 cross_page = 0;
3849 direct_jmp_count = 0;
3850 direct_jmp2_count = 0;
3851 for(i = 0; i < nb_tbs; i++) {
3852 tb = &tbs[i];
3853 target_code_size += tb->size;
3854 if (tb->size > max_target_code_size)
3855 max_target_code_size = tb->size;
3856 if (tb->page_addr[1] != -1)
3857 cross_page++;
3858 if (tb->tb_next_offset[0] != 0xffff) {
3859 direct_jmp_count++;
3860 if (tb->tb_next_offset[1] != 0xffff) {
3861 direct_jmp2_count++;
3865 /* XXX: avoid using doubles ? */
3866 cpu_fprintf(f, "Translation buffer state:\n");
3867 cpu_fprintf(f, "gen code size %ld/%ld\n",
3868 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3869 cpu_fprintf(f, "TB count %d/%d\n",
3870 nb_tbs, code_gen_max_blocks);
3871 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3872 nb_tbs ? target_code_size / nb_tbs : 0,
3873 max_target_code_size);
3874 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3875 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3876 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3877 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3878 cross_page,
3879 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3880 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3881 direct_jmp_count,
3882 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3883 direct_jmp2_count,
3884 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3885 cpu_fprintf(f, "\nStatistics:\n");
3886 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3887 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3888 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3889 tcg_dump_info(f, cpu_fprintf);
3892 #if !defined(CONFIG_USER_ONLY)
3894 #define MMUSUFFIX _cmmu
3895 #define GETPC() NULL
3896 #define env cpu_single_env
3897 #define SOFTMMU_CODE_ACCESS
3899 #define SHIFT 0
3900 #include "softmmu_template.h"
3902 #define SHIFT 1
3903 #include "softmmu_template.h"
3905 #define SHIFT 2
3906 #include "softmmu_template.h"
3908 #define SHIFT 3
3909 #include "softmmu_template.h"
3911 #undef env
3913 #endif