kvm: move kvm_set_phys_mem around
[qemu/aliguori-queue.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "tcg.h"
38 #include "hw/hw.h"
39 #include "osdep.h"
40 #include "kvm.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #include <signal.h>
44 #endif
46 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_FLUSH
48 //#define DEBUG_TLB
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
60 #undef DEBUG_TB_CHECK
61 #endif
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #else
79 #define TARGET_PHYS_ADDR_SPACE_BITS 32
80 #endif
82 static TranslationBlock *tbs;
83 int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 uint8_t *code_gen_ptr;
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 uint8_t *phys_ram_dirty;
115 static int in_migration;
117 typedef struct RAMBlock {
118 uint8_t *host;
119 ram_addr_t offset;
120 ram_addr_t length;
121 struct RAMBlock *next;
122 } RAMBlock;
124 static RAMBlock *ram_blocks;
125 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126 then we can no longer assume contiguous ram offsets, and external uses
127 of this variable will break. */
128 ram_addr_t last_ram_offset;
129 #endif
131 CPUState *first_cpu;
132 /* current CPU in the current thread. It is only valid inside
133 cpu_exec() */
134 CPUState *cpu_single_env;
135 /* 0 = Do not count executed instructions.
136 1 = Precise instruction counting.
137 2 = Adaptive rate instruction counting. */
138 int use_icount = 0;
139 /* Current instruction counter. While executing translated code this may
140 include some instructions that have not yet been executed. */
141 int64_t qemu_icount;
143 typedef struct PageDesc {
144 /* list of TBs intersecting this ram page */
145 TranslationBlock *first_tb;
146 /* in order to optimize self modifying code, we count the number
147 of lookups we do to a given page to use a bitmap */
148 unsigned int code_write_count;
149 uint8_t *code_bitmap;
150 #if defined(CONFIG_USER_ONLY)
151 unsigned long flags;
152 #endif
153 } PageDesc;
155 typedef struct PhysPageDesc {
156 /* offset in host memory of the page + io_index in the low bits */
157 ram_addr_t phys_offset;
158 ram_addr_t region_offset;
159 } PhysPageDesc;
161 #define L2_BITS 10
162 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163 /* XXX: this is a temporary hack for alpha target.
164 * In the future, this is to be replaced by a multi-level table
165  * to actually be able to handle the complete 64-bit address space. */
167 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
168 #else
169 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170 #endif
172 #define L1_SIZE (1 << L1_BITS)
173 #define L2_SIZE (1 << L2_BITS)
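/* A minimal sketch (not part of the original file) of how the tables above
   are indexed: a page index (virtual address >> TARGET_PAGE_BITS) is split
   into an L1 slot that selects a PageDesc array and an L2 slot inside it.
   The helper name split_page_index is hypothetical. */
static inline void split_page_index(target_ulong index,
                                    target_ulong *l1_slot,
                                    target_ulong *l2_slot)
{
    *l1_slot = index >> L2_BITS;        /* which second-level array */
    *l2_slot = index & (L2_SIZE - 1);   /* which entry inside that array */
}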
175 unsigned long qemu_real_host_page_size;
176 unsigned long qemu_host_page_bits;
177 unsigned long qemu_host_page_size;
178 unsigned long qemu_host_page_mask;
180 /* XXX: for system emulation, it could just be an array */
181 static PageDesc *l1_map[L1_SIZE];
182 static PhysPageDesc **l1_phys_map;
184 #if !defined(CONFIG_USER_ONLY)
185 static void io_mem_init(void);
187 /* io memory support */
188 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
189 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
190 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
191 static char io_mem_used[IO_MEM_NB_ENTRIES];
192 static int io_mem_watch;
193 #endif
195 /* log support */
196 #ifdef WIN32
197 static const char *logfilename = "qemu.log";
198 #else
199 static const char *logfilename = "/tmp/qemu.log";
200 #endif
201 FILE *logfile;
202 int loglevel;
203 static int log_append = 0;
205 /* statistics */
206 static int tlb_flush_count;
207 static int tb_flush_count;
208 static int tb_phys_invalidate_count;
210 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
211 typedef struct subpage_t {
212 target_phys_addr_t base;
213 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
214 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
215 void *opaque[TARGET_PAGE_SIZE][2][4];
216 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
217 } subpage_t;
219 #ifdef _WIN32
220 static void map_exec(void *addr, long size)
222 DWORD old_protect;
223 VirtualProtect(addr, size,
224 PAGE_EXECUTE_READWRITE, &old_protect);
227 #else
228 static void map_exec(void *addr, long size)
230 unsigned long start, end, page_size;
232 page_size = getpagesize();
233 start = (unsigned long)addr;
234 start &= ~(page_size - 1);
236 end = (unsigned long)addr + size;
237 end += page_size - 1;
238 end &= ~(page_size - 1);
240 mprotect((void *)start, end - start,
241 PROT_READ | PROT_WRITE | PROT_EXEC);
243 #endif
245 static void page_init(void)
247 /* NOTE: we can always suppose that qemu_host_page_size >=
248 TARGET_PAGE_SIZE */
249 #ifdef _WIN32
251 SYSTEM_INFO system_info;
253 GetSystemInfo(&system_info);
254 qemu_real_host_page_size = system_info.dwPageSize;
256 #else
257 qemu_real_host_page_size = getpagesize();
258 #endif
259 if (qemu_host_page_size == 0)
260 qemu_host_page_size = qemu_real_host_page_size;
261 if (qemu_host_page_size < TARGET_PAGE_SIZE)
262 qemu_host_page_size = TARGET_PAGE_SIZE;
263 qemu_host_page_bits = 0;
264 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
265 qemu_host_page_bits++;
266 qemu_host_page_mask = ~(qemu_host_page_size - 1);
267 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
268 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
270 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
272 long long startaddr, endaddr;
273 FILE *f;
274 int n;
276 mmap_lock();
277 last_brk = (unsigned long)sbrk(0);
278 f = fopen("/proc/self/maps", "r");
279 if (f) {
280 do {
281 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
282 if (n == 2) {
283 startaddr = MIN(startaddr,
284 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
285 endaddr = MIN(endaddr,
286 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
287 page_set_flags(startaddr & TARGET_PAGE_MASK,
288 TARGET_PAGE_ALIGN(endaddr),
289 PAGE_RESERVED);
291 } while (!feof(f));
292 fclose(f);
294 mmap_unlock();
296 #endif
299 static inline PageDesc **page_l1_map(target_ulong index)
301 #if TARGET_LONG_BITS > 32
302 /* Host memory outside guest VM. For 32-bit targets we have already
303 excluded high addresses. */
304 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
305 return NULL;
306 #endif
307 return &l1_map[index >> L2_BITS];
310 static inline PageDesc *page_find_alloc(target_ulong index)
312 PageDesc **lp, *p;
313 lp = page_l1_map(index);
314 if (!lp)
315 return NULL;
317 p = *lp;
318 if (!p) {
319 /* allocate if not found */
320 #if defined(CONFIG_USER_ONLY)
321 size_t len = sizeof(PageDesc) * L2_SIZE;
322 /* Don't use qemu_malloc because it may recurse. */
323 p = mmap(NULL, len, PROT_READ | PROT_WRITE,
324 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
325 *lp = p;
326 if (h2g_valid(p)) {
327 unsigned long addr = h2g(p);
328 page_set_flags(addr & TARGET_PAGE_MASK,
329 TARGET_PAGE_ALIGN(addr + len),
330 PAGE_RESERVED);
332 #else
333 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
334 *lp = p;
335 #endif
337 return p + (index & (L2_SIZE - 1));
340 static inline PageDesc *page_find(target_ulong index)
342 PageDesc **lp, *p;
343 lp = page_l1_map(index);
344 if (!lp)
345 return NULL;
347 p = *lp;
348 if (!p) {
349 return NULL;
351 return p + (index & (L2_SIZE - 1));
354 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
356 void **lp, **p;
357 PhysPageDesc *pd;
359 p = (void **)l1_phys_map;
360 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
362 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
363 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
364 #endif
365 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
366 p = *lp;
367 if (!p) {
368 /* allocate if not found */
369 if (!alloc)
370 return NULL;
371 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
372 memset(p, 0, sizeof(void *) * L1_SIZE);
373 *lp = p;
375 #endif
376 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
377 pd = *lp;
378 if (!pd) {
379 int i;
380 /* allocate if not found */
381 if (!alloc)
382 return NULL;
383 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
384 *lp = pd;
385 for (i = 0; i < L2_SIZE; i++) {
386 pd[i].phys_offset = IO_MEM_UNASSIGNED;
387 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
390 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
393 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
395 return phys_page_find_alloc(index, 0);
398 #if !defined(CONFIG_USER_ONLY)
399 static void tlb_protect_code(ram_addr_t ram_addr);
400 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
401 target_ulong vaddr);
402 #define mmap_lock() do { } while(0)
403 #define mmap_unlock() do { } while(0)
404 #endif
406 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
408 #if defined(CONFIG_USER_ONLY)
409 /* Currently it is not recommended to allocate big chunks of data in
410    user mode. It will change when a dedicated libc is used */
411 #define USE_STATIC_CODE_GEN_BUFFER
412 #endif
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
416 #endif
418 static void code_gen_alloc(unsigned long tb_size)
420 #ifdef USE_STATIC_CODE_GEN_BUFFER
421 code_gen_buffer = static_code_gen_buffer;
422 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
423 map_exec(code_gen_buffer, code_gen_buffer_size);
424 #else
425 code_gen_buffer_size = tb_size;
426 if (code_gen_buffer_size == 0) {
427 #if defined(CONFIG_USER_ONLY)
428 /* in user mode, phys_ram_size is not meaningful */
429 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
430 #else
431 /* XXX: needs adjustments */
432 code_gen_buffer_size = (unsigned long)(ram_size / 4);
433 #endif
435 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
436 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
437 /* The code gen buffer location may have constraints depending on
438 the host cpu and OS */
439 #if defined(__linux__)
441 int flags;
442 void *start = NULL;
444 flags = MAP_PRIVATE | MAP_ANONYMOUS;
445 #if defined(__x86_64__)
446 flags |= MAP_32BIT;
447 /* Cannot map more than that */
448 if (code_gen_buffer_size > (800 * 1024 * 1024))
449 code_gen_buffer_size = (800 * 1024 * 1024);
450 #elif defined(__sparc_v9__)
451 // Map the buffer below 2G, so we can use direct calls and branches
452 flags |= MAP_FIXED;
453 start = (void *) 0x60000000UL;
454 if (code_gen_buffer_size > (512 * 1024 * 1024))
455 code_gen_buffer_size = (512 * 1024 * 1024);
456 #elif defined(__arm__)
457 /* Map the buffer below 32M, so we can use direct calls and branches */
458 flags |= MAP_FIXED;
459 start = (void *) 0x01000000UL;
460 if (code_gen_buffer_size > 16 * 1024 * 1024)
461 code_gen_buffer_size = 16 * 1024 * 1024;
462 #endif
463 code_gen_buffer = mmap(start, code_gen_buffer_size,
464 PROT_WRITE | PROT_READ | PROT_EXEC,
465 flags, -1, 0);
466 if (code_gen_buffer == MAP_FAILED) {
467 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
468 exit(1);
471 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
473 int flags;
474 void *addr = NULL;
475 flags = MAP_PRIVATE | MAP_ANONYMOUS;
476 #if defined(__x86_64__)
477 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
478 * 0x40000000 is free */
479 flags |= MAP_FIXED;
480 addr = (void *)0x40000000;
481 /* Cannot map more than that */
482 if (code_gen_buffer_size > (800 * 1024 * 1024))
483 code_gen_buffer_size = (800 * 1024 * 1024);
484 #endif
485 code_gen_buffer = mmap(addr, code_gen_buffer_size,
486 PROT_WRITE | PROT_READ | PROT_EXEC,
487 flags, -1, 0);
488 if (code_gen_buffer == MAP_FAILED) {
489 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
490 exit(1);
493 #else
494 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
495 map_exec(code_gen_buffer, code_gen_buffer_size);
496 #endif
497 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
498 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
499 code_gen_buffer_max_size = code_gen_buffer_size -
500 code_gen_max_block_size();
501 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
502 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
505 /* Must be called before using the QEMU cpus. 'tb_size' is the size
506 (in bytes) allocated to the translation buffer. Zero means default
507 size. */
508 void cpu_exec_init_all(unsigned long tb_size)
510 cpu_gen_init();
511 code_gen_alloc(tb_size);
512 code_gen_ptr = code_gen_buffer;
513 page_init();
514 #if !defined(CONFIG_USER_ONLY)
515 io_mem_init();
516 #endif
519 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
521 static void cpu_common_pre_save(void *opaque)
523 CPUState *env = opaque;
525 cpu_synchronize_state(env);
528 static int cpu_common_pre_load(void *opaque)
530 CPUState *env = opaque;
532 cpu_synchronize_state(env);
533 return 0;
536 static int cpu_common_post_load(void *opaque, int version_id)
538 CPUState *env = opaque;
540 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
541 version_id is increased. */
542 env->interrupt_request &= ~0x01;
543 tlb_flush(env, 1);
545 return 0;
548 static const VMStateDescription vmstate_cpu_common = {
549 .name = "cpu_common",
550 .version_id = 1,
551 .minimum_version_id = 1,
552 .minimum_version_id_old = 1,
553 .pre_save = cpu_common_pre_save,
554 .pre_load = cpu_common_pre_load,
555 .post_load = cpu_common_post_load,
556 .fields = (VMStateField []) {
557 VMSTATE_UINT32(halted, CPUState),
558 VMSTATE_UINT32(interrupt_request, CPUState),
559 VMSTATE_END_OF_LIST()
562 #endif
564 CPUState *qemu_get_cpu(int cpu)
566 CPUState *env = first_cpu;
568 while (env) {
569 if (env->cpu_index == cpu)
570 break;
571 env = env->next_cpu;
574 return env;
577 void cpu_exec_init(CPUState *env)
579 CPUState **penv;
580 int cpu_index;
582 #if defined(CONFIG_USER_ONLY)
583 cpu_list_lock();
584 #endif
585 env->next_cpu = NULL;
586 penv = &first_cpu;
587 cpu_index = 0;
588 while (*penv != NULL) {
589 penv = &(*penv)->next_cpu;
590 cpu_index++;
592 env->cpu_index = cpu_index;
593 env->numa_node = 0;
594 QTAILQ_INIT(&env->breakpoints);
595 QTAILQ_INIT(&env->watchpoints);
596 *penv = env;
597 #if defined(CONFIG_USER_ONLY)
598 cpu_list_unlock();
599 #endif
600 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
601 vmstate_register(cpu_index, &vmstate_cpu_common, env);
602 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
603 cpu_save, cpu_load, env);
604 #endif
607 static inline void invalidate_page_bitmap(PageDesc *p)
609 if (p->code_bitmap) {
610 qemu_free(p->code_bitmap);
611 p->code_bitmap = NULL;
613 p->code_write_count = 0;
616 /* set to NULL all the 'first_tb' fields in all PageDescs */
617 static void page_flush_tb(void)
619 int i, j;
620 PageDesc *p;
622 for(i = 0; i < L1_SIZE; i++) {
623 p = l1_map[i];
624 if (p) {
625 for(j = 0; j < L2_SIZE; j++) {
626 p->first_tb = NULL;
627 invalidate_page_bitmap(p);
628 p++;
634 /* flush all the translation blocks */
635 /* XXX: tb_flush is currently not thread safe */
636 void tb_flush(CPUState *env1)
638 CPUState *env;
639 #if defined(DEBUG_FLUSH)
640 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
641 (unsigned long)(code_gen_ptr - code_gen_buffer),
642 nb_tbs, nb_tbs > 0 ?
643 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
644 #endif
645 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
646 cpu_abort(env1, "Internal error: code buffer overflow\n");
648 nb_tbs = 0;
650 for(env = first_cpu; env != NULL; env = env->next_cpu) {
651 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
654 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
655 page_flush_tb();
657 code_gen_ptr = code_gen_buffer;
658 /* XXX: flush processor icache at this point if cache flush is
659 expensive */
660 tb_flush_count++;
663 #ifdef DEBUG_TB_CHECK
665 static void tb_invalidate_check(target_ulong address)
667 TranslationBlock *tb;
668 int i;
669 address &= TARGET_PAGE_MASK;
670 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
671 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
672 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
673 address >= tb->pc + tb->size)) {
674 printf("ERROR invalidate: address=" TARGET_FMT_lx
675 " PC=%08lx size=%04x\n",
676 address, (long)tb->pc, tb->size);
682 /* verify that all the pages have correct rights for code */
683 static void tb_page_check(void)
685 TranslationBlock *tb;
686 int i, flags1, flags2;
688 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
689 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
690 flags1 = page_get_flags(tb->pc);
691 flags2 = page_get_flags(tb->pc + tb->size - 1);
692 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
693 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
694 (long)tb->pc, tb->size, flags1, flags2);
700 #endif
702 /* invalidate one TB */
703 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
704 int next_offset)
706 TranslationBlock *tb1;
707 for(;;) {
708 tb1 = *ptb;
709 if (tb1 == tb) {
710 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
711 break;
713 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
717 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
719 TranslationBlock *tb1;
720 unsigned int n1;
722 for(;;) {
723 tb1 = *ptb;
724 n1 = (long)tb1 & 3;
725 tb1 = (TranslationBlock *)((long)tb1 & ~3);
726 if (tb1 == tb) {
727 *ptb = tb1->page_next[n1];
728 break;
730 ptb = &tb1->page_next[n1];
734 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
736 TranslationBlock *tb1, **ptb;
737 unsigned int n1;
739 ptb = &tb->jmp_next[n];
740 tb1 = *ptb;
741 if (tb1) {
742 /* find tb(n) in circular list */
743 for(;;) {
744 tb1 = *ptb;
745 n1 = (long)tb1 & 3;
746 tb1 = (TranslationBlock *)((long)tb1 & ~3);
747 if (n1 == n && tb1 == tb)
748 break;
749 if (n1 == 2) {
750 ptb = &tb1->jmp_first;
751 } else {
752 ptb = &tb1->jmp_next[n1];
755 /* now we can suppress tb(n) from the list */
756 *ptb = tb->jmp_next[n];
758 tb->jmp_next[n] = NULL;
762 /* reset the jump entry 'n' of a TB so that it is not chained to
763 another TB */
764 static inline void tb_reset_jump(TranslationBlock *tb, int n)
766 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
769 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
771 CPUState *env;
772 PageDesc *p;
773 unsigned int h, n1;
774 target_phys_addr_t phys_pc;
775 TranslationBlock *tb1, *tb2;
777 /* remove the TB from the hash list */
778 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
779 h = tb_phys_hash_func(phys_pc);
780 tb_remove(&tb_phys_hash[h], tb,
781 offsetof(TranslationBlock, phys_hash_next));
783 /* remove the TB from the page list */
784 if (tb->page_addr[0] != page_addr) {
785 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
786 tb_page_remove(&p->first_tb, tb);
787 invalidate_page_bitmap(p);
789 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
790 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
791 tb_page_remove(&p->first_tb, tb);
792 invalidate_page_bitmap(p);
795 tb_invalidated_flag = 1;
797 /* remove the TB from the hash list */
798 h = tb_jmp_cache_hash_func(tb->pc);
799 for(env = first_cpu; env != NULL; env = env->next_cpu) {
800 if (env->tb_jmp_cache[h] == tb)
801 env->tb_jmp_cache[h] = NULL;
804 /* suppress this TB from the two jump lists */
805 tb_jmp_remove(tb, 0);
806 tb_jmp_remove(tb, 1);
808 /* suppress any remaining jumps to this TB */
809 tb1 = tb->jmp_first;
810 for(;;) {
811 n1 = (long)tb1 & 3;
812 if (n1 == 2)
813 break;
814 tb1 = (TranslationBlock *)((long)tb1 & ~3);
815 tb2 = tb1->jmp_next[n1];
816 tb_reset_jump(tb1, n1);
817 tb1->jmp_next[n1] = NULL;
818 tb1 = tb2;
820 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
822 tb_phys_invalidate_count++;
825 static inline void set_bits(uint8_t *tab, int start, int len)
827 int end, mask, end1;
829 end = start + len;
830 tab += start >> 3;
831 mask = 0xff << (start & 7);
832 if ((start & ~7) == (end & ~7)) {
833 if (start < end) {
834 mask &= ~(0xff << (end & 7));
835 *tab |= mask;
837 } else {
838 *tab++ |= mask;
839 start = (start + 8) & ~7;
840 end1 = end & ~7;
841 while (start < end1) {
842 *tab++ = 0xff;
843 start += 8;
845 if (start < end) {
846 mask = ~(0xff << (end & 7));
847 *tab |= mask;
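/* A minimal sketch (not part of the original file) showing what set_bits()
   does to the byte-granular SMC bitmap: marking bits [3, 13) of a zeroed
   map touches only the first two bytes.  The function name is hypothetical. */
static inline void set_bits_example(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];

    memset(bitmap, 0, sizeof(bitmap));
    set_bits(bitmap, 3, 10);
    /* now bitmap[0] == 0xf8 (bits 3..7) and bitmap[1] == 0x1f (bits 8..12) */
}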
852 static void build_page_bitmap(PageDesc *p)
854 int n, tb_start, tb_end;
855 TranslationBlock *tb;
857 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
859 tb = p->first_tb;
860 while (tb != NULL) {
861 n = (long)tb & 3;
862 tb = (TranslationBlock *)((long)tb & ~3);
863 /* NOTE: this is subtle as a TB may span two physical pages */
864 if (n == 0) {
865 /* NOTE: tb_end may be after the end of the page, but
866 it is not a problem */
867 tb_start = tb->pc & ~TARGET_PAGE_MASK;
868 tb_end = tb_start + tb->size;
869 if (tb_end > TARGET_PAGE_SIZE)
870 tb_end = TARGET_PAGE_SIZE;
871 } else {
872 tb_start = 0;
873 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
875 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
876 tb = tb->page_next[n];
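/* A minimal sketch (not part of the original file) of the pointer tagging
   used by the per-page TB lists above: the page slot (0 or 1, with 2 acting
   as the jmp_first terminator) is stored in the two low bits of the pointer,
   which are free because TranslationBlock is at least 4-byte aligned.  The
   helper names are hypothetical. */
static inline TranslationBlock *tb_tag_ptr(TranslationBlock *tb, int n)
{
    return (TranslationBlock *)((long)tb | n);
}

static inline TranslationBlock *tb_untag_ptr(TranslationBlock *tagged, int *n)
{
    *n = (long)tagged & 3;
    return (TranslationBlock *)((long)tagged & ~3);
}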
880 TranslationBlock *tb_gen_code(CPUState *env,
881 target_ulong pc, target_ulong cs_base,
882 int flags, int cflags)
884 TranslationBlock *tb;
885 uint8_t *tc_ptr;
886 target_ulong phys_pc, phys_page2, virt_page2;
887 int code_gen_size;
889 phys_pc = get_phys_addr_code(env, pc);
890 tb = tb_alloc(pc);
891 if (!tb) {
892 /* flush must be done */
893 tb_flush(env);
894 /* cannot fail at this point */
895 tb = tb_alloc(pc);
896 /* Don't forget to invalidate previous TB info. */
897 tb_invalidated_flag = 1;
899 tc_ptr = code_gen_ptr;
900 tb->tc_ptr = tc_ptr;
901 tb->cs_base = cs_base;
902 tb->flags = flags;
903 tb->cflags = cflags;
904 cpu_gen_code(env, tb, &code_gen_size);
905 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
907 /* check next page if needed */
908 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
909 phys_page2 = -1;
910 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
911 phys_page2 = get_phys_addr_code(env, virt_page2);
913 tb_link_phys(tb, phys_pc, phys_page2);
914 return tb;
917 /* invalidate all TBs which intersect with the target physical page
918 starting in range [start;end[. NOTE: start and end must refer to
919 the same physical page. 'is_cpu_write_access' should be true if called
920 from a real cpu write access: the virtual CPU will exit the current
921 TB if code is modified inside this TB. */
922 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
923 int is_cpu_write_access)
925 TranslationBlock *tb, *tb_next, *saved_tb;
926 CPUState *env = cpu_single_env;
927 target_ulong tb_start, tb_end;
928 PageDesc *p;
929 int n;
930 #ifdef TARGET_HAS_PRECISE_SMC
931 int current_tb_not_found = is_cpu_write_access;
932 TranslationBlock *current_tb = NULL;
933 int current_tb_modified = 0;
934 target_ulong current_pc = 0;
935 target_ulong current_cs_base = 0;
936 int current_flags = 0;
937 #endif /* TARGET_HAS_PRECISE_SMC */
939 p = page_find(start >> TARGET_PAGE_BITS);
940 if (!p)
941 return;
942 if (!p->code_bitmap &&
943 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
944 is_cpu_write_access) {
945 /* build code bitmap */
946 build_page_bitmap(p);
949 /* we remove all the TBs in the range [start, end[ */
950 /* XXX: see if in some cases it could be faster to invalidate all the code */
951 tb = p->first_tb;
952 while (tb != NULL) {
953 n = (long)tb & 3;
954 tb = (TranslationBlock *)((long)tb & ~3);
955 tb_next = tb->page_next[n];
956 /* NOTE: this is subtle as a TB may span two physical pages */
957 if (n == 0) {
958 /* NOTE: tb_end may be after the end of the page, but
959 it is not a problem */
960 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
961 tb_end = tb_start + tb->size;
962 } else {
963 tb_start = tb->page_addr[1];
964 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
966 if (!(tb_end <= start || tb_start >= end)) {
967 #ifdef TARGET_HAS_PRECISE_SMC
968 if (current_tb_not_found) {
969 current_tb_not_found = 0;
970 current_tb = NULL;
971 if (env->mem_io_pc) {
972 /* now we have a real cpu fault */
973 current_tb = tb_find_pc(env->mem_io_pc);
976 if (current_tb == tb &&
977 (current_tb->cflags & CF_COUNT_MASK) != 1) {
978 /* If we are modifying the current TB, we must stop
979 its execution. We could be more precise by checking
980 that the modification is after the current PC, but it
981 would require a specialized function to partially
982 restore the CPU state */
984 current_tb_modified = 1;
985 cpu_restore_state(current_tb, env,
986 env->mem_io_pc, NULL);
987 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
988 &current_flags);
990 #endif /* TARGET_HAS_PRECISE_SMC */
991 /* we need to do that to handle the case where a signal
992 occurs while doing tb_phys_invalidate() */
993 saved_tb = NULL;
994 if (env) {
995 saved_tb = env->current_tb;
996 env->current_tb = NULL;
998 tb_phys_invalidate(tb, -1);
999 if (env) {
1000 env->current_tb = saved_tb;
1001 if (env->interrupt_request && env->current_tb)
1002 cpu_interrupt(env, env->interrupt_request);
1005 tb = tb_next;
1007 #if !defined(CONFIG_USER_ONLY)
1008 /* if no code remaining, no need to continue to use slow writes */
1009 if (!p->first_tb) {
1010 invalidate_page_bitmap(p);
1011 if (is_cpu_write_access) {
1012 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1015 #endif
1016 #ifdef TARGET_HAS_PRECISE_SMC
1017 if (current_tb_modified) {
1018 /* we generate a block containing just the instruction
1019 modifying the memory. It will ensure that it cannot modify
1020 itself */
1021 env->current_tb = NULL;
1022 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1023 cpu_resume_from_signal(env, NULL);
1025 #endif
1028 /* len must be <= 8 and start must be a multiple of len */
1029 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1031 PageDesc *p;
1032 int offset, b;
1033 #if 0
1034 if (1) {
1035 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1036 cpu_single_env->mem_io_vaddr, len,
1037 cpu_single_env->eip,
1038 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1040 #endif
1041 p = page_find(start >> TARGET_PAGE_BITS);
1042 if (!p)
1043 return;
1044 if (p->code_bitmap) {
1045 offset = start & ~TARGET_PAGE_MASK;
1046 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1047 if (b & ((1 << len) - 1))
1048 goto do_invalidate;
1049 } else {
1050 do_invalidate:
1051 tb_invalidate_phys_page_range(start, start + len, 1);
1055 #if !defined(CONFIG_SOFTMMU)
1056 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1057 unsigned long pc, void *puc)
1059 TranslationBlock *tb;
1060 PageDesc *p;
1061 int n;
1062 #ifdef TARGET_HAS_PRECISE_SMC
1063 TranslationBlock *current_tb = NULL;
1064 CPUState *env = cpu_single_env;
1065 int current_tb_modified = 0;
1066 target_ulong current_pc = 0;
1067 target_ulong current_cs_base = 0;
1068 int current_flags = 0;
1069 #endif
1071 addr &= TARGET_PAGE_MASK;
1072 p = page_find(addr >> TARGET_PAGE_BITS);
1073 if (!p)
1074 return;
1075 tb = p->first_tb;
1076 #ifdef TARGET_HAS_PRECISE_SMC
1077 if (tb && pc != 0) {
1078 current_tb = tb_find_pc(pc);
1080 #endif
1081 while (tb != NULL) {
1082 n = (long)tb & 3;
1083 tb = (TranslationBlock *)((long)tb & ~3);
1084 #ifdef TARGET_HAS_PRECISE_SMC
1085 if (current_tb == tb &&
1086 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1087 /* If we are modifying the current TB, we must stop
1088 its execution. We could be more precise by checking
1089 that the modification is after the current PC, but it
1090 would require a specialized function to partially
1091 restore the CPU state */
1093 current_tb_modified = 1;
1094 cpu_restore_state(current_tb, env, pc, puc);
1095 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1096 &current_flags);
1098 #endif /* TARGET_HAS_PRECISE_SMC */
1099 tb_phys_invalidate(tb, addr);
1100 tb = tb->page_next[n];
1102 p->first_tb = NULL;
1103 #ifdef TARGET_HAS_PRECISE_SMC
1104 if (current_tb_modified) {
1105 /* we generate a block containing just the instruction
1106 modifying the memory. It will ensure that it cannot modify
1107 itself */
1108 env->current_tb = NULL;
1109 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1110 cpu_resume_from_signal(env, puc);
1112 #endif
1114 #endif
1116 /* add the tb in the target page and protect it if necessary */
1117 static inline void tb_alloc_page(TranslationBlock *tb,
1118 unsigned int n, target_ulong page_addr)
1120 PageDesc *p;
1121 TranslationBlock *last_first_tb;
1123 tb->page_addr[n] = page_addr;
1124 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1125 tb->page_next[n] = p->first_tb;
1126 last_first_tb = p->first_tb;
1127 p->first_tb = (TranslationBlock *)((long)tb | n);
1128 invalidate_page_bitmap(p);
1130 #if defined(TARGET_HAS_SMC) || 1
1132 #if defined(CONFIG_USER_ONLY)
1133 if (p->flags & PAGE_WRITE) {
1134 target_ulong addr;
1135 PageDesc *p2;
1136 int prot;
1138 /* force the host page as non writable (writes will have a
1139 page fault + mprotect overhead) */
1140 page_addr &= qemu_host_page_mask;
1141 prot = 0;
1142 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1143 addr += TARGET_PAGE_SIZE) {
1145 p2 = page_find (addr >> TARGET_PAGE_BITS);
1146 if (!p2)
1147 continue;
1148 prot |= p2->flags;
1149 p2->flags &= ~PAGE_WRITE;
1150 page_get_flags(addr);
1152 mprotect(g2h(page_addr), qemu_host_page_size,
1153 (prot & PAGE_BITS) & ~PAGE_WRITE);
1154 #ifdef DEBUG_TB_INVALIDATE
1155 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1156 page_addr);
1157 #endif
1159 #else
1160 /* if some code is already present, then the pages are already
1161 protected. So we handle the case where only the first TB is
1162 allocated in a physical page */
1163 if (!last_first_tb) {
1164 tlb_protect_code(page_addr);
1166 #endif
1168 #endif /* TARGET_HAS_SMC */
1171 /* Allocate a new translation block. Flush the translation buffer if
1172 too many translation blocks or too much generated code. */
1173 TranslationBlock *tb_alloc(target_ulong pc)
1175 TranslationBlock *tb;
1177 if (nb_tbs >= code_gen_max_blocks ||
1178 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1179 return NULL;
1180 tb = &tbs[nb_tbs++];
1181 tb->pc = pc;
1182 tb->cflags = 0;
1183 return tb;
1186 void tb_free(TranslationBlock *tb)
1188     /* In practice this is mostly used for single-use temporary TBs.
1189 Ignore the hard cases and just back up if this TB happens to
1190 be the last one generated. */
1191 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1192 code_gen_ptr = tb->tc_ptr;
1193 nb_tbs--;
1197 /* add a new TB and link it to the physical page tables. phys_page2 is
1198 (-1) to indicate that only one page contains the TB. */
1199 void tb_link_phys(TranslationBlock *tb,
1200 target_ulong phys_pc, target_ulong phys_page2)
1202 unsigned int h;
1203 TranslationBlock **ptb;
1205 /* Grab the mmap lock to stop another thread invalidating this TB
1206 before we are done. */
1207 mmap_lock();
1208 /* add in the physical hash table */
1209 h = tb_phys_hash_func(phys_pc);
1210 ptb = &tb_phys_hash[h];
1211 tb->phys_hash_next = *ptb;
1212 *ptb = tb;
1214 /* add in the page list */
1215 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1216 if (phys_page2 != -1)
1217 tb_alloc_page(tb, 1, phys_page2);
1218 else
1219 tb->page_addr[1] = -1;
1221 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1222 tb->jmp_next[0] = NULL;
1223 tb->jmp_next[1] = NULL;
1225 /* init original jump addresses */
1226 if (tb->tb_next_offset[0] != 0xffff)
1227 tb_reset_jump(tb, 0);
1228 if (tb->tb_next_offset[1] != 0xffff)
1229 tb_reset_jump(tb, 1);
1231 #ifdef DEBUG_TB_CHECK
1232 tb_page_check();
1233 #endif
1234 mmap_unlock();
1237 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1238 tb[1].tc_ptr. Return NULL if not found */
1239 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1241 int m_min, m_max, m;
1242 unsigned long v;
1243 TranslationBlock *tb;
1245 if (nb_tbs <= 0)
1246 return NULL;
1247 if (tc_ptr < (unsigned long)code_gen_buffer ||
1248 tc_ptr >= (unsigned long)code_gen_ptr)
1249 return NULL;
1250 /* binary search (cf Knuth) */
1251 m_min = 0;
1252 m_max = nb_tbs - 1;
1253 while (m_min <= m_max) {
1254 m = (m_min + m_max) >> 1;
1255 tb = &tbs[m];
1256 v = (unsigned long)tb->tc_ptr;
1257 if (v == tc_ptr)
1258 return tb;
1259 else if (tc_ptr < v) {
1260 m_max = m - 1;
1261 } else {
1262 m_min = m + 1;
1265 return &tbs[m_max];
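/* A minimal sketch (not part of the original file) of the usual pairing of
   tb_find_pc() with cpu_restore_state(), as done by the SMC handling above:
   map a host PC (for instance env->mem_io_pc) back to the TB that generated
   it and re-derive the guest state at that point.  The helper name is
   hypothetical. */
static inline void restore_state_from_host_pc(CPUState *env, unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        cpu_restore_state(tb, env, host_pc, NULL);
    }
}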
1268 static void tb_reset_jump_recursive(TranslationBlock *tb);
1270 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1272 TranslationBlock *tb1, *tb_next, **ptb;
1273 unsigned int n1;
1275 tb1 = tb->jmp_next[n];
1276 if (tb1 != NULL) {
1277 /* find head of list */
1278 for(;;) {
1279 n1 = (long)tb1 & 3;
1280 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1281 if (n1 == 2)
1282 break;
1283 tb1 = tb1->jmp_next[n1];
1285         /* we are now sure that tb jumps to tb1 */
1286 tb_next = tb1;
1288 /* remove tb from the jmp_first list */
1289 ptb = &tb_next->jmp_first;
1290 for(;;) {
1291 tb1 = *ptb;
1292 n1 = (long)tb1 & 3;
1293 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1294 if (n1 == n && tb1 == tb)
1295 break;
1296 ptb = &tb1->jmp_next[n1];
1298 *ptb = tb->jmp_next[n];
1299 tb->jmp_next[n] = NULL;
1301 /* suppress the jump to next tb in generated code */
1302 tb_reset_jump(tb, n);
1304 /* suppress jumps in the tb on which we could have jumped */
1305 tb_reset_jump_recursive(tb_next);
1309 static void tb_reset_jump_recursive(TranslationBlock *tb)
1311 tb_reset_jump_recursive2(tb, 0);
1312 tb_reset_jump_recursive2(tb, 1);
1315 #if defined(TARGET_HAS_ICE)
1316 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1318 target_phys_addr_t addr;
1319 target_ulong pd;
1320 ram_addr_t ram_addr;
1321 PhysPageDesc *p;
1323 addr = cpu_get_phys_page_debug(env, pc);
1324 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1325 if (!p) {
1326 pd = IO_MEM_UNASSIGNED;
1327 } else {
1328 pd = p->phys_offset;
1330 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1331 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1333 #endif
1335 /* Add a watchpoint. */
1336 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1337 int flags, CPUWatchpoint **watchpoint)
1339 target_ulong len_mask = ~(len - 1);
1340 CPUWatchpoint *wp;
1342 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1343 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1344 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1345 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1346 return -EINVAL;
1348 wp = qemu_malloc(sizeof(*wp));
1350 wp->vaddr = addr;
1351 wp->len_mask = len_mask;
1352 wp->flags = flags;
1354 /* keep all GDB-injected watchpoints in front */
1355 if (flags & BP_GDB)
1356 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1357 else
1358 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1360 tlb_flush_page(env, addr);
1362 if (watchpoint)
1363 *watchpoint = wp;
1364 return 0;
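/* A minimal sketch (not part of the original file) of how a caller such as
   the gdbstub pairs the watchpoint calls: len must be a power of two and
   addr aligned to it, as checked above.  BP_MEM_WRITE is assumed to be one
   of the watchpoint flag bits from cpu-all.h; the function name is
   hypothetical. */
static inline int watchpoint_usage_example(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;
    int ret;

    ret = cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
    if (ret < 0) {
        return ret;
    }
    /* ... run the guest, react to the hit ... */
    cpu_watchpoint_remove_by_ref(env, wp);
    return 0;
}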
1367 /* Remove a specific watchpoint. */
1368 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1369 int flags)
1371 target_ulong len_mask = ~(len - 1);
1372 CPUWatchpoint *wp;
1374 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1375 if (addr == wp->vaddr && len_mask == wp->len_mask
1376 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1377 cpu_watchpoint_remove_by_ref(env, wp);
1378 return 0;
1381 return -ENOENT;
1384 /* Remove a specific watchpoint by reference. */
1385 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1387 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1389 tlb_flush_page(env, watchpoint->vaddr);
1391 qemu_free(watchpoint);
1394 /* Remove all matching watchpoints. */
1395 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1397 CPUWatchpoint *wp, *next;
1399 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1400 if (wp->flags & mask)
1401 cpu_watchpoint_remove_by_ref(env, wp);
1405 /* Add a breakpoint. */
1406 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1407 CPUBreakpoint **breakpoint)
1409 #if defined(TARGET_HAS_ICE)
1410 CPUBreakpoint *bp;
1412 bp = qemu_malloc(sizeof(*bp));
1414 bp->pc = pc;
1415 bp->flags = flags;
1417 /* keep all GDB-injected breakpoints in front */
1418 if (flags & BP_GDB)
1419 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1420 else
1421 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1423 breakpoint_invalidate(env, pc);
1425 if (breakpoint)
1426 *breakpoint = bp;
1427 return 0;
1428 #else
1429 return -ENOSYS;
1430 #endif
1433 /* Remove a specific breakpoint. */
1434 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1436 #if defined(TARGET_HAS_ICE)
1437 CPUBreakpoint *bp;
1439 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1440 if (bp->pc == pc && bp->flags == flags) {
1441 cpu_breakpoint_remove_by_ref(env, bp);
1442 return 0;
1445 return -ENOENT;
1446 #else
1447 return -ENOSYS;
1448 #endif
1451 /* Remove a specific breakpoint by reference. */
1452 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1454 #if defined(TARGET_HAS_ICE)
1455 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1457 breakpoint_invalidate(env, breakpoint->pc);
1459 qemu_free(breakpoint);
1460 #endif
1463 /* Remove all matching breakpoints. */
1464 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1466 #if defined(TARGET_HAS_ICE)
1467 CPUBreakpoint *bp, *next;
1469 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1470 if (bp->flags & mask)
1471 cpu_breakpoint_remove_by_ref(env, bp);
1473 #endif
1476 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1477 CPU loop after each instruction */
1478 void cpu_single_step(CPUState *env, int enabled)
1480 #if defined(TARGET_HAS_ICE)
1481 if (env->singlestep_enabled != enabled) {
1482 env->singlestep_enabled = enabled;
1483 if (kvm_enabled())
1484 kvm_update_guest_debug(env, 0);
1485 else {
1486 /* must flush all the translated code to avoid inconsistencies */
1487 /* XXX: only flush what is necessary */
1488 tb_flush(env);
1491 #endif
1494 /* enable or disable low levels log */
1495 void cpu_set_log(int log_flags)
1497 loglevel = log_flags;
1498 if (loglevel && !logfile) {
1499 logfile = fopen(logfilename, log_append ? "a" : "w");
1500 if (!logfile) {
1501 perror(logfilename);
1502 _exit(1);
1504 #if !defined(CONFIG_SOFTMMU)
1505 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1507 static char logfile_buf[4096];
1508 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1510 #elif !defined(_WIN32)
1511 /* Win32 doesn't support line-buffering and requires size >= 2 */
1512 setvbuf(logfile, NULL, _IOLBF, 0);
1513 #endif
1514 log_append = 1;
1516 if (!loglevel && logfile) {
1517 fclose(logfile);
1518 logfile = NULL;
1522 void cpu_set_log_filename(const char *filename)
1524 logfilename = strdup(filename);
1525 if (logfile) {
1526 fclose(logfile);
1527 logfile = NULL;
1529 cpu_set_log(loglevel);
1532 static void cpu_unlink_tb(CPUState *env)
1534 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1535 problem and hope the cpu will stop of its own accord. For userspace
1536 emulation this often isn't actually as bad as it sounds. Often
1537 signals are used primarily to interrupt blocking syscalls. */
1538 TranslationBlock *tb;
1539 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1541 spin_lock(&interrupt_lock);
1542 tb = env->current_tb;
1543 /* if the cpu is currently executing code, we must unlink it and
1544 all the potentially executing TB */
1545 if (tb) {
1546 env->current_tb = NULL;
1547 tb_reset_jump_recursive(tb);
1549 spin_unlock(&interrupt_lock);
1552 /* mask must never be zero, except for A20 change call */
1553 void cpu_interrupt(CPUState *env, int mask)
1555 int old_mask;
1557 old_mask = env->interrupt_request;
1558 env->interrupt_request |= mask;
1560 #ifndef CONFIG_USER_ONLY
1562     /* If called from iothread context, wake the target cpu in
1563      * case it's halted. */
1565 if (!qemu_cpu_self(env)) {
1566 qemu_cpu_kick(env);
1567 return;
1569 #endif
1571 if (use_icount) {
1572 env->icount_decr.u16.high = 0xffff;
1573 #ifndef CONFIG_USER_ONLY
1574 if (!can_do_io(env)
1575 && (mask & ~old_mask) != 0) {
1576 cpu_abort(env, "Raised interrupt while not in I/O function");
1578 #endif
1579 } else {
1580 cpu_unlink_tb(env);
1584 void cpu_reset_interrupt(CPUState *env, int mask)
1586 env->interrupt_request &= ~mask;
1589 void cpu_exit(CPUState *env)
1591 env->exit_request = 1;
1592 cpu_unlink_tb(env);
1595 const CPULogItem cpu_log_items[] = {
1596 { CPU_LOG_TB_OUT_ASM, "out_asm",
1597 "show generated host assembly code for each compiled TB" },
1598 { CPU_LOG_TB_IN_ASM, "in_asm",
1599 "show target assembly code for each compiled TB" },
1600 { CPU_LOG_TB_OP, "op",
1601 "show micro ops for each compiled TB" },
1602 { CPU_LOG_TB_OP_OPT, "op_opt",
1603 "show micro ops "
1604 #ifdef TARGET_I386
1605 "before eflags optimization and "
1606 #endif
1607 "after liveness analysis" },
1608 { CPU_LOG_INT, "int",
1609 "show interrupts/exceptions in short format" },
1610 { CPU_LOG_EXEC, "exec",
1611 "show trace before each executed TB (lots of logs)" },
1612 { CPU_LOG_TB_CPU, "cpu",
1613 "show CPU state before block translation" },
1614 #ifdef TARGET_I386
1615 { CPU_LOG_PCALL, "pcall",
1616 "show protected mode far calls/returns/exceptions" },
1617 { CPU_LOG_RESET, "cpu_reset",
1618 "show CPU state before CPU resets" },
1619 #endif
1620 #ifdef DEBUG_IOPORT
1621 { CPU_LOG_IOPORT, "ioport",
1622 "show all i/o ports accesses" },
1623 #endif
1624 { 0, NULL, NULL },
1627 #ifndef CONFIG_USER_ONLY
1628 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1629 = QLIST_HEAD_INITIALIZER(memory_client_list);
1631 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1632 ram_addr_t size,
1633 ram_addr_t phys_offset)
1635 CPUPhysMemoryClient *client;
1636 QLIST_FOREACH(client, &memory_client_list, list) {
1637 client->set_memory(client, start_addr, size, phys_offset);
1641 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1642 target_phys_addr_t end)
1644 CPUPhysMemoryClient *client;
1645 QLIST_FOREACH(client, &memory_client_list, list) {
1646 int r = client->sync_dirty_bitmap(client, start, end);
1647 if (r < 0)
1648 return r;
1650 return 0;
1653 static int cpu_notify_migration_log(int enable)
1655 CPUPhysMemoryClient *client;
1656 QLIST_FOREACH(client, &memory_client_list, list) {
1657 int r = client->migration_log(client, enable);
1658 if (r < 0)
1659 return r;
1661 return 0;
1664 static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
1665 CPUPhysMemoryClient *client)
1667 PhysPageDesc *pd;
1668 int l1, l2;
1670 for (l1 = 0; l1 < L1_SIZE; ++l1) {
1671 pd = phys_map[l1];
1672 if (!pd) {
1673 continue;
1675 for (l2 = 0; l2 < L2_SIZE; ++l2) {
1676 if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
1677 continue;
1679 client->set_memory(client, pd[l2].region_offset,
1680 TARGET_PAGE_SIZE, pd[l2].phys_offset);
1685 static void phys_page_for_each(CPUPhysMemoryClient *client)
1687 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
1689 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
1690 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
1691 #endif
1692 void **phys_map = (void **)l1_phys_map;
1693 int l1;
1694 if (!l1_phys_map) {
1695 return;
1697 for (l1 = 0; l1 < L1_SIZE; ++l1) {
1698 if (phys_map[l1]) {
1699 phys_page_for_each_in_l1_map(phys_map[l1], client);
1702 #else
1703 if (!l1_phys_map) {
1704 return;
1706 phys_page_for_each_in_l1_map(l1_phys_map, client);
1707 #endif
1710 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1712 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1713 phys_page_for_each(client);
1716 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1718 QLIST_REMOVE(client, list);
1720 #endif
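/* A minimal sketch (not part of the original file) of a CPUPhysMemoryClient,
   assuming the callback signatures match the calls made by the notifier
   helpers above; the example_* names are hypothetical.  A client like this
   is how an external component (for instance a hypervisor interface) can
   track guest physical memory mappings and dirty logging. */
#if !defined(CONFIG_USER_ONLY)
static void example_set_memory(CPUPhysMemoryClient *client,
                               target_phys_addr_t start_addr,
                               ram_addr_t size, ram_addr_t phys_offset)
{
    /* react to the new mapping of [start_addr, start_addr + size) */
}

static int example_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                     target_phys_addr_t start,
                                     target_phys_addr_t end)
{
    return 0;   /* nothing to synchronize in this sketch */
}

static int example_migration_log(CPUPhysMemoryClient *client, int enable)
{
    return 0;   /* accept dirty logging requests */
}

static CPUPhysMemoryClient example_memory_client = {
    .set_memory = example_set_memory,
    .sync_dirty_bitmap = example_sync_dirty_bitmap,
    .migration_log = example_migration_log,
};
/* a caller would register it once with:
   cpu_register_phys_memory_client(&example_memory_client); */
#endif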
1722 static int cmp1(const char *s1, int n, const char *s2)
1724 if (strlen(s2) != n)
1725 return 0;
1726 return memcmp(s1, s2, n) == 0;
1729 /* takes a comma separated list of log masks. Return 0 if error. */
1730 int cpu_str_to_log_mask(const char *str)
1732 const CPULogItem *item;
1733 int mask;
1734 const char *p, *p1;
1736 p = str;
1737 mask = 0;
1738 for(;;) {
1739 p1 = strchr(p, ',');
1740 if (!p1)
1741 p1 = p + strlen(p);
1742 if(cmp1(p,p1-p,"all")) {
1743 for(item = cpu_log_items; item->mask != 0; item++) {
1744 mask |= item->mask;
1746 } else {
1747 for(item = cpu_log_items; item->mask != 0; item++) {
1748 if (cmp1(p, p1 - p, item->name))
1749 goto found;
1751 return 0;
1753 found:
1754 mask |= item->mask;
1755 if (*p1 != ',')
1756 break;
1757 p = p1 + 1;
1759 return mask;
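/* A minimal sketch (not part of the original file) of the usual pairing of
   the two calls above when handling a "-d item1,item2" style option; the
   function name is hypothetical. */
static inline int set_log_from_string_example(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. arg = "in_asm,exec" */

    if (!mask) {
        return -1;                         /* unknown log item in the list */
    }
    cpu_set_log(mask);
    return 0;
}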
1762 void cpu_abort(CPUState *env, const char *fmt, ...)
1764 va_list ap;
1765 va_list ap2;
1767 va_start(ap, fmt);
1768 va_copy(ap2, ap);
1769 fprintf(stderr, "qemu: fatal: ");
1770 vfprintf(stderr, fmt, ap);
1771 fprintf(stderr, "\n");
1772 #ifdef TARGET_I386
1773 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1774 #else
1775 cpu_dump_state(env, stderr, fprintf, 0);
1776 #endif
1777 if (qemu_log_enabled()) {
1778 qemu_log("qemu: fatal: ");
1779 qemu_log_vprintf(fmt, ap2);
1780 qemu_log("\n");
1781 #ifdef TARGET_I386
1782 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1783 #else
1784 log_cpu_state(env, 0);
1785 #endif
1786 qemu_log_flush();
1787 qemu_log_close();
1789 va_end(ap2);
1790 va_end(ap);
1791 #if defined(CONFIG_USER_ONLY)
1793 struct sigaction act;
1794 sigfillset(&act.sa_mask);
1795 act.sa_handler = SIG_DFL;
1796 sigaction(SIGABRT, &act, NULL);
1798 #endif
1799 abort();
1802 CPUState *cpu_copy(CPUState *env)
1804 CPUState *new_env = cpu_init(env->cpu_model_str);
1805 CPUState *next_cpu = new_env->next_cpu;
1806 int cpu_index = new_env->cpu_index;
1807 #if defined(TARGET_HAS_ICE)
1808 CPUBreakpoint *bp;
1809 CPUWatchpoint *wp;
1810 #endif
1812 memcpy(new_env, env, sizeof(CPUState));
1814 /* Preserve chaining and index. */
1815 new_env->next_cpu = next_cpu;
1816 new_env->cpu_index = cpu_index;
1818 /* Clone all break/watchpoints.
1819 Note: Once we support ptrace with hw-debug register access, make sure
1820 BP_CPU break/watchpoints are handled correctly on clone. */
1821     QTAILQ_INIT(&new_env->breakpoints);
1822     QTAILQ_INIT(&new_env->watchpoints);
1823 #if defined(TARGET_HAS_ICE)
1824 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1825 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1827 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1828 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1829 wp->flags, NULL);
1831 #endif
1833 return new_env;
1836 #if !defined(CONFIG_USER_ONLY)
1838 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1840 unsigned int i;
1842 /* Discard jump cache entries for any tb which might potentially
1843 overlap the flushed page. */
1844 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1845 memset (&env->tb_jmp_cache[i], 0,
1846 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1848 i = tb_jmp_cache_hash_page(addr);
1849 memset (&env->tb_jmp_cache[i], 0,
1850 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1853 static CPUTLBEntry s_cputlb_empty_entry = {
1854 .addr_read = -1,
1855 .addr_write = -1,
1856 .addr_code = -1,
1857 .addend = -1,
1860 /* NOTE: if flush_global is true, also flush global entries (not
1861 implemented yet) */
1862 void tlb_flush(CPUState *env, int flush_global)
1864 int i;
1866 #if defined(DEBUG_TLB)
1867 printf("tlb_flush:\n");
1868 #endif
1869 /* must reset current TB so that interrupts cannot modify the
1870 links while we are modifying them */
1871 env->current_tb = NULL;
1873 for(i = 0; i < CPU_TLB_SIZE; i++) {
1874 int mmu_idx;
1875 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1876 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1880 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1882 tlb_flush_count++;
1885 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1887 if (addr == (tlb_entry->addr_read &
1888 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1889 addr == (tlb_entry->addr_write &
1890 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1891 addr == (tlb_entry->addr_code &
1892 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1893 *tlb_entry = s_cputlb_empty_entry;
1897 void tlb_flush_page(CPUState *env, target_ulong addr)
1899 int i;
1900 int mmu_idx;
1902 #if defined(DEBUG_TLB)
1903 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1904 #endif
1905 /* must reset current TB so that interrupts cannot modify the
1906 links while we are modifying them */
1907 env->current_tb = NULL;
1909 addr &= TARGET_PAGE_MASK;
1910 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1911 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1912 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1914 tlb_flush_jmp_cache(env, addr);
1917 /* update the TLBs so that writes to code in the virtual page 'addr'
1918 can be detected */
1919 static void tlb_protect_code(ram_addr_t ram_addr)
1921 cpu_physical_memory_reset_dirty(ram_addr,
1922 ram_addr + TARGET_PAGE_SIZE,
1923 CODE_DIRTY_FLAG);
1926 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1927 tested for self modifying code */
1928 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1929 target_ulong vaddr)
1931 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
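/* A minimal sketch (not part of the original file): phys_ram_dirty keeps one
   byte of dirty flags per target page, so testing a flag is a single indexed
   load.  CODE_DIRTY_FLAG is the bit manipulated just above; the helper name
   is hypothetical. */
static inline int page_has_code_dirty_flag(ram_addr_t ram_addr)
{
    return (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG) != 0;
}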
1934 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1935 unsigned long start, unsigned long length)
1937 unsigned long addr;
1938 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1939 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1940 if ((addr - start) < length) {
1941 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1946 /* Note: start and end must be within the same ram block. */
1947 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1948 int dirty_flags)
1950 CPUState *env;
1951 unsigned long length, start1;
1952 int i, mask, len;
1953 uint8_t *p;
1955 start &= TARGET_PAGE_MASK;
1956 end = TARGET_PAGE_ALIGN(end);
1958 length = end - start;
1959 if (length == 0)
1960 return;
1961 len = length >> TARGET_PAGE_BITS;
1962 mask = ~dirty_flags;
1963 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1964 for(i = 0; i < len; i++)
1965 p[i] &= mask;
1967 /* we modify the TLB cache so that the dirty bit will be set again
1968 when accessing the range */
1969 start1 = (unsigned long)qemu_get_ram_ptr(start);
1970     /* Check that we don't span multiple blocks - this breaks the
1971 address comparisons below. */
1972 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1973 != (end - 1) - start) {
1974 abort();
1977 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1978 int mmu_idx;
1979 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1980 for(i = 0; i < CPU_TLB_SIZE; i++)
1981 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1982 start1, length);
1987 int cpu_physical_memory_set_dirty_tracking(int enable)
1989 int ret = 0;
1990 in_migration = enable;
1991 if (kvm_enabled()) {
1992 ret = kvm_set_migration_log(enable);
1994 if (ret < 0) {
1995 return ret;
1997 ret = cpu_notify_migration_log(!!enable);
1998 return ret;
2001 int cpu_physical_memory_get_dirty_tracking(void)
2003 return in_migration;
2006 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2007 target_phys_addr_t end_addr)
2009 int ret = 0;
2011 if (kvm_enabled()) {
2012 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
2014 if (ret < 0) {
2015 return ret;
2017 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2018 return ret;
2021 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2023 ram_addr_t ram_addr;
2024 void *p;
2026 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2027 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2028 + tlb_entry->addend);
2029 ram_addr = qemu_ram_addr_from_host(p);
2030 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2031 tlb_entry->addr_write |= TLB_NOTDIRTY;
2036 /* update the TLB according to the current state of the dirty bits */
2037 void cpu_tlb_update_dirty(CPUState *env)
2039 int i;
2040 int mmu_idx;
2041 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2042 for(i = 0; i < CPU_TLB_SIZE; i++)
2043 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2047 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2049 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2050 tlb_entry->addr_write = vaddr;
2053 /* update the TLB corresponding to virtual page vaddr
2054 so that it is no longer dirty */
2055 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2057 int i;
2058 int mmu_idx;
2060 vaddr &= TARGET_PAGE_MASK;
2061 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2062 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2063 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2066 /* add a new TLB entry. At most one entry for a given virtual address
2067 is permitted. Return 0 if OK or 2 if the page could not be mapped
2068 (can only happen in non SOFTMMU mode for I/O pages or pages
2069 conflicting with the host address space). */
2070 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2071 target_phys_addr_t paddr, int prot,
2072 int mmu_idx, int is_softmmu)
2074 PhysPageDesc *p;
2075 unsigned long pd;
2076 unsigned int index;
2077 target_ulong address;
2078 target_ulong code_address;
2079 target_phys_addr_t addend;
2080 int ret;
2081 CPUTLBEntry *te;
2082 CPUWatchpoint *wp;
2083 target_phys_addr_t iotlb;
2085 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2086 if (!p) {
2087 pd = IO_MEM_UNASSIGNED;
2088 } else {
2089 pd = p->phys_offset;
2091 #if defined(DEBUG_TLB)
2092 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2093 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2094 #endif
2096 ret = 0;
2097 address = vaddr;
2098 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2099 /* IO memory case (romd handled later) */
2100 address |= TLB_MMIO;
2102 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2103 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2104 /* Normal RAM. */
2105 iotlb = pd & TARGET_PAGE_MASK;
2106 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2107 iotlb |= IO_MEM_NOTDIRTY;
2108 else
2109 iotlb |= IO_MEM_ROM;
2110 } else {
2111 /* IO handlers are currently passed a physical address.
2112 It would be nice to pass an offset from the base address
2113 of that region. This would avoid having to special case RAM,
2114 and avoid full address decoding in every device.
2115 We can't use the high bits of pd for this because
2116 IO_MEM_ROMD uses these as a ram address. */
2117 iotlb = (pd & ~TARGET_PAGE_MASK);
2118 if (p) {
2119 iotlb += p->region_offset;
2120 } else {
2121 iotlb += paddr;
2125 code_address = address;
2126 /* Make accesses to pages with watchpoints go via the
2127 watchpoint trap routines. */
2128 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2129 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2130 iotlb = io_mem_watch + paddr;
2131 /* TODO: The memory case can be optimized by not trapping
2132 reads of pages with a write breakpoint. */
2133 address |= TLB_MMIO;
2137 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2138 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2139 te = &env->tlb_table[mmu_idx][index];
2140 te->addend = addend - vaddr;
2141 if (prot & PAGE_READ) {
2142 te->addr_read = address;
2143 } else {
2144 te->addr_read = -1;
2147 if (prot & PAGE_EXEC) {
2148 te->addr_code = code_address;
2149 } else {
2150 te->addr_code = -1;
2152 if (prot & PAGE_WRITE) {
2153 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2154 (pd & IO_MEM_ROMD)) {
2155 /* Write access calls the I/O callback. */
2156 te->addr_write = address | TLB_MMIO;
2157 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2158 !cpu_physical_memory_is_dirty(pd)) {
2159 te->addr_write = address | TLB_NOTDIRTY;
2160 } else {
2161 te->addr_write = address;
2163 } else {
2164 te->addr_write = -1;
2166 return ret;
2169 #else
2171 void tlb_flush(CPUState *env, int flush_global)
2175 void tlb_flush_page(CPUState *env, target_ulong addr)
2179 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2180 target_phys_addr_t paddr, int prot,
2181 int mmu_idx, int is_softmmu)
2183 return 0;
2187 * Walks guest process memory "regions" one by one
2188 * and calls callback function 'fn' for each region.
2190 int walk_memory_regions(void *priv,
2191 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2193 unsigned long start, end;
2194 PageDesc *p = NULL;
2195 int i, j, prot, prot1;
2196 int rc = 0;
2198 start = end = -1;
2199 prot = 0;
2201 for (i = 0; i <= L1_SIZE; i++) {
2202 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2203 for (j = 0; j < L2_SIZE; j++) {
2204 prot1 = (p == NULL) ? 0 : p[j].flags;
2206 * "region" is one contiguous chunk of memory
2207 * that has the same protection flags set.
2209 if (prot1 != prot) {
2210 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2211 if (start != -1) {
2212 rc = (*fn)(priv, start, end, prot);
2213 /* callback can stop iteration by returning != 0 */
2214 if (rc != 0)
2215 return (rc);
2217 if (prot1 != 0)
2218 start = end;
2219 else
2220 start = -1;
2221 prot = prot1;
2223 if (p == NULL)
2224 break;
2227 return (rc);
2230 static int dump_region(void *priv, unsigned long start,
2231 unsigned long end, unsigned long prot)
2233 FILE *f = (FILE *)priv;
2235 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2236 start, end, end - start,
2237 ((prot & PAGE_READ) ? 'r' : '-'),
2238 ((prot & PAGE_WRITE) ? 'w' : '-'),
2239 ((prot & PAGE_EXEC) ? 'x' : '-'));
2241 return (0);
2244 /* dump memory mappings */
2245 void page_dump(FILE *f)
2247 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2248 "start", "end", "size", "prot");
2249 walk_memory_regions(f, dump_region);
2252 int page_get_flags(target_ulong address)
2254 PageDesc *p;
2256 p = page_find(address >> TARGET_PAGE_BITS);
2257 if (!p)
2258 return 0;
2259 return p->flags;
2262 /* modify the flags of a page and invalidate the code if
2263 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2264 depending on PAGE_WRITE */
2265 void page_set_flags(target_ulong start, target_ulong end, int flags)
2267 PageDesc *p;
2268 target_ulong addr;
2270 /* mmap_lock should already be held. */
2271 start = start & TARGET_PAGE_MASK;
2272 end = TARGET_PAGE_ALIGN(end);
2273 if (flags & PAGE_WRITE)
2274 flags |= PAGE_WRITE_ORG;
2275 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2276 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2277 /* We may be called for host regions that are outside guest
2278 address space. */
2279 if (!p)
2280 return;
2281 /* if the write protection is set, then we invalidate the code
2282 inside */
2283 if (!(p->flags & PAGE_WRITE) &&
2284 (flags & PAGE_WRITE) &&
2285 p->first_tb) {
2286 tb_invalidate_phys_page(addr, 0, NULL);
2288 p->flags = flags;
2292 int page_check_range(target_ulong start, target_ulong len, int flags)
2294 PageDesc *p;
2295 target_ulong end;
2296 target_ulong addr;
2298 if (start + len < start)
2299 /* we've wrapped around */
2300 return -1;
2302 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2303 start = start & TARGET_PAGE_MASK;
2305 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2306 p = page_find(addr >> TARGET_PAGE_BITS);
2307 if( !p )
2308 return -1;
2309 if( !(p->flags & PAGE_VALID) )
2310 return -1;
2312 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2313 return -1;
2314 if (flags & PAGE_WRITE) {
2315 if (!(p->flags & PAGE_WRITE_ORG))
2316 return -1;
2317 /* unprotect the page if it was put read-only because it
2318 contains translated code */
2319 if (!(p->flags & PAGE_WRITE)) {
2320 if (!page_unprotect(addr, 0, NULL))
2321 return -1;
2323 return 0;
2326 return 0;
2329 /* called from signal handler: invalidate the code and unprotect the
2330 page. Return TRUE if the fault was successfully handled. */
2331 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2333 unsigned int page_index, prot, pindex;
2334 PageDesc *p, *p1;
2335 target_ulong host_start, host_end, addr;
2337 /* Technically this isn't safe inside a signal handler. However we
2338 know this only ever happens in a synchronous SEGV handler, so in
2339 practice it seems to be ok. */
2340 mmap_lock();
2342 host_start = address & qemu_host_page_mask;
2343 page_index = host_start >> TARGET_PAGE_BITS;
2344 p1 = page_find(page_index);
2345 if (!p1) {
2346 mmap_unlock();
2347 return 0;
2349 host_end = host_start + qemu_host_page_size;
2350 p = p1;
2351 prot = 0;
2352 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2353 prot |= p->flags;
2354 p++;
2356 /* if the page was really writable, then we change its
2357 protection back to writable */
2358 if (prot & PAGE_WRITE_ORG) {
2359 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2360 if (!(p1[pindex].flags & PAGE_WRITE)) {
2361 mprotect((void *)g2h(host_start), qemu_host_page_size,
2362 (prot & PAGE_BITS) | PAGE_WRITE);
2363 p1[pindex].flags |= PAGE_WRITE;
2364 /* and since the content will be modified, we must invalidate
2365 the corresponding translated code. */
2366 tb_invalidate_phys_page(address, pc, puc);
2367 #ifdef DEBUG_TB_CHECK
2368 tb_invalidate_check(address);
2369 #endif
2370 mmap_unlock();
2371 return 1;
2374 mmap_unlock();
2375 return 0;
2378 static inline void tlb_set_dirty(CPUState *env,
2379 unsigned long addr, target_ulong vaddr)
2382 #endif /* defined(CONFIG_USER_ONLY) */
2384 #if !defined(CONFIG_USER_ONLY)
2386 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2387 ram_addr_t memory, ram_addr_t region_offset);
2388 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2389 ram_addr_t orig_memory, ram_addr_t region_offset);
2390 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2391 need_subpage) \
2392 do { \
2393 if (addr > start_addr) \
2394 start_addr2 = 0; \
2395 else { \
2396 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2397 if (start_addr2 > 0) \
2398 need_subpage = 1; \
2401 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2402 end_addr2 = TARGET_PAGE_SIZE - 1; \
2403 else { \
2404 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2405 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2406 need_subpage = 1; \
2408 } while (0)
2410 /* register physical memory.
2411 For RAM, 'size' must be a multiple of the target page size.
2412 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2413 io memory page. The address used when calling the IO function is
2414 the offset from the start of the region, plus region_offset. Both
2415 start_addr and region_offset are rounded down to a page boundary
2416 before calculating this offset. This should not be a problem unless
2417 the low bits of start_addr and region_offset differ. */
2418 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2419 ram_addr_t size,
2420 ram_addr_t phys_offset,
2421 ram_addr_t region_offset)
2423 target_phys_addr_t addr, end_addr;
2424 PhysPageDesc *p;
2425 CPUState *env;
2426 ram_addr_t orig_size = size;
2427 void *subpage;
2429 if (kvm_enabled())
2430 kvm_set_phys_mem(start_addr, size, phys_offset);
2432 cpu_notify_set_memory(start_addr, size, phys_offset);
2434 if (phys_offset == IO_MEM_UNASSIGNED) {
2435 region_offset = start_addr;
2437 region_offset &= TARGET_PAGE_MASK;
2438 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2439 end_addr = start_addr + (target_phys_addr_t)size;
2440 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2441 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2442 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2443 ram_addr_t orig_memory = p->phys_offset;
2444 target_phys_addr_t start_addr2, end_addr2;
2445 int need_subpage = 0;
2447 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2448 need_subpage);
2449 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2450 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2451 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2452 &p->phys_offset, orig_memory,
2453 p->region_offset);
2454 } else {
2455 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2456 >> IO_MEM_SHIFT];
2458 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2459 region_offset);
2460 p->region_offset = 0;
2461 } else {
2462 p->phys_offset = phys_offset;
2463 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2464 (phys_offset & IO_MEM_ROMD))
2465 phys_offset += TARGET_PAGE_SIZE;
2467 } else {
2468 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2469 p->phys_offset = phys_offset;
2470 p->region_offset = region_offset;
2471 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2472 (phys_offset & IO_MEM_ROMD)) {
2473 phys_offset += TARGET_PAGE_SIZE;
2474 } else {
2475 target_phys_addr_t start_addr2, end_addr2;
2476 int need_subpage = 0;
2478 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2479 end_addr2, need_subpage);
2481 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2482 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2483 &p->phys_offset, IO_MEM_UNASSIGNED,
2484 addr & TARGET_PAGE_MASK);
2485 subpage_register(subpage, start_addr2, end_addr2,
2486 phys_offset, region_offset);
2487 p->region_offset = 0;
2491 region_offset += TARGET_PAGE_SIZE;
2494 /* since each CPU stores ram addresses in its TLB cache, we must
2495 reset the modified entries */
2496 /* XXX: slow ! */
2497 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2498 tlb_flush(env, 1);
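/* Illustrative sketch (not part of the original file): registering a
   one-page MMIO window with a non-zero region_offset.  'dev_io_index'
   would be a value returned by cpu_register_io_memory() (see below) and
   'base' is hypothetical; with this mapping an access at base + x
   reaches the device handlers with address 0x10000 + x.  Note that
   region_offset is rounded down to a page boundary, so only page-aligned
   offsets are meaningful.

       cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE,
                                           dev_io_index, 0x10000);

   Unmapping is done by registering IO_MEM_UNASSIGNED over the same
   range. */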
2502 /* XXX: temporary until new memory mapping API */
2503 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2505 PhysPageDesc *p;
2507 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2508 if (!p)
2509 return IO_MEM_UNASSIGNED;
2510 return p->phys_offset;
2513 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2515 if (kvm_enabled())
2516 kvm_coalesce_mmio_region(addr, size);
2519 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2521 if (kvm_enabled())
2522 kvm_uncoalesce_mmio_region(addr, size);
2525 void qemu_flush_coalesced_mmio_buffer(void)
2527 if (kvm_enabled())
2528 kvm_flush_coalesced_mmio_buffer();
2531 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2533 RAMBlock *new_block;
2535 size = TARGET_PAGE_ALIGN(size);
2536 new_block = qemu_malloc(sizeof(*new_block));
2538 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2539 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2540 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2541 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2542 #else
2543 new_block->host = qemu_vmalloc(size);
2544 #endif
2545 #ifdef MADV_MERGEABLE
2546 madvise(new_block->host, size, MADV_MERGEABLE);
2547 #endif
2548 new_block->offset = last_ram_offset;
2549 new_block->length = size;
2551 new_block->next = ram_blocks;
2552 ram_blocks = new_block;
2554 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2555 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2556 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2557 0xff, size >> TARGET_PAGE_BITS);
2559 last_ram_offset += size;
2561 if (kvm_enabled())
2562 kvm_setup_guest_memory(new_block->host, size);
2564 return new_block->offset;
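/* Illustrative sketch (not part of the original file): the usual
   board-level pattern for guest RAM - allocate a block, then map it at
   guest physical address 0.  'ram_size' is hypothetical, and
   cpu_register_physical_memory() is assumed to be the region_offset == 0
   convenience wrapper from cpu-common.h.

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
*/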
2567 void qemu_ram_free(ram_addr_t addr)
2569 /* TODO: implement this. */
2572 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2573 With the exception of the softmmu code in this file, this should
2574 only be used for local memory (e.g. video ram) that the device owns,
2575 and knows it isn't going to access beyond the end of the block.
2577 It should not be used for general purpose DMA.
2578 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2580 void *qemu_get_ram_ptr(ram_addr_t addr)
2582 RAMBlock *prev;
2583 RAMBlock **prevp;
2584 RAMBlock *block;
2586 prev = NULL;
2587 prevp = &ram_blocks;
2588 block = ram_blocks;
2589 while (block && (block->offset > addr
2590 || block->offset + block->length <= addr)) {
2591 if (prev)
2592 prevp = &prev->next;
2593 prev = block;
2594 block = block->next;
2596 if (!block) {
2597 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2598 abort();
2600 /* Move this entry to the start of the list. */
2601 if (prev) {
2602 prev->next = block->next;
2603 block->next = *prevp;
2604 *prevp = block;
2606 return block->host + (addr - block->offset);
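/* Illustrative sketch (not part of the original file): a device that
   owns a block of RAM (e.g. video memory) may keep a host pointer to
   it, as the comment above allows.  'vram_size' is hypothetical.

       ram_addr_t vram_offset = qemu_ram_alloc(vram_size);
       uint8_t *vram_ptr = qemu_get_ram_ptr(vram_offset);
*/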
2609 /* Some of the softmmu routines need to translate from a host pointer
2610 (typically a TLB entry) back to a ram offset. */
2611 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2613 RAMBlock *prev;
2614 RAMBlock *block;
2615 uint8_t *host = ptr;
2617 prev = NULL;
2618 block = ram_blocks;
2619 while (block && (block->host > host
2620 || block->host + block->length <= host)) {
2621 prev = block;
2622 block = block->next;
2624 if (!block) {
2625 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2626 abort();
2628 return block->offset + (host - block->host);
2631 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2633 #ifdef DEBUG_UNASSIGNED
2634 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2635 #endif
2636 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2637 do_unassigned_access(addr, 0, 0, 0, 1);
2638 #endif
2639 return 0;
2642 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2644 #ifdef DEBUG_UNASSIGNED
2645 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2646 #endif
2647 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2648 do_unassigned_access(addr, 0, 0, 0, 2);
2649 #endif
2650 return 0;
2653 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2655 #ifdef DEBUG_UNASSIGNED
2656 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2657 #endif
2658 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2659 do_unassigned_access(addr, 0, 0, 0, 4);
2660 #endif
2661 return 0;
2664 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2666 #ifdef DEBUG_UNASSIGNED
2667 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2668 #endif
2669 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2670 do_unassigned_access(addr, 1, 0, 0, 1);
2671 #endif
2674 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2676 #ifdef DEBUG_UNASSIGNED
2677 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2678 #endif
2679 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2680 do_unassigned_access(addr, 1, 0, 0, 2);
2681 #endif
2684 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2686 #ifdef DEBUG_UNASSIGNED
2687 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2688 #endif
2689 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2690 do_unassigned_access(addr, 1, 0, 0, 4);
2691 #endif
2694 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2695 unassigned_mem_readb,
2696 unassigned_mem_readw,
2697 unassigned_mem_readl,
2700 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2701 unassigned_mem_writeb,
2702 unassigned_mem_writew,
2703 unassigned_mem_writel,
2706 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2707 uint32_t val)
2709 int dirty_flags;
2710 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2711 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2712 #if !defined(CONFIG_USER_ONLY)
2713 tb_invalidate_phys_page_fast(ram_addr, 1);
2714 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2715 #endif
2717 stb_p(qemu_get_ram_ptr(ram_addr), val);
2718 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2719 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2720 /* we remove the notdirty callback only if the code has been
2721 flushed */
2722 if (dirty_flags == 0xff)
2723 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2726 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2727 uint32_t val)
2729 int dirty_flags;
2730 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2731 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2732 #if !defined(CONFIG_USER_ONLY)
2733 tb_invalidate_phys_page_fast(ram_addr, 2);
2734 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2735 #endif
2737 stw_p(qemu_get_ram_ptr(ram_addr), val);
2738 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2739 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2740 /* we remove the notdirty callback only if the code has been
2741 flushed */
2742 if (dirty_flags == 0xff)
2743 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2746 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2747 uint32_t val)
2749 int dirty_flags;
2750 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2751 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2752 #if !defined(CONFIG_USER_ONLY)
2753 tb_invalidate_phys_page_fast(ram_addr, 4);
2754 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2755 #endif
2757 stl_p(qemu_get_ram_ptr(ram_addr), val);
2758 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2759 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2760 /* we remove the notdirty callback only if the code has been
2761 flushed */
2762 if (dirty_flags == 0xff)
2763 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2766 static CPUReadMemoryFunc * const error_mem_read[3] = {
2767 NULL, /* never used */
2768 NULL, /* never used */
2769 NULL, /* never used */
2772 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2773 notdirty_mem_writeb,
2774 notdirty_mem_writew,
2775 notdirty_mem_writel,
2778 /* Generate a debug exception if a watchpoint has been hit. */
2779 static void check_watchpoint(int offset, int len_mask, int flags)
2781 CPUState *env = cpu_single_env;
2782 target_ulong pc, cs_base;
2783 TranslationBlock *tb;
2784 target_ulong vaddr;
2785 CPUWatchpoint *wp;
2786 int cpu_flags;
2788 if (env->watchpoint_hit) {
2789 /* We re-entered the check after replacing the TB. Now raise
2790 * the debug interrupt so that it will trigger after the
2791 * current instruction. */
2792 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2793 return;
2795 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2796 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2797 if ((vaddr == (wp->vaddr & len_mask) ||
2798 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2799 wp->flags |= BP_WATCHPOINT_HIT;
2800 if (!env->watchpoint_hit) {
2801 env->watchpoint_hit = wp;
2802 tb = tb_find_pc(env->mem_io_pc);
2803 if (!tb) {
2804 cpu_abort(env, "check_watchpoint: could not find TB for "
2805 "pc=%p", (void *)env->mem_io_pc);
2807 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2808 tb_phys_invalidate(tb, -1);
2809 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2810 env->exception_index = EXCP_DEBUG;
2811 } else {
2812 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2813 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2815 cpu_resume_from_signal(env, NULL);
2817 } else {
2818 wp->flags &= ~BP_WATCHPOINT_HIT;
2823 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2824 so these check for a hit then pass through to the normal out-of-line
2825 phys routines. */
2826 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2828 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2829 return ldub_phys(addr);
2832 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2834 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2835 return lduw_phys(addr);
2838 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2840 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2841 return ldl_phys(addr);
2844 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2845 uint32_t val)
2847 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2848 stb_phys(addr, val);
2851 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2852 uint32_t val)
2854 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2855 stw_phys(addr, val);
2858 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2859 uint32_t val)
2861 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2862 stl_phys(addr, val);
2865 static CPUReadMemoryFunc * const watch_mem_read[3] = {
2866 watch_mem_readb,
2867 watch_mem_readw,
2868 watch_mem_readl,
2871 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2872 watch_mem_writeb,
2873 watch_mem_writew,
2874 watch_mem_writel,
2877 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2878 unsigned int len)
2880 uint32_t ret;
2881 unsigned int idx;
2883 idx = SUBPAGE_IDX(addr);
2884 #if defined(DEBUG_SUBPAGE)
2885 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2886 mmio, len, addr, idx);
2887 #endif
2888 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2889 addr + mmio->region_offset[idx][0][len]);
2891 return ret;
2894 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2895 uint32_t value, unsigned int len)
2897 unsigned int idx;
2899 idx = SUBPAGE_IDX(addr);
2900 #if defined(DEBUG_SUBPAGE)
2901 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2902 mmio, len, addr, idx, value);
2903 #endif
2904 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2905 addr + mmio->region_offset[idx][1][len],
2906 value);
2909 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2911 #if defined(DEBUG_SUBPAGE)
2912 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2913 #endif
2915 return subpage_readlen(opaque, addr, 0);
2918 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2919 uint32_t value)
2921 #if defined(DEBUG_SUBPAGE)
2922 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2923 #endif
2924 subpage_writelen(opaque, addr, value, 0);
2927 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2929 #if defined(DEBUG_SUBPAGE)
2930 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2931 #endif
2933 return subpage_readlen(opaque, addr, 1);
2936 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2937 uint32_t value)
2939 #if defined(DEBUG_SUBPAGE)
2940 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2941 #endif
2942 subpage_writelen(opaque, addr, value, 1);
2945 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2947 #if defined(DEBUG_SUBPAGE)
2948 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2949 #endif
2951 return subpage_readlen(opaque, addr, 2);
2954 static void subpage_writel (void *opaque,
2955 target_phys_addr_t addr, uint32_t value)
2957 #if defined(DEBUG_SUBPAGE)
2958 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2959 #endif
2960 subpage_writelen(opaque, addr, value, 2);
2963 static CPUReadMemoryFunc * const subpage_read[] = {
2964 &subpage_readb,
2965 &subpage_readw,
2966 &subpage_readl,
2969 static CPUWriteMemoryFunc * const subpage_write[] = {
2970 &subpage_writeb,
2971 &subpage_writew,
2972 &subpage_writel,
2975 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2976 ram_addr_t memory, ram_addr_t region_offset)
2978 int idx, eidx;
2979 unsigned int i;
2981 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2982 return -1;
2983 idx = SUBPAGE_IDX(start);
2984 eidx = SUBPAGE_IDX(end);
2985 #if defined(DEBUG_SUBPAGE)
2986 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2987 mmio, start, end, idx, eidx, memory);
2988 #endif
2989 memory >>= IO_MEM_SHIFT;
2990 for (; idx <= eidx; idx++) {
2991 for (i = 0; i < 4; i++) {
2992 if (io_mem_read[memory][i]) {
2993 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2994 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2995 mmio->region_offset[idx][0][i] = region_offset;
2997 if (io_mem_write[memory][i]) {
2998 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2999 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3000 mmio->region_offset[idx][1][i] = region_offset;
3005 return 0;
3008 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3009 ram_addr_t orig_memory, ram_addr_t region_offset)
3011 subpage_t *mmio;
3012 int subpage_memory;
3014 mmio = qemu_mallocz(sizeof(subpage_t));
3016 mmio->base = base;
3017 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3018 #if defined(DEBUG_SUBPAGE)
3019 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3020 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3021 #endif
3022 *phys = subpage_memory | IO_MEM_SUBPAGE;
3023 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3024 region_offset);
3026 return mmio;
3029 static int get_free_io_mem_idx(void)
3031 int i;
3033 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3034 if (!io_mem_used[i]) {
3035 io_mem_used[i] = 1;
3036 return i;
3038 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3039 return -1;
3042 /* mem_read and mem_write are arrays of functions containing the
3043 function to access byte (index 0), word (index 1) and dword (index
3044 2). Functions can be omitted with a NULL function pointer.
3045 If io_index is non-zero, the corresponding io zone is
3046 modified. If it is zero, a new io zone is allocated. The return
3047 value can be used with cpu_register_physical_memory(); -1 is
3048 returned on error. */
3049 static int cpu_register_io_memory_fixed(int io_index,
3050 CPUReadMemoryFunc * const *mem_read,
3051 CPUWriteMemoryFunc * const *mem_write,
3052 void *opaque)
3054 int i, subwidth = 0;
3056 if (io_index <= 0) {
3057 io_index = get_free_io_mem_idx();
3058 if (io_index == -1)
3059 return io_index;
3060 } else {
3061 io_index >>= IO_MEM_SHIFT;
3062 if (io_index >= IO_MEM_NB_ENTRIES)
3063 return -1;
3066 for(i = 0;i < 3; i++) {
3067 if (!mem_read[i] || !mem_write[i])
3068 subwidth = IO_MEM_SUBWIDTH;
3069 io_mem_read[io_index][i] = mem_read[i];
3070 io_mem_write[io_index][i] = mem_write[i];
3072 io_mem_opaque[io_index] = opaque;
3073 return (io_index << IO_MEM_SHIFT) | subwidth;
3076 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3077 CPUWriteMemoryFunc * const *mem_write,
3078 void *opaque)
3080 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
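/* Illustrative sketch (not part of the original file): minimal MMIO
   handlers for a hypothetical device and their registration.
   'MyDevState', 'mydev_*' and 'base' are invented names; the read/write
   tables follow the byte/word/dword layout described above, with the
   narrower accesses omitted (NULL).

       typedef struct MyDevState { uint32_t status; } MyDevState;

       static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
       {
           MyDevState *s = opaque;
           return s->status;
       }

       static void mydev_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
       {
           MyDevState *s = opaque;
           s->status = val;
       }

       static CPUReadMemoryFunc * const mydev_read[3] = {
           NULL, NULL, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           NULL, NULL, mydev_writel,
       };

       MyDevState *s = qemu_mallocz(sizeof(*s));
       int io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
*/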
3083 void cpu_unregister_io_memory(int io_table_address)
3085 int i;
3086 int io_index = io_table_address >> IO_MEM_SHIFT;
3088 for (i=0;i < 3; i++) {
3089 io_mem_read[io_index][i] = unassigned_mem_read[i];
3090 io_mem_write[io_index][i] = unassigned_mem_write[i];
3092 io_mem_opaque[io_index] = NULL;
3093 io_mem_used[io_index] = 0;
3096 static void io_mem_init(void)
3098 int i;
3100 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3101 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3102 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3103 for (i=0; i<5; i++)
3104 io_mem_used[i] = 1;
3106 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3107 watch_mem_write, NULL);
3110 #endif /* !defined(CONFIG_USER_ONLY) */
3112 /* physical memory access (slow version, mainly for debug) */
3113 #if defined(CONFIG_USER_ONLY)
3114 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3115 int len, int is_write)
3117 int l, flags;
3118 target_ulong page;
3119 void * p;
3121 while (len > 0) {
3122 page = addr & TARGET_PAGE_MASK;
3123 l = (page + TARGET_PAGE_SIZE) - addr;
3124 if (l > len)
3125 l = len;
3126 flags = page_get_flags(page);
3127 if (!(flags & PAGE_VALID))
3128 return;
3129 if (is_write) {
3130 if (!(flags & PAGE_WRITE))
3131 return;
3132 /* XXX: this code should not depend on lock_user */
3133 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3134 /* FIXME - should this return an error rather than just fail? */
3135 return;
3136 memcpy(p, buf, l);
3137 unlock_user(p, addr, l);
3138 } else {
3139 if (!(flags & PAGE_READ))
3140 return;
3141 /* XXX: this code should not depend on lock_user */
3142 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3143 /* FIXME - should this return an error rather than just fail? */
3144 return;
3145 memcpy(buf, p, l);
3146 unlock_user(p, addr, 0);
3148 len -= l;
3149 buf += l;
3150 addr += l;
3154 #else
3155 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3156 int len, int is_write)
3158 int l, io_index;
3159 uint8_t *ptr;
3160 uint32_t val;
3161 target_phys_addr_t page;
3162 unsigned long pd;
3163 PhysPageDesc *p;
3165 while (len > 0) {
3166 page = addr & TARGET_PAGE_MASK;
3167 l = (page + TARGET_PAGE_SIZE) - addr;
3168 if (l > len)
3169 l = len;
3170 p = phys_page_find(page >> TARGET_PAGE_BITS);
3171 if (!p) {
3172 pd = IO_MEM_UNASSIGNED;
3173 } else {
3174 pd = p->phys_offset;
3177 if (is_write) {
3178 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3179 target_phys_addr_t addr1 = addr;
3180 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3181 if (p)
3182 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3183 /* XXX: could force cpu_single_env to NULL to avoid
3184 potential bugs */
3185 if (l >= 4 && ((addr1 & 3) == 0)) {
3186 /* 32 bit write access */
3187 val = ldl_p(buf);
3188 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3189 l = 4;
3190 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3191 /* 16 bit write access */
3192 val = lduw_p(buf);
3193 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3194 l = 2;
3195 } else {
3196 /* 8 bit write access */
3197 val = ldub_p(buf);
3198 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3199 l = 1;
3201 } else {
3202 unsigned long addr1;
3203 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3204 /* RAM case */
3205 ptr = qemu_get_ram_ptr(addr1);
3206 memcpy(ptr, buf, l);
3207 if (!cpu_physical_memory_is_dirty(addr1)) {
3208 /* invalidate code */
3209 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3210 /* set dirty bit */
3211 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3212 (0xff & ~CODE_DIRTY_FLAG);
3215 } else {
3216 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3217 !(pd & IO_MEM_ROMD)) {
3218 target_phys_addr_t addr1 = addr;
3219 /* I/O case */
3220 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3221 if (p)
3222 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3223 if (l >= 4 && ((addr1 & 3) == 0)) {
3224 /* 32 bit read access */
3225 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3226 stl_p(buf, val);
3227 l = 4;
3228 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3229 /* 16 bit read access */
3230 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3231 stw_p(buf, val);
3232 l = 2;
3233 } else {
3234 /* 8 bit read access */
3235 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3236 stb_p(buf, val);
3237 l = 1;
3239 } else {
3240 /* RAM case */
3241 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3242 (addr & ~TARGET_PAGE_MASK);
3243 memcpy(buf, ptr, l);
3246 len -= l;
3247 buf += l;
3248 addr += l;
3252 /* used for ROM loading : can write in RAM and ROM */
3253 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3254 const uint8_t *buf, int len)
3256 int l;
3257 uint8_t *ptr;
3258 target_phys_addr_t page;
3259 unsigned long pd;
3260 PhysPageDesc *p;
3262 while (len > 0) {
3263 page = addr & TARGET_PAGE_MASK;
3264 l = (page + TARGET_PAGE_SIZE) - addr;
3265 if (l > len)
3266 l = len;
3267 p = phys_page_find(page >> TARGET_PAGE_BITS);
3268 if (!p) {
3269 pd = IO_MEM_UNASSIGNED;
3270 } else {
3271 pd = p->phys_offset;
3274 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3275 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3276 !(pd & IO_MEM_ROMD)) {
3277 /* do nothing */
3278 } else {
3279 unsigned long addr1;
3280 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3281 /* ROM/RAM case */
3282 ptr = qemu_get_ram_ptr(addr1);
3283 memcpy(ptr, buf, l);
3285 len -= l;
3286 buf += l;
3287 addr += l;
3291 typedef struct {
3292 void *buffer;
3293 target_phys_addr_t addr;
3294 target_phys_addr_t len;
3295 } BounceBuffer;
3297 static BounceBuffer bounce;
3299 typedef struct MapClient {
3300 void *opaque;
3301 void (*callback)(void *opaque);
3302 QLIST_ENTRY(MapClient) link;
3303 } MapClient;
3305 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3306 = QLIST_HEAD_INITIALIZER(map_client_list);
3308 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3310 MapClient *client = qemu_malloc(sizeof(*client));
3312 client->opaque = opaque;
3313 client->callback = callback;
3314 QLIST_INSERT_HEAD(&map_client_list, client, link);
3315 return client;
3318 void cpu_unregister_map_client(void *_client)
3320 MapClient *client = (MapClient *)_client;
3322 QLIST_REMOVE(client, link);
3323 qemu_free(client);
3326 static void cpu_notify_map_clients(void)
3328 MapClient *client;
3330 while (!QLIST_EMPTY(&map_client_list)) {
3331 client = QLIST_FIRST(&map_client_list);
3332 client->callback(client->opaque);
3333 cpu_unregister_map_client(client);
3337 /* Map a physical memory region into a host virtual address.
3338 * May map a subset of the requested range, given by and returned in *plen.
3339 * May return NULL if resources needed to perform the mapping are exhausted.
3340 * Use only for reads OR writes - not for read-modify-write operations.
3341 * Use cpu_register_map_client() to know when retrying the map operation is
3342 * likely to succeed.
3344 void *cpu_physical_memory_map(target_phys_addr_t addr,
3345 target_phys_addr_t *plen,
3346 int is_write)
3348 target_phys_addr_t len = *plen;
3349 target_phys_addr_t done = 0;
3350 int l;
3351 uint8_t *ret = NULL;
3352 uint8_t *ptr;
3353 target_phys_addr_t page;
3354 unsigned long pd;
3355 PhysPageDesc *p;
3356 unsigned long addr1;
3358 while (len > 0) {
3359 page = addr & TARGET_PAGE_MASK;
3360 l = (page + TARGET_PAGE_SIZE) - addr;
3361 if (l > len)
3362 l = len;
3363 p = phys_page_find(page >> TARGET_PAGE_BITS);
3364 if (!p) {
3365 pd = IO_MEM_UNASSIGNED;
3366 } else {
3367 pd = p->phys_offset;
3370 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3371 if (done || bounce.buffer) {
3372 break;
3374 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3375 bounce.addr = addr;
3376 bounce.len = l;
3377 if (!is_write) {
3378 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3380 ptr = bounce.buffer;
3381 } else {
3382 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3383 ptr = qemu_get_ram_ptr(addr1);
3385 if (!done) {
3386 ret = ptr;
3387 } else if (ret + done != ptr) {
3388 break;
3391 len -= l;
3392 addr += l;
3393 done += l;
3395 *plen = done;
3396 return ret;
3399 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3400 * Will also mark the memory as dirty if is_write == 1. access_len gives
3401 * the amount of memory that was actually read or written by the caller.
3403 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3404 int is_write, target_phys_addr_t access_len)
3406 if (buffer != bounce.buffer) {
3407 if (is_write) {
3408 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3409 while (access_len) {
3410 unsigned l;
3411 l = TARGET_PAGE_SIZE;
3412 if (l > access_len)
3413 l = access_len;
3414 if (!cpu_physical_memory_is_dirty(addr1)) {
3415 /* invalidate code */
3416 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3417 /* set dirty bit */
3418 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3419 (0xff & ~CODE_DIRTY_FLAG);
3421 addr1 += l;
3422 access_len -= l;
3425 return;
3427 if (is_write) {
3428 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3430 qemu_vfree(bounce.buffer);
3431 bounce.buffer = NULL;
3432 cpu_notify_map_clients();
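/* Illustrative sketch (not part of the original file): the zero-copy
   DMA pattern built on the two functions above, with a fall back to
   cpu_physical_memory_rw() when the mapping comes back short or NULL.
   'dma_addr', 'dma_len', 'tmp' and fill_buffer() are hypothetical; the
   device is writing into guest memory, hence is_write == 1.

       target_phys_addr_t plen = dma_len;
       void *buf = cpu_physical_memory_map(dma_addr, &plen, 1);
       if (buf && plen == dma_len) {
           fill_buffer(buf, dma_len);
           cpu_physical_memory_unmap(buf, plen, 1, dma_len);
       } else {
           if (buf)
               cpu_physical_memory_unmap(buf, plen, 1, 0);
           fill_buffer(tmp, dma_len);
           cpu_physical_memory_rw(dma_addr, tmp, dma_len, 1);
       }
*/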
3435 /* warning: addr must be aligned */
3436 uint32_t ldl_phys(target_phys_addr_t addr)
3438 int io_index;
3439 uint8_t *ptr;
3440 uint32_t val;
3441 unsigned long pd;
3442 PhysPageDesc *p;
3444 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3445 if (!p) {
3446 pd = IO_MEM_UNASSIGNED;
3447 } else {
3448 pd = p->phys_offset;
3451 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3452 !(pd & IO_MEM_ROMD)) {
3453 /* I/O case */
3454 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3455 if (p)
3456 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3457 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3458 } else {
3459 /* RAM case */
3460 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3461 (addr & ~TARGET_PAGE_MASK);
3462 val = ldl_p(ptr);
3464 return val;
3467 /* warning: addr must be aligned */
3468 uint64_t ldq_phys(target_phys_addr_t addr)
3470 int io_index;
3471 uint8_t *ptr;
3472 uint64_t val;
3473 unsigned long pd;
3474 PhysPageDesc *p;
3476 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3477 if (!p) {
3478 pd = IO_MEM_UNASSIGNED;
3479 } else {
3480 pd = p->phys_offset;
3483 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3484 !(pd & IO_MEM_ROMD)) {
3485 /* I/O case */
3486 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3487 if (p)
3488 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3489 #ifdef TARGET_WORDS_BIGENDIAN
3490 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3491 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3492 #else
3493 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3494 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3495 #endif
3496 } else {
3497 /* RAM case */
3498 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3499 (addr & ~TARGET_PAGE_MASK);
3500 val = ldq_p(ptr);
3502 return val;
3505 /* XXX: optimize */
3506 uint32_t ldub_phys(target_phys_addr_t addr)
3508 uint8_t val;
3509 cpu_physical_memory_read(addr, &val, 1);
3510 return val;
3513 /* XXX: optimize */
3514 uint32_t lduw_phys(target_phys_addr_t addr)
3516 uint16_t val;
3517 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3518 return tswap16(val);
3521 /* warning: addr must be aligned. The ram page is not marked as dirty
3522 and the code inside is not invalidated. It is useful if the dirty
3523 bits are used to track modified PTEs */
3524 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3526 int io_index;
3527 uint8_t *ptr;
3528 unsigned long pd;
3529 PhysPageDesc *p;
3531 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3532 if (!p) {
3533 pd = IO_MEM_UNASSIGNED;
3534 } else {
3535 pd = p->phys_offset;
3538 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3539 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3540 if (p)
3541 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3542 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3543 } else {
3544 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3545 ptr = qemu_get_ram_ptr(addr1);
3546 stl_p(ptr, val);
3548 if (unlikely(in_migration)) {
3549 if (!cpu_physical_memory_is_dirty(addr1)) {
3550 /* invalidate code */
3551 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3552 /* set dirty bit */
3553 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3554 (0xff & ~CODE_DIRTY_FLAG);
3560 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3562 int io_index;
3563 uint8_t *ptr;
3564 unsigned long pd;
3565 PhysPageDesc *p;
3567 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3568 if (!p) {
3569 pd = IO_MEM_UNASSIGNED;
3570 } else {
3571 pd = p->phys_offset;
3574 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3575 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3576 if (p)
3577 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3578 #ifdef TARGET_WORDS_BIGENDIAN
3579 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3580 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3581 #else
3582 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3583 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3584 #endif
3585 } else {
3586 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3587 (addr & ~TARGET_PAGE_MASK);
3588 stq_p(ptr, val);
3592 /* warning: addr must be aligned */
3593 void stl_phys(target_phys_addr_t addr, uint32_t val)
3595 int io_index;
3596 uint8_t *ptr;
3597 unsigned long pd;
3598 PhysPageDesc *p;
3600 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3601 if (!p) {
3602 pd = IO_MEM_UNASSIGNED;
3603 } else {
3604 pd = p->phys_offset;
3607 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3608 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3609 if (p)
3610 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3611 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3612 } else {
3613 unsigned long addr1;
3614 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3615 /* RAM case */
3616 ptr = qemu_get_ram_ptr(addr1);
3617 stl_p(ptr, val);
3618 if (!cpu_physical_memory_is_dirty(addr1)) {
3619 /* invalidate code */
3620 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3621 /* set dirty bit */
3622 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3623 (0xff & ~CODE_DIRTY_FLAG);
3628 /* XXX: optimize */
3629 void stb_phys(target_phys_addr_t addr, uint32_t val)
3631 uint8_t v = val;
3632 cpu_physical_memory_write(addr, &v, 1);
3635 /* XXX: optimize */
3636 void stw_phys(target_phys_addr_t addr, uint32_t val)
3638 uint16_t v = tswap16(val);
3639 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3642 /* XXX: optimize */
3643 void stq_phys(target_phys_addr_t addr, uint64_t val)
3645 val = tswap64(val);
3646 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
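/* Illustrative sketch (not part of the original file): device models
   typically use the helpers above to read and update small
   guest-physical structures such as DMA descriptors.  'desc_pa' and
   'DESC_DONE' are hypothetical.

       uint32_t ctrl = ldl_phys(desc_pa);
       uint64_t buf_pa = ldq_phys(desc_pa + 8);
       ...
       stl_phys(desc_pa, ctrl | DESC_DONE);
*/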
3649 #endif
3651 /* virtual memory access for debug (includes writing to ROM) */
3652 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3653 uint8_t *buf, int len, int is_write)
3655 int l;
3656 target_phys_addr_t phys_addr;
3657 target_ulong page;
3659 while (len > 0) {
3660 page = addr & TARGET_PAGE_MASK;
3661 phys_addr = cpu_get_phys_page_debug(env, page);
3662 /* if no physical page mapped, return an error */
3663 if (phys_addr == -1)
3664 return -1;
3665 l = (page + TARGET_PAGE_SIZE) - addr;
3666 if (l > len)
3667 l = len;
3668 phys_addr += (addr & ~TARGET_PAGE_MASK);
3669 #if !defined(CONFIG_USER_ONLY)
3670 if (is_write)
3671 cpu_physical_memory_write_rom(phys_addr, buf, l);
3672 else
3673 #endif
3674 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3675 len -= l;
3676 buf += l;
3677 addr += l;
3679 return 0;
3682 /* in deterministic execution mode, instructions doing device I/Os
3683 must be at the end of the TB */
3684 void cpu_io_recompile(CPUState *env, void *retaddr)
3686 TranslationBlock *tb;
3687 uint32_t n, cflags;
3688 target_ulong pc, cs_base;
3689 uint64_t flags;
3691 tb = tb_find_pc((unsigned long)retaddr);
3692 if (!tb) {
3693 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3694 retaddr);
3696 n = env->icount_decr.u16.low + tb->icount;
3697 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3698 /* Calculate how many instructions had been executed before the fault
3699 occurred. */
3700 n = n - env->icount_decr.u16.low;
3701 /* Generate a new TB ending on the I/O insn. */
3702 n++;
3703 /* On MIPS and SH, delay slot instructions can only be restarted if
3704 they were already the first instruction in the TB. If this is not
3705 the first instruction in a TB then re-execute the preceding
3706 branch. */
3707 #if defined(TARGET_MIPS)
3708 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3709 env->active_tc.PC -= 4;
3710 env->icount_decr.u16.low++;
3711 env->hflags &= ~MIPS_HFLAG_BMASK;
3713 #elif defined(TARGET_SH4)
3714 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3715 && n > 1) {
3716 env->pc -= 2;
3717 env->icount_decr.u16.low++;
3718 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3720 #endif
3721 /* This should never happen. */
3722 if (n > CF_COUNT_MASK)
3723 cpu_abort(env, "TB too big during recompile");
3725 cflags = n | CF_LAST_IO;
3726 pc = tb->pc;
3727 cs_base = tb->cs_base;
3728 flags = tb->flags;
3729 tb_phys_invalidate(tb, -1);
3730 /* FIXME: In theory this could raise an exception. In practice
3731 we have already translated the block once so it's probably ok. */
3732 tb_gen_code(env, pc, cs_base, flags, cflags);
3733 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3734 the first in the TB) then we end up generating a whole new TB and
3735 repeating the fault, which is horribly inefficient.
3736 Better would be to execute just this insn uncached, or generate a
3737 second new TB. */
3738 cpu_resume_from_signal(env, NULL);
3741 void dump_exec_info(FILE *f,
3742 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3744 int i, target_code_size, max_target_code_size;
3745 int direct_jmp_count, direct_jmp2_count, cross_page;
3746 TranslationBlock *tb;
3748 target_code_size = 0;
3749 max_target_code_size = 0;
3750 cross_page = 0;
3751 direct_jmp_count = 0;
3752 direct_jmp2_count = 0;
3753 for(i = 0; i < nb_tbs; i++) {
3754 tb = &tbs[i];
3755 target_code_size += tb->size;
3756 if (tb->size > max_target_code_size)
3757 max_target_code_size = tb->size;
3758 if (tb->page_addr[1] != -1)
3759 cross_page++;
3760 if (tb->tb_next_offset[0] != 0xffff) {
3761 direct_jmp_count++;
3762 if (tb->tb_next_offset[1] != 0xffff) {
3763 direct_jmp2_count++;
3767 /* XXX: avoid using doubles ? */
3768 cpu_fprintf(f, "Translation buffer state:\n");
3769 cpu_fprintf(f, "gen code size %ld/%ld\n",
3770 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3771 cpu_fprintf(f, "TB count %d/%d\n",
3772 nb_tbs, code_gen_max_blocks);
3773 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3774 nb_tbs ? target_code_size / nb_tbs : 0,
3775 max_target_code_size);
3776 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3777 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3778 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3779 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3780 cross_page,
3781 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3782 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3783 direct_jmp_count,
3784 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3785 direct_jmp2_count,
3786 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3787 cpu_fprintf(f, "\nStatistics:\n");
3788 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3789 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3790 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3791 tcg_dump_info(f, cpu_fprintf);
3794 #if !defined(CONFIG_USER_ONLY)
3796 #define MMUSUFFIX _cmmu
3797 #define GETPC() NULL
3798 #define env cpu_single_env
3799 #define SOFTMMU_CODE_ACCESS
3801 #define SHIFT 0
3802 #include "softmmu_template.h"
3804 #define SHIFT 1
3805 #include "softmmu_template.h"
3807 #define SHIFT 2
3808 #include "softmmu_template.h"
3810 #define SHIFT 3
3811 #include "softmmu_template.h"
3813 #undef env
3815 #endif