[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "osdep.h"
46 #include "kvm.h"
47 #if defined(CONFIG_USER_ONLY)
48 #include <qemu.h>
49 #endif
51 //#define DEBUG_TB_INVALIDATE
52 //#define DEBUG_FLUSH
53 //#define DEBUG_TLB
54 //#define DEBUG_UNASSIGNED
56 /* make various TB consistency checks */
57 //#define DEBUG_TB_CHECK
58 //#define DEBUG_TLB_CHECK
60 //#define DEBUG_IOPORT
61 //#define DEBUG_SUBPAGE
63 #if !defined(CONFIG_USER_ONLY)
64 /* TB consistency checks only implemented for usermode emulation. */
65 #undef DEBUG_TB_CHECK
66 #endif
68 #define SMC_BITMAP_USE_THRESHOLD 10
70 #if defined(TARGET_SPARC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 41
72 #elif defined(TARGET_SPARC)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 36
74 #elif defined(TARGET_ALPHA)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #define TARGET_VIRT_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_PPC64)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 #elif defined(TARGET_IA64)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
85 #else
86 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
87 #define TARGET_PHYS_ADDR_SPACE_BITS 32
88 #endif
90 static TranslationBlock *tbs;
91 int code_gen_max_blocks;
92 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
93 static int nb_tbs;
94 /* any access to the tbs or the page table must use this lock */
95 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
97 #if defined(__arm__) || defined(__sparc_v9__)
98 /* The prologue must be reachable with a direct jump. ARM and Sparc64
99 have limited branch ranges (possibly also PPC) so place it in a
100 section close to code segment. */
101 #define code_gen_section \
102 __attribute__((__section__(".gen_code"))) \
103 __attribute__((aligned (32)))
104 #else
105 #define code_gen_section \
106 __attribute__((aligned (32)))
107 #endif
109 uint8_t code_gen_prologue[1024] code_gen_section;
110 static uint8_t *code_gen_buffer;
111 static unsigned long code_gen_buffer_size;
112 /* threshold to flush the translated code buffer */
113 static unsigned long code_gen_buffer_max_size;
114 uint8_t *code_gen_ptr;
116 #if !defined(CONFIG_USER_ONLY)
117 int phys_ram_fd;
118 uint8_t *phys_ram_dirty;
119 uint8_t *bios_mem;
120 static int in_migration;
122 typedef struct RAMBlock {
123 uint8_t *host;
124 ram_addr_t offset;
125 ram_addr_t length;
126 struct RAMBlock *next;
127 } RAMBlock;
129 static RAMBlock *ram_blocks;
130 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
131 then we can no longer assume contiguous ram offsets, and external uses
132 of this variable will break. */
133 ram_addr_t last_ram_offset;
134 #endif
136 CPUState *first_cpu;
137 /* current CPU in the current thread. It is only valid inside
138 cpu_exec() */
139 CPUState *cpu_single_env;
140 /* 0 = Do not count executed instructions.
141 1 = Precise instruction counting.
142 2 = Adaptive rate instruction counting. */
143 int use_icount = 0;
144 /* Current instruction counter. While executing translated code this may
145 include some instructions that have not yet been executed. */
146 int64_t qemu_icount;
148 typedef struct PageDesc {
149 /* list of TBs intersecting this ram page */
150 TranslationBlock *first_tb;
151 /* in order to optimize self modifying code, we count the number
152 of lookups we do to a given page to use a bitmap */
153 unsigned int code_write_count;
154 uint8_t *code_bitmap;
155 #if defined(CONFIG_USER_ONLY)
156 unsigned long flags;
157 #endif
158 } PageDesc;
160 typedef struct PhysPageDesc {
161 /* offset in host memory of the page + io_index in the low bits */
162 ram_addr_t phys_offset;
163 ram_addr_t region_offset;
164 } PhysPageDesc;
166 #define L2_BITS 10
167 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
168 /* XXX: this is a temporary hack for alpha target.
169 * In the future, this is to be replaced by a multi-level table
170  * to actually be able to handle the complete 64-bit address space. */
172 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
173 #else
174 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
175 #endif
177 #define L1_SIZE (1 << L1_BITS)
178 #define L2_SIZE (1 << L2_BITS)
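/* A minimal sketch, for illustration, of how the two-level l1_map lookup
   splits a page index under the layout above: the low L2_BITS select the
   PageDesc inside an L2 array and the remaining bits select the L2 array
   itself. The helper name example_split_page_index is hypothetical. */
#if 0
static void example_split_page_index(target_ulong addr)
{
    target_ulong index = addr >> TARGET_PAGE_BITS;   /* target page number */
    target_ulong l1 = index >> L2_BITS;              /* index into l1_map[] */
    target_ulong l2 = index & (L2_SIZE - 1);         /* index into that L2 array */
    /* page_find() below effectively returns &l1_map[l1][l2] once allocated */
    (void)l1; (void)l2;
}
#endif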
180 unsigned long qemu_real_host_page_size;
181 unsigned long qemu_host_page_bits;
182 unsigned long qemu_host_page_size;
183 unsigned long qemu_host_page_mask;
185 /* XXX: for system emulation, it could just be an array */
186 static PageDesc *l1_map[L1_SIZE];
187 static PhysPageDesc **l1_phys_map;
189 #if !defined(CONFIG_USER_ONLY)
190 static void io_mem_init(void);
192 /* io memory support */
193 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
194 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
195 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
196 static char io_mem_used[IO_MEM_NB_ENTRIES];
197 static int io_mem_watch;
198 #endif
200 /* log support */
201 static const char *logfilename = "/tmp/qemu.log";
202 FILE *logfile;
203 int loglevel;
204 static int log_append = 0;
206 /* statistics */
207 static int tlb_flush_count;
208 static int tb_flush_count;
209 static int tb_phys_invalidate_count;
211 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
212 typedef struct subpage_t {
213 target_phys_addr_t base;
214 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
215 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
216 void *opaque[TARGET_PAGE_SIZE][2][4];
217 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
218 } subpage_t;
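/* A minimal sketch, for illustration, of how a subpage dispatches an access:
   it keeps one handler slot per byte offset inside the page, indexed by
   SUBPAGE_IDX() and by the access size index (0..3). The helper name
   example_subpage_read is hypothetical. */
#if 0
static uint32_t example_subpage_read(subpage_t *mmio, target_phys_addr_t addr,
                                     unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);   /* byte offset of addr inside the page */
    return (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                        addr + mmio->region_offset[idx][0][len]);
}
#endif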
220 #ifdef _WIN32
221 static void map_exec(void *addr, long size)
223 DWORD old_protect;
224 VirtualProtect(addr, size,
225 PAGE_EXECUTE_READWRITE, &old_protect);
228 #else
229 static void map_exec(void *addr, long size)
231 unsigned long start, end, page_size;
233 page_size = getpagesize();
234 start = (unsigned long)addr;
235 start &= ~(page_size - 1);
237 end = (unsigned long)addr + size;
238 end += page_size - 1;
239 end &= ~(page_size - 1);
241 mprotect((void *)start, end - start,
242 PROT_READ | PROT_WRITE | PROT_EXEC);
244 #endif
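/* A minimal worked example of the rounding done by map_exec() above: the
   requested range is widened outward to host page boundaries before
   mprotect(). The helper name example_exec_range and the 4 KiB page size are
   assumptions made for the illustration. */
#if 0
static void example_exec_range(void)
{
    unsigned long page_size = 0x1000;
    unsigned long addr = 0x1234, size = 0x100;
    unsigned long start = addr & ~(page_size - 1);                        /* 0x1000 */
    unsigned long end = (addr + size + page_size - 1) & ~(page_size - 1); /* 0x2000 */
    /* map_exec() would mprotect [start, end) as read/write/exec, i.e. one page */
    (void)start; (void)end;
}
#endif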
246 static void page_init(void)
248 /* NOTE: we can always suppose that qemu_host_page_size >=
249 TARGET_PAGE_SIZE */
250 #ifdef _WIN32
252 SYSTEM_INFO system_info;
254 GetSystemInfo(&system_info);
255 qemu_real_host_page_size = system_info.dwPageSize;
257 #else
258 qemu_real_host_page_size = getpagesize();
259 #endif
260 if (qemu_host_page_size == 0)
261 qemu_host_page_size = qemu_real_host_page_size;
262 if (qemu_host_page_size < TARGET_PAGE_SIZE)
263 qemu_host_page_size = TARGET_PAGE_SIZE;
264 qemu_host_page_bits = 0;
265 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
266 qemu_host_page_bits++;
267 qemu_host_page_mask = ~(qemu_host_page_size - 1);
268 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
269 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
271 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
273 long long startaddr, endaddr;
274 FILE *f;
275 int n;
277 mmap_lock();
278 last_brk = (unsigned long)sbrk(0);
279 f = fopen("/proc/self/maps", "r");
280 if (f) {
281 do {
282 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
283 if (n == 2) {
284 startaddr = MIN(startaddr,
285 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
286 endaddr = MIN(endaddr,
287 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
288 page_set_flags(startaddr & TARGET_PAGE_MASK,
289 TARGET_PAGE_ALIGN(endaddr),
290 PAGE_RESERVED);
292 } while (!feof(f));
293 fclose(f);
295 mmap_unlock();
297 #endif
300 static inline PageDesc **page_l1_map(target_ulong index)
302 #if TARGET_LONG_BITS > 32
303 /* Host memory outside guest VM. For 32-bit targets we have already
304 excluded high addresses. */
305 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
306 return NULL;
307 #endif
308 return &l1_map[index >> L2_BITS];
311 static inline PageDesc *page_find_alloc(target_ulong index)
313 PageDesc **lp, *p;
314 lp = page_l1_map(index);
315 if (!lp)
316 return NULL;
318 p = *lp;
319 if (!p) {
320 /* allocate if not found */
321 #if defined(CONFIG_USER_ONLY)
322 size_t len = sizeof(PageDesc) * L2_SIZE;
323 /* Don't use qemu_malloc because it may recurse. */
324 p = mmap(0, len, PROT_READ | PROT_WRITE,
325 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
326 *lp = p;
327 if (h2g_valid(p)) {
328 unsigned long addr = h2g(p);
329 page_set_flags(addr & TARGET_PAGE_MASK,
330 TARGET_PAGE_ALIGN(addr + len),
331 PAGE_RESERVED);
333 #else
334 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
335 *lp = p;
336 #endif
338 return p + (index & (L2_SIZE - 1));
341 static inline PageDesc *page_find(target_ulong index)
343 PageDesc **lp, *p;
344 lp = page_l1_map(index);
345 if (!lp)
346 return NULL;
348 p = *lp;
349 if (!p)
350 return 0;
351 return p + (index & (L2_SIZE - 1));
354 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
356 void **lp, **p;
357 PhysPageDesc *pd;
359 p = (void **)l1_phys_map;
360 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
362 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
363 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
364 #endif
365 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
366 p = *lp;
367 if (!p) {
368 /* allocate if not found */
369 if (!alloc)
370 return NULL;
371 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
372 memset(p, 0, sizeof(void *) * L1_SIZE);
373 *lp = p;
375 #endif
376 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
377 pd = *lp;
378 if (!pd) {
379 int i;
380 /* allocate if not found */
381 if (!alloc)
382 return NULL;
383 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
384 *lp = pd;
385 for (i = 0; i < L2_SIZE; i++) {
386 pd[i].phys_offset = IO_MEM_UNASSIGNED;
387 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
390 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
393 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
395 return phys_page_find_alloc(index, 0);
398 #if !defined(CONFIG_USER_ONLY)
399 static void tlb_protect_code(ram_addr_t ram_addr);
400 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
401 target_ulong vaddr);
402 #define mmap_lock() do { } while(0)
403 #define mmap_unlock() do { } while(0)
404 #endif
406 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
408 #if defined(CONFIG_USER_ONLY)
409 /* Currently it is not recommended to allocate big chunks of data in
410    user mode. This will change when a dedicated libc is used. */
411 #define USE_STATIC_CODE_GEN_BUFFER
412 #endif
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
416 #endif
418 static void code_gen_alloc(unsigned long tb_size)
420 if (kvm_enabled())
421 return;
423 #ifdef USE_STATIC_CODE_GEN_BUFFER
424 code_gen_buffer = static_code_gen_buffer;
425 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
426 map_exec(code_gen_buffer, code_gen_buffer_size);
427 #else
428 code_gen_buffer_size = tb_size;
429 if (code_gen_buffer_size == 0) {
430 #if defined(CONFIG_USER_ONLY)
431 /* in user mode, phys_ram_size is not meaningful */
432 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
433 #else
434 /* XXX: needs adjustments */
435 code_gen_buffer_size = (unsigned long)(ram_size / 4);
436 #endif
438 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
439 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
440 /* The code gen buffer location may have constraints depending on
441 the host cpu and OS */
442 #if defined(__linux__)
444 int flags;
445 void *start = NULL;
447 flags = MAP_PRIVATE | MAP_ANONYMOUS;
448 #if defined(__x86_64__)
449 flags |= MAP_32BIT;
450 /* Cannot map more than that */
451 if (code_gen_buffer_size > (800 * 1024 * 1024))
452 code_gen_buffer_size = (800 * 1024 * 1024);
453 #elif defined(__sparc_v9__)
454 // Map the buffer below 2G, so we can use direct calls and branches
455 flags |= MAP_FIXED;
456 start = (void *) 0x60000000UL;
457 if (code_gen_buffer_size > (512 * 1024 * 1024))
458 code_gen_buffer_size = (512 * 1024 * 1024);
459 #elif defined(__arm__)
460 /* Map the buffer below 32M, so we can use direct calls and branches */
461 flags |= MAP_FIXED;
462 start = (void *) 0x01000000UL;
463 if (code_gen_buffer_size > 16 * 1024 * 1024)
464 code_gen_buffer_size = 16 * 1024 * 1024;
465 #endif
466 code_gen_buffer = mmap(start, code_gen_buffer_size,
467 PROT_WRITE | PROT_READ | PROT_EXEC,
468 flags, -1, 0);
469 if (code_gen_buffer == MAP_FAILED) {
470 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
471 exit(1);
474 #elif defined(__FreeBSD__) || defined(__DragonFly__)
476 int flags;
477 void *addr = NULL;
478 flags = MAP_PRIVATE | MAP_ANONYMOUS;
479 #if defined(__x86_64__)
480 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
481 * 0x40000000 is free */
482 flags |= MAP_FIXED;
483 addr = (void *)0x40000000;
484 /* Cannot map more than that */
485 if (code_gen_buffer_size > (800 * 1024 * 1024))
486 code_gen_buffer_size = (800 * 1024 * 1024);
487 #endif
488 code_gen_buffer = mmap(addr, code_gen_buffer_size,
489 PROT_WRITE | PROT_READ | PROT_EXEC,
490 flags, -1, 0);
491 if (code_gen_buffer == MAP_FAILED) {
492 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
493 exit(1);
496 #else
497 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
498 map_exec(code_gen_buffer, code_gen_buffer_size);
499 #endif
500 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
501 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
502 code_gen_buffer_max_size = code_gen_buffer_size -
503 code_gen_max_block_size();
504 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
505 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
508 /* Must be called before using the QEMU cpus. 'tb_size' is the size
509 (in bytes) allocated to the translation buffer. Zero means default
510 size. */
511 void cpu_exec_init_all(unsigned long tb_size)
513 cpu_gen_init();
514 code_gen_alloc(tb_size);
515 code_gen_ptr = code_gen_buffer;
516 page_init();
517 #if !defined(CONFIG_USER_ONLY)
518 io_mem_init();
519 #endif
522 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
524 #define CPU_COMMON_SAVE_VERSION 1
526 static void cpu_common_save(QEMUFile *f, void *opaque)
528 CPUState *env = opaque;
530 cpu_synchronize_state(env, 0);
532 qemu_put_be32s(f, &env->halted);
533 qemu_put_be32s(f, &env->interrupt_request);
536 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
538 CPUState *env = opaque;
540 if (version_id != CPU_COMMON_SAVE_VERSION)
541 return -EINVAL;
543 qemu_get_be32s(f, &env->halted);
544 qemu_get_be32s(f, &env->interrupt_request);
545 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
546 version_id is increased. */
547 env->interrupt_request &= ~0x01;
548 tlb_flush(env, 1);
549 cpu_synchronize_state(env, 1);
551 return 0;
553 #endif
555 void cpu_exec_init(CPUState *env)
557 CPUState **penv;
558 int cpu_index;
560 #if defined(CONFIG_USER_ONLY)
561 cpu_list_lock();
562 #endif
563 env->next_cpu = NULL;
564 penv = &first_cpu;
565 cpu_index = 0;
566 while (*penv != NULL) {
567 penv = (CPUState **)&(*penv)->next_cpu;
568 cpu_index++;
570 env->cpu_index = cpu_index;
571 env->numa_node = 0;
572 TAILQ_INIT(&env->breakpoints);
573 TAILQ_INIT(&env->watchpoints);
574 #ifdef _WIN32
575 env->thread_id = GetCurrentProcessId();
576 #else
577 env->thread_id = getpid();
578 #endif
579 *penv = env;
580 #if defined(CONFIG_USER_ONLY)
581 cpu_list_unlock();
582 #endif
583 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
584 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
585 cpu_common_save, cpu_common_load, env);
586 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
587 cpu_save, cpu_load, env);
588 #endif
591 static inline void invalidate_page_bitmap(PageDesc *p)
593 if (p->code_bitmap) {
594 qemu_free(p->code_bitmap);
595 p->code_bitmap = NULL;
597 p->code_write_count = 0;
600 /* set to NULL all the 'first_tb' fields in all PageDescs */
601 static void page_flush_tb(void)
603 int i, j;
604 PageDesc *p;
606 for(i = 0; i < L1_SIZE; i++) {
607 p = l1_map[i];
608 if (p) {
609 for(j = 0; j < L2_SIZE; j++) {
610 p->first_tb = NULL;
611 invalidate_page_bitmap(p);
612 p++;
618 /* flush all the translation blocks */
619 /* XXX: tb_flush is currently not thread safe */
620 void tb_flush(CPUState *env1)
622 CPUState *env;
623 #if defined(DEBUG_FLUSH)
624 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
625 (unsigned long)(code_gen_ptr - code_gen_buffer),
626 nb_tbs, nb_tbs > 0 ?
627 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
628 #endif
629 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
630 cpu_abort(env1, "Internal error: code buffer overflow\n");
632 nb_tbs = 0;
634 for(env = first_cpu; env != NULL; env = env->next_cpu) {
635 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
638 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
639 page_flush_tb();
641 code_gen_ptr = code_gen_buffer;
642 /* XXX: flush processor icache at this point if cache flush is
643 expensive */
644 tb_flush_count++;
647 #ifdef DEBUG_TB_CHECK
649 static void tb_invalidate_check(target_ulong address)
651 TranslationBlock *tb;
652 int i;
653 address &= TARGET_PAGE_MASK;
654 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
655 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
656 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
657 address >= tb->pc + tb->size)) {
658 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
659 address, (long)tb->pc, tb->size);
665 /* verify that all the pages have correct rights for code */
666 static void tb_page_check(void)
668 TranslationBlock *tb;
669 int i, flags1, flags2;
671 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
672 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
673 flags1 = page_get_flags(tb->pc);
674 flags2 = page_get_flags(tb->pc + tb->size - 1);
675 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
676 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
677 (long)tb->pc, tb->size, flags1, flags2);
683 static void tb_jmp_check(TranslationBlock *tb)
685 TranslationBlock *tb1;
686 unsigned int n1;
688 /* suppress any remaining jumps to this TB */
689 tb1 = tb->jmp_first;
690 for(;;) {
691 n1 = (long)tb1 & 3;
692 tb1 = (TranslationBlock *)((long)tb1 & ~3);
693 if (n1 == 2)
694 break;
695 tb1 = tb1->jmp_next[n1];
697 /* check end of list */
698 if (tb1 != tb) {
699 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
703 #endif
705 /* invalidate one TB */
706 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
707 int next_offset)
709 TranslationBlock *tb1;
710 for(;;) {
711 tb1 = *ptb;
712 if (tb1 == tb) {
713 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
714 break;
716 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
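/* For illustration: tb_remove() above walks a singly linked list whose "next"
   pointer lives at a caller-supplied byte offset inside TranslationBlock, so
   the same helper can unlink a TB from any such list. A typical call, as it
   appears further below for the physical hash chain: */
#if 0
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));
#endif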
720 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
722 TranslationBlock *tb1;
723 unsigned int n1;
725 for(;;) {
726 tb1 = *ptb;
727 n1 = (long)tb1 & 3;
728 tb1 = (TranslationBlock *)((long)tb1 & ~3);
729 if (tb1 == tb) {
730 *ptb = tb1->page_next[n1];
731 break;
733 ptb = &tb1->page_next[n1];
737 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
739 TranslationBlock *tb1, **ptb;
740 unsigned int n1;
742 ptb = &tb->jmp_next[n];
743 tb1 = *ptb;
744 if (tb1) {
745 /* find tb(n) in circular list */
746 for(;;) {
747 tb1 = *ptb;
748 n1 = (long)tb1 & 3;
749 tb1 = (TranslationBlock *)((long)tb1 & ~3);
750 if (n1 == n && tb1 == tb)
751 break;
752 if (n1 == 2) {
753 ptb = &tb1->jmp_first;
754 } else {
755 ptb = &tb1->jmp_next[n1];
758 /* now we can suppress tb(n) from the list */
759 *ptb = tb->jmp_next[n];
761 tb->jmp_next[n] = NULL;
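/* For illustration: the jump lists above encode which outgoing-branch slot a
   TB was reached through in the low two bits of the pointer itself (0 or 1
   for the two jump slots, 2 marks the list head), so entries are decoded as
   below for some TranslationBlock *tb (a hypothetical local). */
#if 0
    TranslationBlock *entry = tb->jmp_first;
    unsigned int slot = (long)entry & 3;                           /* 0, 1 or 2 */
    TranslationBlock *real_tb = (TranslationBlock *)((long)entry & ~3);
#endif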
765 /* reset the jump entry 'n' of a TB so that it is not chained to
766 another TB */
767 static inline void tb_reset_jump(TranslationBlock *tb, int n)
769 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
772 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
774 CPUState *env;
775 PageDesc *p;
776 unsigned int h, n1;
777 target_phys_addr_t phys_pc;
778 TranslationBlock *tb1, *tb2;
780 /* remove the TB from the hash list */
781 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
782 h = tb_phys_hash_func(phys_pc);
783 tb_remove(&tb_phys_hash[h], tb,
784 offsetof(TranslationBlock, phys_hash_next));
786 /* remove the TB from the page list */
787 if (tb->page_addr[0] != page_addr) {
788 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
789 tb_page_remove(&p->first_tb, tb);
790 invalidate_page_bitmap(p);
792 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
793 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
794 tb_page_remove(&p->first_tb, tb);
795 invalidate_page_bitmap(p);
798 tb_invalidated_flag = 1;
800     /* remove the TB from each CPU's tb_jmp_cache */
801 h = tb_jmp_cache_hash_func(tb->pc);
802 for(env = first_cpu; env != NULL; env = env->next_cpu) {
803 if (env->tb_jmp_cache[h] == tb)
804 env->tb_jmp_cache[h] = NULL;
807 /* suppress this TB from the two jump lists */
808 tb_jmp_remove(tb, 0);
809 tb_jmp_remove(tb, 1);
811 /* suppress any remaining jumps to this TB */
812 tb1 = tb->jmp_first;
813 for(;;) {
814 n1 = (long)tb1 & 3;
815 if (n1 == 2)
816 break;
817 tb1 = (TranslationBlock *)((long)tb1 & ~3);
818 tb2 = tb1->jmp_next[n1];
819 tb_reset_jump(tb1, n1);
820 tb1->jmp_next[n1] = NULL;
821 tb1 = tb2;
823 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
825 tb_phys_invalidate_count++;
828 static inline void set_bits(uint8_t *tab, int start, int len)
830 int end, mask, end1;
832 end = start + len;
833 tab += start >> 3;
834 mask = 0xff << (start & 7);
835 if ((start & ~7) == (end & ~7)) {
836 if (start < end) {
837 mask &= ~(0xff << (end & 7));
838 *tab |= mask;
840 } else {
841 *tab++ |= mask;
842 start = (start + 8) & ~7;
843 end1 = end & ~7;
844 while (start < end1) {
845 *tab++ = 0xff;
846 start += 8;
848 if (start < end) {
849 mask = ~(0xff << (end & 7));
850 *tab |= mask;
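/* A minimal bit-at-a-time reference, for illustration, of what set_bits()
   computes; it makes the mask handling of the partial first and last bytes
   easier to check. The name example_set_bits_ref is hypothetical. */
#if 0
static void example_set_bits_ref(uint8_t *tab, int start, int len)
{
    int i;
    for (i = start; i < start + len; i++)
        tab[i >> 3] |= 1 << (i & 7);   /* same result as set_bits(tab, start, len) */
}
#endif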
855 static void build_page_bitmap(PageDesc *p)
857 int n, tb_start, tb_end;
858 TranslationBlock *tb;
860 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
862 tb = p->first_tb;
863 while (tb != NULL) {
864 n = (long)tb & 3;
865 tb = (TranslationBlock *)((long)tb & ~3);
866 /* NOTE: this is subtle as a TB may span two physical pages */
867 if (n == 0) {
868 /* NOTE: tb_end may be after the end of the page, but
869 it is not a problem */
870 tb_start = tb->pc & ~TARGET_PAGE_MASK;
871 tb_end = tb_start + tb->size;
872 if (tb_end > TARGET_PAGE_SIZE)
873 tb_end = TARGET_PAGE_SIZE;
874 } else {
875 tb_start = 0;
876 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
878 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
879 tb = tb->page_next[n];
883 TranslationBlock *tb_gen_code(CPUState *env,
884 target_ulong pc, target_ulong cs_base,
885 int flags, int cflags)
887 TranslationBlock *tb;
888 uint8_t *tc_ptr;
889 target_ulong phys_pc, phys_page2, virt_page2;
890 int code_gen_size;
892 phys_pc = get_phys_addr_code(env, pc);
893 tb = tb_alloc(pc);
894 if (!tb) {
895 /* flush must be done */
896 tb_flush(env);
897 /* cannot fail at this point */
898 tb = tb_alloc(pc);
899 /* Don't forget to invalidate previous TB info. */
900 tb_invalidated_flag = 1;
902 tc_ptr = code_gen_ptr;
903 tb->tc_ptr = tc_ptr;
904 tb->cs_base = cs_base;
905 tb->flags = flags;
906 tb->cflags = cflags;
907 cpu_gen_code(env, tb, &code_gen_size);
908 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
910 /* check next page if needed */
911 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
912 phys_page2 = -1;
913 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
914 phys_page2 = get_phys_addr_code(env, virt_page2);
916 tb_link_phys(tb, phys_pc, phys_page2);
917 return tb;
920 /* invalidate all TBs which intersect with the target physical page
921    starting in the range [start, end). NOTE: start and end must refer to
922 the same physical page. 'is_cpu_write_access' should be true if called
923 from a real cpu write access: the virtual CPU will exit the current
924 TB if code is modified inside this TB. */
925 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
926 int is_cpu_write_access)
928 TranslationBlock *tb, *tb_next, *saved_tb;
929 CPUState *env = cpu_single_env;
930 target_ulong tb_start, tb_end;
931 PageDesc *p;
932 int n;
933 #ifdef TARGET_HAS_PRECISE_SMC
934 int current_tb_not_found = is_cpu_write_access;
935 TranslationBlock *current_tb = NULL;
936 int current_tb_modified = 0;
937 target_ulong current_pc = 0;
938 target_ulong current_cs_base = 0;
939 int current_flags = 0;
940 #endif /* TARGET_HAS_PRECISE_SMC */
942 p = page_find(start >> TARGET_PAGE_BITS);
943 if (!p)
944 return;
945 if (!p->code_bitmap &&
946 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
947 is_cpu_write_access) {
948 /* build code bitmap */
949 build_page_bitmap(p);
952     /* we remove all the TBs in the range [start, end) */
953 /* XXX: see if in some cases it could be faster to invalidate all the code */
954 tb = p->first_tb;
955 while (tb != NULL) {
956 n = (long)tb & 3;
957 tb = (TranslationBlock *)((long)tb & ~3);
958 tb_next = tb->page_next[n];
959 /* NOTE: this is subtle as a TB may span two physical pages */
960 if (n == 0) {
961 /* NOTE: tb_end may be after the end of the page, but
962 it is not a problem */
963 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
964 tb_end = tb_start + tb->size;
965 } else {
966 tb_start = tb->page_addr[1];
967 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
969 if (!(tb_end <= start || tb_start >= end)) {
970 #ifdef TARGET_HAS_PRECISE_SMC
971 if (current_tb_not_found) {
972 current_tb_not_found = 0;
973 current_tb = NULL;
974 if (env->mem_io_pc) {
975 /* now we have a real cpu fault */
976 current_tb = tb_find_pc(env->mem_io_pc);
979 if (current_tb == tb &&
980 (current_tb->cflags & CF_COUNT_MASK) != 1) {
981 /* If we are modifying the current TB, we must stop
982 its execution. We could be more precise by checking
983 that the modification is after the current PC, but it
984 would require a specialized function to partially
985 restore the CPU state */
987 current_tb_modified = 1;
988 cpu_restore_state(current_tb, env,
989 env->mem_io_pc, NULL);
990 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
991 &current_flags);
993 #endif /* TARGET_HAS_PRECISE_SMC */
994 /* we need to do that to handle the case where a signal
995 occurs while doing tb_phys_invalidate() */
996 saved_tb = NULL;
997 if (env) {
998 saved_tb = env->current_tb;
999 env->current_tb = NULL;
1001 tb_phys_invalidate(tb, -1);
1002 if (env) {
1003 env->current_tb = saved_tb;
1004 if (env->interrupt_request && env->current_tb)
1005 cpu_interrupt(env, env->interrupt_request);
1008 tb = tb_next;
1010 #if !defined(CONFIG_USER_ONLY)
1011 /* if no code remaining, no need to continue to use slow writes */
1012 if (!p->first_tb) {
1013 invalidate_page_bitmap(p);
1014 if (is_cpu_write_access) {
1015 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1018 #endif
1019 #ifdef TARGET_HAS_PRECISE_SMC
1020 if (current_tb_modified) {
1021 /* we generate a block containing just the instruction
1022 modifying the memory. It will ensure that it cannot modify
1023 itself */
1024 env->current_tb = NULL;
1025 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1026 cpu_resume_from_signal(env, NULL);
1028 #endif
1031 /* len must be <= 8 and start must be a multiple of len */
1032 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1034 PageDesc *p;
1035 int offset, b;
1036 #if 0
1037 if (1) {
1038 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1039 cpu_single_env->mem_io_vaddr, len,
1040 cpu_single_env->eip,
1041 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1043 #endif
1044 p = page_find(start >> TARGET_PAGE_BITS);
1045 if (!p)
1046 return;
1047 if (p->code_bitmap) {
1048 offset = start & ~TARGET_PAGE_MASK;
1049 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1050 if (b & ((1 << len) - 1))
1051 goto do_invalidate;
1052 } else {
1053 do_invalidate:
1054 tb_invalidate_phys_page_range(start, start + len, 1);
1058 #if !defined(CONFIG_SOFTMMU)
1059 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1060 unsigned long pc, void *puc)
1062 TranslationBlock *tb;
1063 PageDesc *p;
1064 int n;
1065 #ifdef TARGET_HAS_PRECISE_SMC
1066 TranslationBlock *current_tb = NULL;
1067 CPUState *env = cpu_single_env;
1068 int current_tb_modified = 0;
1069 target_ulong current_pc = 0;
1070 target_ulong current_cs_base = 0;
1071 int current_flags = 0;
1072 #endif
1074 addr &= TARGET_PAGE_MASK;
1075 p = page_find(addr >> TARGET_PAGE_BITS);
1076 if (!p)
1077 return;
1078 tb = p->first_tb;
1079 #ifdef TARGET_HAS_PRECISE_SMC
1080 if (tb && pc != 0) {
1081 current_tb = tb_find_pc(pc);
1083 #endif
1084 while (tb != NULL) {
1085 n = (long)tb & 3;
1086 tb = (TranslationBlock *)((long)tb & ~3);
1087 #ifdef TARGET_HAS_PRECISE_SMC
1088 if (current_tb == tb &&
1089 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1090 /* If we are modifying the current TB, we must stop
1091 its execution. We could be more precise by checking
1092 that the modification is after the current PC, but it
1093 would require a specialized function to partially
1094 restore the CPU state */
1096 current_tb_modified = 1;
1097 cpu_restore_state(current_tb, env, pc, puc);
1098 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1099 &current_flags);
1101 #endif /* TARGET_HAS_PRECISE_SMC */
1102 tb_phys_invalidate(tb, addr);
1103 tb = tb->page_next[n];
1105 p->first_tb = NULL;
1106 #ifdef TARGET_HAS_PRECISE_SMC
1107 if (current_tb_modified) {
1108 /* we generate a block containing just the instruction
1109 modifying the memory. It will ensure that it cannot modify
1110 itself */
1111 env->current_tb = NULL;
1112 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1113 cpu_resume_from_signal(env, puc);
1115 #endif
1117 #endif
1119 /* add the tb in the target page and protect it if necessary */
1120 static inline void tb_alloc_page(TranslationBlock *tb,
1121 unsigned int n, target_ulong page_addr)
1123 PageDesc *p;
1124 TranslationBlock *last_first_tb;
1126 tb->page_addr[n] = page_addr;
1127 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1128 tb->page_next[n] = p->first_tb;
1129 last_first_tb = p->first_tb;
1130 p->first_tb = (TranslationBlock *)((long)tb | n);
1131 invalidate_page_bitmap(p);
1133 #if defined(TARGET_HAS_SMC) || 1
1135 #if defined(CONFIG_USER_ONLY)
1136 if (p->flags & PAGE_WRITE) {
1137 target_ulong addr;
1138 PageDesc *p2;
1139 int prot;
1141 /* force the host page as non writable (writes will have a
1142 page fault + mprotect overhead) */
1143 page_addr &= qemu_host_page_mask;
1144 prot = 0;
1145 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1146 addr += TARGET_PAGE_SIZE) {
1148 p2 = page_find (addr >> TARGET_PAGE_BITS);
1149 if (!p2)
1150 continue;
1151 prot |= p2->flags;
1152 p2->flags &= ~PAGE_WRITE;
1153 page_get_flags(addr);
1155 mprotect(g2h(page_addr), qemu_host_page_size,
1156 (prot & PAGE_BITS) & ~PAGE_WRITE);
1157 #ifdef DEBUG_TB_INVALIDATE
1158 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1159 page_addr);
1160 #endif
1162 #else
1163 /* if some code is already present, then the pages are already
1164 protected. So we handle the case where only the first TB is
1165 allocated in a physical page */
1166 if (!last_first_tb) {
1167 tlb_protect_code(page_addr);
1169 #endif
1171 #endif /* TARGET_HAS_SMC */
1174 /* Allocate a new translation block. Flush the translation buffer if
1175 too many translation blocks or too much generated code. */
1176 TranslationBlock *tb_alloc(target_ulong pc)
1178 TranslationBlock *tb;
1180 if (nb_tbs >= code_gen_max_blocks ||
1181 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1182 return NULL;
1183 tb = &tbs[nb_tbs++];
1184 tb->pc = pc;
1185 tb->cflags = 0;
1186 return tb;
1189 void tb_free(TranslationBlock *tb)
1191     /* In practice this is mostly used for single use temporary TBs.
1192 Ignore the hard cases and just back up if this TB happens to
1193 be the last one generated. */
1194 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1195 code_gen_ptr = tb->tc_ptr;
1196 nb_tbs--;
1200 /* add a new TB and link it to the physical page tables. phys_page2 is
1201 (-1) to indicate that only one page contains the TB. */
1202 void tb_link_phys(TranslationBlock *tb,
1203 target_ulong phys_pc, target_ulong phys_page2)
1205 unsigned int h;
1206 TranslationBlock **ptb;
1208 /* Grab the mmap lock to stop another thread invalidating this TB
1209 before we are done. */
1210 mmap_lock();
1211 /* add in the physical hash table */
1212 h = tb_phys_hash_func(phys_pc);
1213 ptb = &tb_phys_hash[h];
1214 tb->phys_hash_next = *ptb;
1215 *ptb = tb;
1217 /* add in the page list */
1218 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1219 if (phys_page2 != -1)
1220 tb_alloc_page(tb, 1, phys_page2);
1221 else
1222 tb->page_addr[1] = -1;
1224 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1225 tb->jmp_next[0] = NULL;
1226 tb->jmp_next[1] = NULL;
1228 /* init original jump addresses */
1229 if (tb->tb_next_offset[0] != 0xffff)
1230 tb_reset_jump(tb, 0);
1231 if (tb->tb_next_offset[1] != 0xffff)
1232 tb_reset_jump(tb, 1);
1234 #ifdef DEBUG_TB_CHECK
1235 tb_page_check();
1236 #endif
1237 mmap_unlock();
1240 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1241 tb[1].tc_ptr. Return NULL if not found */
1242 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1244 int m_min, m_max, m;
1245 unsigned long v;
1246 TranslationBlock *tb;
1248 if (nb_tbs <= 0)
1249 return NULL;
1250 if (tc_ptr < (unsigned long)code_gen_buffer ||
1251 tc_ptr >= (unsigned long)code_gen_ptr)
1252 return NULL;
1253 /* binary search (cf Knuth) */
1254 m_min = 0;
1255 m_max = nb_tbs - 1;
1256 while (m_min <= m_max) {
1257 m = (m_min + m_max) >> 1;
1258 tb = &tbs[m];
1259 v = (unsigned long)tb->tc_ptr;
1260 if (v == tc_ptr)
1261 return tb;
1262 else if (tc_ptr < v) {
1263 m_max = m - 1;
1264 } else {
1265 m_min = m + 1;
1268 return &tbs[m_max];
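/* For illustration: the binary search above relies on tbs[] being filled in
   increasing tc_ptr order (code_gen_ptr only grows between flushes), so it
   returns the last TB whose generated code starts at or before tc_ptr. A
   hypothetical caller mapping a host fault address back to its TB: */
#if 0
    TranslationBlock *tb = tb_find_pc((unsigned long)fault_host_pc);
    if (tb)
        cpu_restore_state(tb, env, (unsigned long)fault_host_pc, NULL);
#endif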
1271 static void tb_reset_jump_recursive(TranslationBlock *tb);
1273 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1275 TranslationBlock *tb1, *tb_next, **ptb;
1276 unsigned int n1;
1278 tb1 = tb->jmp_next[n];
1279 if (tb1 != NULL) {
1280 /* find head of list */
1281 for(;;) {
1282 n1 = (long)tb1 & 3;
1283 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1284 if (n1 == 2)
1285 break;
1286 tb1 = tb1->jmp_next[n1];
1288         /* we are now sure that tb jumps to tb1 */
1289 tb_next = tb1;
1291 /* remove tb from the jmp_first list */
1292 ptb = &tb_next->jmp_first;
1293 for(;;) {
1294 tb1 = *ptb;
1295 n1 = (long)tb1 & 3;
1296 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1297 if (n1 == n && tb1 == tb)
1298 break;
1299 ptb = &tb1->jmp_next[n1];
1301 *ptb = tb->jmp_next[n];
1302 tb->jmp_next[n] = NULL;
1304 /* suppress the jump to next tb in generated code */
1305 tb_reset_jump(tb, n);
1307 /* suppress jumps in the tb on which we could have jumped */
1308 tb_reset_jump_recursive(tb_next);
1312 static void tb_reset_jump_recursive(TranslationBlock *tb)
1314 tb_reset_jump_recursive2(tb, 0);
1315 tb_reset_jump_recursive2(tb, 1);
1318 #if defined(TARGET_HAS_ICE)
1319 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1321 target_phys_addr_t addr;
1322 target_ulong pd;
1323 ram_addr_t ram_addr;
1324 PhysPageDesc *p;
1326 addr = cpu_get_phys_page_debug(env, pc);
1327 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1328 if (!p) {
1329 pd = IO_MEM_UNASSIGNED;
1330 } else {
1331 pd = p->phys_offset;
1333 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1334 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1336 #endif
1338 /* Add a watchpoint. */
1339 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1340 int flags, CPUWatchpoint **watchpoint)
1342 target_ulong len_mask = ~(len - 1);
1343 CPUWatchpoint *wp;
1345 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1346 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1347 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1348 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1349 return -EINVAL;
1351 wp = qemu_malloc(sizeof(*wp));
1353 wp->vaddr = addr;
1354 wp->len_mask = len_mask;
1355 wp->flags = flags;
1357 /* keep all GDB-injected watchpoints in front */
1358 if (flags & BP_GDB)
1359 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1360 else
1361 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1363 tlb_flush_page(env, addr);
1365 if (watchpoint)
1366 *watchpoint = wp;
1367 return 0;
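/* A minimal worked example of the alignment check above: with len = 4 the
   mask is len_mask = ~(len - 1) = ~3, so (addr & ~len_mask) is (addr & 3) and
   the insert is rejected unless addr is 4-byte aligned. env and the addresses
   below are hypothetical. */
#if 0
    cpu_watchpoint_insert(env, 0x1000, 4, BP_GDB, NULL);   /* accepted */
    cpu_watchpoint_insert(env, 0x1002, 4, BP_GDB, NULL);   /* rejected, -EINVAL */
#endif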
1370 /* Remove a specific watchpoint. */
1371 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1372 int flags)
1374 target_ulong len_mask = ~(len - 1);
1375 CPUWatchpoint *wp;
1377 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1378 if (addr == wp->vaddr && len_mask == wp->len_mask
1379 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1380 cpu_watchpoint_remove_by_ref(env, wp);
1381 return 0;
1384 return -ENOENT;
1387 /* Remove a specific watchpoint by reference. */
1388 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1390 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1392 tlb_flush_page(env, watchpoint->vaddr);
1394 qemu_free(watchpoint);
1397 /* Remove all matching watchpoints. */
1398 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1400 CPUWatchpoint *wp, *next;
1402 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1403 if (wp->flags & mask)
1404 cpu_watchpoint_remove_by_ref(env, wp);
1408 /* Add a breakpoint. */
1409 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1410 CPUBreakpoint **breakpoint)
1412 #if defined(TARGET_HAS_ICE)
1413 CPUBreakpoint *bp;
1415 bp = qemu_malloc(sizeof(*bp));
1417 bp->pc = pc;
1418 bp->flags = flags;
1420 /* keep all GDB-injected breakpoints in front */
1421 if (flags & BP_GDB)
1422 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1423 else
1424 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1426 breakpoint_invalidate(env, pc);
1428 if (breakpoint)
1429 *breakpoint = bp;
1430 return 0;
1431 #else
1432 return -ENOSYS;
1433 #endif
1436 /* Remove a specific breakpoint. */
1437 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1439 #if defined(TARGET_HAS_ICE)
1440 CPUBreakpoint *bp;
1442 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1443 if (bp->pc == pc && bp->flags == flags) {
1444 cpu_breakpoint_remove_by_ref(env, bp);
1445 return 0;
1448 return -ENOENT;
1449 #else
1450 return -ENOSYS;
1451 #endif
1454 /* Remove a specific breakpoint by reference. */
1455 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1457 #if defined(TARGET_HAS_ICE)
1458 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1460 breakpoint_invalidate(env, breakpoint->pc);
1462 qemu_free(breakpoint);
1463 #endif
1466 /* Remove all matching breakpoints. */
1467 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1469 #if defined(TARGET_HAS_ICE)
1470 CPUBreakpoint *bp, *next;
1472 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1473 if (bp->flags & mask)
1474 cpu_breakpoint_remove_by_ref(env, bp);
1476 #endif
1479 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1480 CPU loop after each instruction */
1481 void cpu_single_step(CPUState *env, int enabled)
1483 #if defined(TARGET_HAS_ICE)
1484 if (env->singlestep_enabled != enabled) {
1485 env->singlestep_enabled = enabled;
1486 if (kvm_enabled())
1487 kvm_update_guest_debug(env, 0);
1488 else {
1489 /* must flush all the translated code to avoid inconsistencies */
1490 /* XXX: only flush what is necessary */
1491 tb_flush(env);
1494 #endif
1497 /* enable or disable low-level logging */
1498 void cpu_set_log(int log_flags)
1500 loglevel = log_flags;
1501 if (loglevel && !logfile) {
1502 logfile = fopen(logfilename, log_append ? "a" : "w");
1503 if (!logfile) {
1504 perror(logfilename);
1505 _exit(1);
1507 #if !defined(CONFIG_SOFTMMU)
1508 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1510 static char logfile_buf[4096];
1511 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1513 #else
1514 setvbuf(logfile, NULL, _IOLBF, 0);
1515 #endif
1516 log_append = 1;
1518 if (!loglevel && logfile) {
1519 fclose(logfile);
1520 logfile = NULL;
1524 void cpu_set_log_filename(const char *filename)
1526 logfilename = strdup(filename);
1527 if (logfile) {
1528 fclose(logfile);
1529 logfile = NULL;
1531 cpu_set_log(loglevel);
1534 static void cpu_unlink_tb(CPUState *env)
1536 #if defined(USE_NPTL)
1537 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1538 problem and hope the cpu will stop of its own accord. For userspace
1539 emulation this often isn't actually as bad as it sounds. Often
1540 signals are used primarily to interrupt blocking syscalls. */
1541 #else
1542 TranslationBlock *tb;
1543 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1545 tb = env->current_tb;
1546 /* if the cpu is currently executing code, we must unlink it and
1547 all the potentially executing TB */
1548 if (tb && !testandset(&interrupt_lock)) {
1549 env->current_tb = NULL;
1550 tb_reset_jump_recursive(tb);
1551 resetlock(&interrupt_lock);
1553 #endif
1556 /* mask must never be zero, except for A20 change call */
1557 void cpu_interrupt(CPUState *env, int mask)
1559 int old_mask;
1561 old_mask = env->interrupt_request;
1562 env->interrupt_request |= mask;
1563 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1564 kvm_update_interrupt_request(env);
1566 #ifndef CONFIG_USER_ONLY
1568     /* If called from iothread context, wake the target cpu in
1569        case it's halted. */
1571 if (!qemu_cpu_self(env)) {
1572 qemu_cpu_kick(env);
1573 return;
1575 #endif
1577 if (use_icount) {
1578 env->icount_decr.u16.high = 0xffff;
1579 #ifndef CONFIG_USER_ONLY
1580 if (!can_do_io(env)
1581 && (mask & ~old_mask) != 0) {
1582 cpu_abort(env, "Raised interrupt while not in I/O function");
1584 #endif
1585 } else {
1586 cpu_unlink_tb(env);
1590 void cpu_reset_interrupt(CPUState *env, int mask)
1592 env->interrupt_request &= ~mask;
1595 void cpu_exit(CPUState *env)
1597 env->exit_request = 1;
1598 cpu_unlink_tb(env);
1601 const CPULogItem cpu_log_items[] = {
1602 { CPU_LOG_TB_OUT_ASM, "out_asm",
1603 "show generated host assembly code for each compiled TB" },
1604 { CPU_LOG_TB_IN_ASM, "in_asm",
1605 "show target assembly code for each compiled TB" },
1606 { CPU_LOG_TB_OP, "op",
1607 "show micro ops for each compiled TB" },
1608 { CPU_LOG_TB_OP_OPT, "op_opt",
1609 "show micro ops "
1610 #ifdef TARGET_I386
1611 "before eflags optimization and "
1612 #endif
1613 "after liveness analysis" },
1614 { CPU_LOG_INT, "int",
1615 "show interrupts/exceptions in short format" },
1616 { CPU_LOG_EXEC, "exec",
1617 "show trace before each executed TB (lots of logs)" },
1618 { CPU_LOG_TB_CPU, "cpu",
1619 "show CPU state before block translation" },
1620 #ifdef TARGET_I386
1621 { CPU_LOG_PCALL, "pcall",
1622 "show protected mode far calls/returns/exceptions" },
1623 { CPU_LOG_RESET, "cpu_reset",
1624 "show CPU state before CPU resets" },
1625 #endif
1626 #ifdef DEBUG_IOPORT
1627 { CPU_LOG_IOPORT, "ioport",
1628 "show all i/o ports accesses" },
1629 #endif
1630 { 0, NULL, NULL },
1633 static int cmp1(const char *s1, int n, const char *s2)
1635 if (strlen(s2) != n)
1636 return 0;
1637 return memcmp(s1, s2, n) == 0;
1640 /* takes a comma-separated list of log masks. Returns 0 on error. */
1641 int cpu_str_to_log_mask(const char *str)
1643 const CPULogItem *item;
1644 int mask;
1645 const char *p, *p1;
1647 p = str;
1648 mask = 0;
1649 for(;;) {
1650 p1 = strchr(p, ',');
1651 if (!p1)
1652 p1 = p + strlen(p);
1653 if(cmp1(p,p1-p,"all")) {
1654 for(item = cpu_log_items; item->mask != 0; item++) {
1655 mask |= item->mask;
1657 } else {
1658 for(item = cpu_log_items; item->mask != 0; item++) {
1659 if (cmp1(p, p1 - p, item->name))
1660 goto found;
1662 return 0;
1664 found:
1665 mask |= item->mask;
1666 if (*p1 != ',')
1667 break;
1668 p = p1 + 1;
1670 return mask;
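/* A minimal usage sketch of cpu_str_to_log_mask(): the returned value is the
   OR of the masks of all named CPULogItem entries, or 0 if any name is
   unknown. "in_asm,int" is just an example argument. */
#if 0
    int mask = cpu_str_to_log_mask("in_asm,int");
    if (mask)
        cpu_set_log(mask);
#endif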
1673 void cpu_abort(CPUState *env, const char *fmt, ...)
1675 va_list ap;
1676 va_list ap2;
1678 va_start(ap, fmt);
1679 va_copy(ap2, ap);
1680 fprintf(stderr, "qemu: fatal: ");
1681 vfprintf(stderr, fmt, ap);
1682 fprintf(stderr, "\n");
1683 #ifdef TARGET_I386
1684 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1685 #else
1686 cpu_dump_state(env, stderr, fprintf, 0);
1687 #endif
1688 if (qemu_log_enabled()) {
1689 qemu_log("qemu: fatal: ");
1690 qemu_log_vprintf(fmt, ap2);
1691 qemu_log("\n");
1692 #ifdef TARGET_I386
1693 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1694 #else
1695 log_cpu_state(env, 0);
1696 #endif
1697 qemu_log_flush();
1698 qemu_log_close();
1700 va_end(ap2);
1701 va_end(ap);
1702 abort();
1705 CPUState *cpu_copy(CPUState *env)
1707 CPUState *new_env = cpu_init(env->cpu_model_str);
1708 CPUState *next_cpu = new_env->next_cpu;
1709 int cpu_index = new_env->cpu_index;
1710 #if defined(TARGET_HAS_ICE)
1711 CPUBreakpoint *bp;
1712 CPUWatchpoint *wp;
1713 #endif
1715 memcpy(new_env, env, sizeof(CPUState));
1717 /* Preserve chaining and index. */
1718 new_env->next_cpu = next_cpu;
1719 new_env->cpu_index = cpu_index;
1721 /* Clone all break/watchpoints.
1722 Note: Once we support ptrace with hw-debug register access, make sure
1723 BP_CPU break/watchpoints are handled correctly on clone. */
1724 TAILQ_INIT(&env->breakpoints);
1725 TAILQ_INIT(&env->watchpoints);
1726 #if defined(TARGET_HAS_ICE)
1727 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1728 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1730 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1731 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1732 wp->flags, NULL);
1734 #endif
1736 return new_env;
1739 #if !defined(CONFIG_USER_ONLY)
1741 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1743 unsigned int i;
1745 /* Discard jump cache entries for any tb which might potentially
1746 overlap the flushed page. */
1747 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1748 memset (&env->tb_jmp_cache[i], 0,
1749 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1751 i = tb_jmp_cache_hash_page(addr);
1752 memset (&env->tb_jmp_cache[i], 0,
1753 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1756 /* NOTE: if flush_global is true, also flush global entries (not
1757 implemented yet) */
1758 void tlb_flush(CPUState *env, int flush_global)
1760 int i;
1762 #if defined(DEBUG_TLB)
1763 printf("tlb_flush:\n");
1764 #endif
1765 /* must reset current TB so that interrupts cannot modify the
1766 links while we are modifying them */
1767 env->current_tb = NULL;
1769 for(i = 0; i < CPU_TLB_SIZE; i++) {
1770 env->tlb_table[0][i].addr_read = -1;
1771 env->tlb_table[0][i].addr_write = -1;
1772 env->tlb_table[0][i].addr_code = -1;
1773 env->tlb_table[1][i].addr_read = -1;
1774 env->tlb_table[1][i].addr_write = -1;
1775 env->tlb_table[1][i].addr_code = -1;
1776 #if (NB_MMU_MODES >= 3)
1777 env->tlb_table[2][i].addr_read = -1;
1778 env->tlb_table[2][i].addr_write = -1;
1779 env->tlb_table[2][i].addr_code = -1;
1780 #endif
1781 #if (NB_MMU_MODES >= 4)
1782 env->tlb_table[3][i].addr_read = -1;
1783 env->tlb_table[3][i].addr_write = -1;
1784 env->tlb_table[3][i].addr_code = -1;
1785 #endif
1786 #if (NB_MMU_MODES >= 5)
1787 env->tlb_table[4][i].addr_read = -1;
1788 env->tlb_table[4][i].addr_write = -1;
1789 env->tlb_table[4][i].addr_code = -1;
1790 #endif
1794 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1796 #ifdef CONFIG_KQEMU
1797 if (env->kqemu_enabled) {
1798 kqemu_flush(env, flush_global);
1800 #endif
1801 tlb_flush_count++;
1804 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1806 if (addr == (tlb_entry->addr_read &
1807 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1808 addr == (tlb_entry->addr_write &
1809 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1810 addr == (tlb_entry->addr_code &
1811 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1812 tlb_entry->addr_read = -1;
1813 tlb_entry->addr_write = -1;
1814 tlb_entry->addr_code = -1;
1818 void tlb_flush_page(CPUState *env, target_ulong addr)
1820 int i;
1822 #if defined(DEBUG_TLB)
1823 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1824 #endif
1825 /* must reset current TB so that interrupts cannot modify the
1826 links while we are modifying them */
1827 env->current_tb = NULL;
1829 addr &= TARGET_PAGE_MASK;
1830 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1831 tlb_flush_entry(&env->tlb_table[0][i], addr);
1832 tlb_flush_entry(&env->tlb_table[1][i], addr);
1833 #if (NB_MMU_MODES >= 3)
1834 tlb_flush_entry(&env->tlb_table[2][i], addr);
1835 #endif
1836 #if (NB_MMU_MODES >= 4)
1837 tlb_flush_entry(&env->tlb_table[3][i], addr);
1838 #endif
1839 #if (NB_MMU_MODES >= 5)
1840 tlb_flush_entry(&env->tlb_table[4][i], addr);
1841 #endif
1843 tlb_flush_jmp_cache(env, addr);
1845 #ifdef CONFIG_KQEMU
1846 if (env->kqemu_enabled) {
1847 kqemu_flush_page(env, addr);
1849 #endif
1852 /* update the TLBs so that writes to code in the virtual page 'addr'
1853 can be detected */
1854 static void tlb_protect_code(ram_addr_t ram_addr)
1856 cpu_physical_memory_reset_dirty(ram_addr,
1857 ram_addr + TARGET_PAGE_SIZE,
1858 CODE_DIRTY_FLAG);
1861 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1862 tested for self modifying code */
1863 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1864 target_ulong vaddr)
1866 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1869 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1870 unsigned long start, unsigned long length)
1872 unsigned long addr;
1873 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1874 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1875 if ((addr - start) < length) {
1876 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
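/* For illustration: because addr, start and length are unsigned, the single
   test "(addr - start) < length" above is equivalent to the two-sided check
   start <= addr && addr < start + length (an addr below start wraps around to
   a huge value and fails). The helper name example_in_range is hypothetical. */
#if 0
static inline int example_in_range(unsigned long addr, unsigned long start,
                                   unsigned long length)
{
    return (addr - start) < length;
}
#endif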
1881 /* Note: start and end must be within the same ram block. */
1882 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1883 int dirty_flags)
1885 CPUState *env;
1886 unsigned long length, start1;
1887 int i, mask, len;
1888 uint8_t *p;
1890 start &= TARGET_PAGE_MASK;
1891 end = TARGET_PAGE_ALIGN(end);
1893 length = end - start;
1894 if (length == 0)
1895 return;
1896 len = length >> TARGET_PAGE_BITS;
1897 #ifdef CONFIG_KQEMU
1898 /* XXX: should not depend on cpu context */
1899 env = first_cpu;
1900 if (env->kqemu_enabled) {
1901 ram_addr_t addr;
1902 addr = start;
1903 for(i = 0; i < len; i++) {
1904 kqemu_set_notdirty(env, addr);
1905 addr += TARGET_PAGE_SIZE;
1908 #endif
1909 mask = ~dirty_flags;
1910 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1911 for(i = 0; i < len; i++)
1912 p[i] &= mask;
1914 /* we modify the TLB cache so that the dirty bit will be set again
1915 when accessing the range */
1916 start1 = (unsigned long)qemu_get_ram_ptr(start);
1917     /* Check that we don't span multiple blocks - this breaks the
1918 address comparisons below. */
1919 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1920 != (end - 1) - start) {
1921 abort();
1924 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1925 for(i = 0; i < CPU_TLB_SIZE; i++)
1926 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1927 for(i = 0; i < CPU_TLB_SIZE; i++)
1928 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1929 #if (NB_MMU_MODES >= 3)
1930 for(i = 0; i < CPU_TLB_SIZE; i++)
1931 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1932 #endif
1933 #if (NB_MMU_MODES >= 4)
1934 for(i = 0; i < CPU_TLB_SIZE; i++)
1935 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1936 #endif
1937 #if (NB_MMU_MODES >= 5)
1938 for(i = 0; i < CPU_TLB_SIZE; i++)
1939 tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1940 #endif
1944 int cpu_physical_memory_set_dirty_tracking(int enable)
1946 if (kvm_enabled()) {
1947 return kvm_set_migration_log(enable);
1949 return 0;
1952 int cpu_physical_memory_get_dirty_tracking(void)
1954 return in_migration;
1957 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1958 target_phys_addr_t end_addr)
1960 int ret = 0;
1962 if (kvm_enabled())
1963 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1964 return ret;
1967 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1969 ram_addr_t ram_addr;
1970 void *p;
1972 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1973 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1974 + tlb_entry->addend);
1975 ram_addr = qemu_ram_addr_from_host(p);
1976 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1977 tlb_entry->addr_write |= TLB_NOTDIRTY;
1982 /* update the TLB according to the current state of the dirty bits */
1983 void cpu_tlb_update_dirty(CPUState *env)
1985 int i;
1986 for(i = 0; i < CPU_TLB_SIZE; i++)
1987 tlb_update_dirty(&env->tlb_table[0][i]);
1988 for(i = 0; i < CPU_TLB_SIZE; i++)
1989 tlb_update_dirty(&env->tlb_table[1][i]);
1990 #if (NB_MMU_MODES >= 3)
1991 for(i = 0; i < CPU_TLB_SIZE; i++)
1992 tlb_update_dirty(&env->tlb_table[2][i]);
1993 #endif
1994 #if (NB_MMU_MODES >= 4)
1995 for(i = 0; i < CPU_TLB_SIZE; i++)
1996 tlb_update_dirty(&env->tlb_table[3][i]);
1997 #endif
1998 #if (NB_MMU_MODES >= 5)
1999 for(i = 0; i < CPU_TLB_SIZE; i++)
2000 tlb_update_dirty(&env->tlb_table[4][i]);
2001 #endif
2004 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2006 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2007 tlb_entry->addr_write = vaddr;
2010 /* update the TLB corresponding to virtual page vaddr
2011 so that it is no longer dirty */
2012 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2014 int i;
2016 vaddr &= TARGET_PAGE_MASK;
2017 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2018 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2019 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2020 #if (NB_MMU_MODES >= 3)
2021 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2022 #endif
2023 #if (NB_MMU_MODES >= 4)
2024 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2025 #endif
2026 #if (NB_MMU_MODES >= 5)
2027 tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
2028 #endif
2031 /* add a new TLB entry. At most one entry for a given virtual address
2032 is permitted. Return 0 if OK or 2 if the page could not be mapped
2033 (can only happen in non SOFTMMU mode for I/O pages or pages
2034 conflicting with the host address space). */
2035 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2036 target_phys_addr_t paddr, int prot,
2037 int mmu_idx, int is_softmmu)
2039 PhysPageDesc *p;
2040 unsigned long pd;
2041 unsigned int index;
2042 target_ulong address;
2043 target_ulong code_address;
2044 target_phys_addr_t addend;
2045 int ret;
2046 CPUTLBEntry *te;
2047 CPUWatchpoint *wp;
2048 target_phys_addr_t iotlb;
2050 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2051 if (!p) {
2052 pd = IO_MEM_UNASSIGNED;
2053 } else {
2054 pd = p->phys_offset;
2056 #if defined(DEBUG_TLB)
2057 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2058 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2059 #endif
2061 ret = 0;
2062 address = vaddr;
2063 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2064 /* IO memory case (romd handled later) */
2065 address |= TLB_MMIO;
2067 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2068 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2069 /* Normal RAM. */
2070 iotlb = pd & TARGET_PAGE_MASK;
2071 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2072 iotlb |= IO_MEM_NOTDIRTY;
2073 else
2074 iotlb |= IO_MEM_ROM;
2075 } else {
2076 /* IO handlers are currently passed a physical address.
2077 It would be nice to pass an offset from the base address
2078 of that region. This would avoid having to special case RAM,
2079 and avoid full address decoding in every device.
2080 We can't use the high bits of pd for this because
2081 IO_MEM_ROMD uses these as a ram address. */
2082 iotlb = (pd & ~TARGET_PAGE_MASK);
2083 if (p) {
2084 iotlb += p->region_offset;
2085 } else {
2086 iotlb += paddr;
2090 code_address = address;
2091 /* Make accesses to pages with watchpoints go via the
2092 watchpoint trap routines. */
2093 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2094 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2095 iotlb = io_mem_watch + paddr;
2096 /* TODO: The memory case can be optimized by not trapping
2097 reads of pages with a write breakpoint. */
2098 address |= TLB_MMIO;
2102 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2103 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2104 te = &env->tlb_table[mmu_idx][index];
2105 te->addend = addend - vaddr;
2106 if (prot & PAGE_READ) {
2107 te->addr_read = address;
2108 } else {
2109 te->addr_read = -1;
2112 if (prot & PAGE_EXEC) {
2113 te->addr_code = code_address;
2114 } else {
2115 te->addr_code = -1;
2117 if (prot & PAGE_WRITE) {
2118 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2119 (pd & IO_MEM_ROMD)) {
2120 /* Write access calls the I/O callback. */
2121 te->addr_write = address | TLB_MMIO;
2122 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2123 !cpu_physical_memory_is_dirty(pd)) {
2124 te->addr_write = address | TLB_NOTDIRTY;
2125 } else {
2126 te->addr_write = address;
2128 } else {
2129 te->addr_write = -1;
2131 return ret;
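/* Illustrative sketch only (not part of the build): how the softmmu slow
   path in softmmu_template.h interprets an entry filled in by
   tlb_set_page_exec().  The helper itself is hypothetical; the two tests
   mirror the existing fast-path checks. */
static inline int example_tlb_write_is_fast(CPUState *env1, int mmu_idx,
                                            target_ulong addr)
{
    int idx = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env1->tlb_table[mmu_idx][idx].addr_write;

    if ((addr & TARGET_PAGE_MASK) !=
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        return 0;   /* miss: refill through tlb_set_page_exec() */
    if (tlb_addr & ~TARGET_PAGE_MASK)
        return 0;   /* TLB_NOTDIRTY or TLB_MMIO set: take the I/O path */
    return 1;       /* plain RAM: direct host access via te->addend */
}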
2134 #else
2136 void tlb_flush(CPUState *env, int flush_global)
2140 void tlb_flush_page(CPUState *env, target_ulong addr)
2144 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2145 target_phys_addr_t paddr, int prot,
2146 int mmu_idx, int is_softmmu)
2148 return 0;
2151 /* dump memory mappings */
2152 void page_dump(FILE *f)
2154 unsigned long start, end;
2155 int i, j, prot, prot1;
2156 PageDesc *p;
2158 fprintf(f, "%-8s %-8s %-8s %s\n",
2159 "start", "end", "size", "prot");
2160 start = -1;
2161 end = -1;
2162 prot = 0;
2163 for(i = 0; i <= L1_SIZE; i++) {
2164 if (i < L1_SIZE)
2165 p = l1_map[i];
2166 else
2167 p = NULL;
2168 for(j = 0;j < L2_SIZE; j++) {
2169 if (!p)
2170 prot1 = 0;
2171 else
2172 prot1 = p[j].flags;
2173 if (prot1 != prot) {
2174 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2175 if (start != -1) {
2176 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2177 start, end, end - start,
2178 prot & PAGE_READ ? 'r' : '-',
2179 prot & PAGE_WRITE ? 'w' : '-',
2180 prot & PAGE_EXEC ? 'x' : '-');
2182 if (prot1 != 0)
2183 start = end;
2184 else
2185 start = -1;
2186 prot = prot1;
2188 if (!p)
2189 break;
2194 int page_get_flags(target_ulong address)
2196 PageDesc *p;
2198 p = page_find(address >> TARGET_PAGE_BITS);
2199 if (!p)
2200 return 0;
2201 return p->flags;
2204 /* modify the flags of a page and invalidate the code if
2205 necessary. The flag PAGE_WRITE_ORG is set automatically when
2206 PAGE_WRITE is set */
2207 void page_set_flags(target_ulong start, target_ulong end, int flags)
2209 PageDesc *p;
2210 target_ulong addr;
2212 /* mmap_lock should already be held. */
2213 start = start & TARGET_PAGE_MASK;
2214 end = TARGET_PAGE_ALIGN(end);
2215 if (flags & PAGE_WRITE)
2216 flags |= PAGE_WRITE_ORG;
2217 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2218 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2219 /* We may be called for host regions that are outside guest
2220 address space. */
2221 if (!p)
2222 return;
2223 /* if the write protection is set, then we invalidate the code
2224 inside */
2225 if (!(p->flags & PAGE_WRITE) &&
2226 (flags & PAGE_WRITE) &&
2227 p->first_tb) {
2228 tb_invalidate_phys_page(addr, 0, NULL);
2230 p->flags = flags;
2234 int page_check_range(target_ulong start, target_ulong len, int flags)
2236 PageDesc *p;
2237 target_ulong end;
2238 target_ulong addr;
2240 if (start + len < start)
2241 /* we've wrapped around */
2242 return -1;
2244 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2245 start = start & TARGET_PAGE_MASK;
2247 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2248 p = page_find(addr >> TARGET_PAGE_BITS);
2249 if( !p )
2250 return -1;
2251 if( !(p->flags & PAGE_VALID) )
2252 return -1;
2254 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2255 return -1;
2256 if (flags & PAGE_WRITE) {
2257 if (!(p->flags & PAGE_WRITE_ORG))
2258 return -1;
2259 /* unprotect the page if it was put read-only because it
2260 contains translated code */
2261 if (!(p->flags & PAGE_WRITE)) {
2262 if (!page_unprotect(addr, 0, NULL))
2263 return -1;
2265 return 0;
2268 return 0;
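/* Illustrative sketch only (not part of the build): the typical
   user-mode caller pattern, e.g. what an access_ok()-style check in the
   user emulation front end reduces to.  The wrapper name is made up. */
static inline int example_guest_range_writable(target_ulong guest_addr,
                                               target_ulong size)
{
    /* page_check_range() returns 0 when every page in the range is
       valid and carries the requested protection bits. */
    return page_check_range(guest_addr, size, PAGE_WRITE) == 0;
}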
2271 /* called from the signal handler: invalidate the code and unprotect the
2272 page. Return TRUE if the fault was successfully handled. */
2273 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2275 unsigned int page_index, prot, pindex;
2276 PageDesc *p, *p1;
2277 target_ulong host_start, host_end, addr;
2279 /* Technically this isn't safe inside a signal handler. However we
2280 know this only ever happens in a synchronous SEGV handler, so in
2281 practice it seems to be ok. */
2282 mmap_lock();
2284 host_start = address & qemu_host_page_mask;
2285 page_index = host_start >> TARGET_PAGE_BITS;
2286 p1 = page_find(page_index);
2287 if (!p1) {
2288 mmap_unlock();
2289 return 0;
2291 host_end = host_start + qemu_host_page_size;
2292 p = p1;
2293 prot = 0;
2294 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2295 prot |= p->flags;
2296 p++;
2298 /* if the page was really writable, then we change its
2299 protection back to writable */
2300 if (prot & PAGE_WRITE_ORG) {
2301 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2302 if (!(p1[pindex].flags & PAGE_WRITE)) {
2303 mprotect((void *)g2h(host_start), qemu_host_page_size,
2304 (prot & PAGE_BITS) | PAGE_WRITE);
2305 p1[pindex].flags |= PAGE_WRITE;
2306 /* and since the content will be modified, we must invalidate
2307 the corresponding translated code. */
2308 tb_invalidate_phys_page(address, pc, puc);
2309 #ifdef DEBUG_TB_CHECK
2310 tb_invalidate_check(address);
2311 #endif
2312 mmap_unlock();
2313 return 1;
2316 mmap_unlock();
2317 return 0;
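/* Illustrative sketch only (not part of the build): the shape of the
   host SIGSEGV path that ends up in page_unprotect() (the real logic
   lives in the per-host handle_cpu_signal() code).  The name and the
   simplified control flow are assumptions. */
static int example_handle_write_fault(target_ulong guest_addr,
                                      unsigned long host_pc, void *puc)
{
    /* If the page was made read-only only because it holds translated
       code, page_unprotect() drops the affected TBs, restores
       PROT_WRITE and the faulting instruction can simply be retried. */
    if (page_unprotect(guest_addr, host_pc, puc))
        return 1;   /* handled: restart the access */
    return 0;       /* genuine fault: deliver a guest SIGSEGV instead */
}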
2320 static inline void tlb_set_dirty(CPUState *env,
2321 unsigned long addr, target_ulong vaddr)
2324 #endif /* defined(CONFIG_USER_ONLY) */
2326 #if !defined(CONFIG_USER_ONLY)
2328 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2329 ram_addr_t memory, ram_addr_t region_offset);
2330 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2331 ram_addr_t orig_memory, ram_addr_t region_offset);
2332 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2333 need_subpage) \
2334 do { \
2335 if (addr > start_addr) \
2336 start_addr2 = 0; \
2337 else { \
2338 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2339 if (start_addr2 > 0) \
2340 need_subpage = 1; \
2343 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2344 end_addr2 = TARGET_PAGE_SIZE - 1; \
2345 else { \
2346 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2347 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2348 need_subpage = 1; \
2350 } while (0)
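/* Worked example (illustrative only, assuming 4 KiB target pages): a
   region registered at start_addr = 0x10100 with orig_size = 0x200 hits
   the page at addr = 0x10000 only partially, so the macro reports
   start_addr2 = 0x100, end_addr2 = 0x2ff and sets need_subpage. */
static int example_check_subpage(void)
{
    target_phys_addr_t start_addr = 0x10100, addr = 0x10000;
    ram_addr_t orig_size = 0x200;               /* read by the macro */
    target_phys_addr_t end_addr = start_addr + orig_size;
    target_phys_addr_t start_addr2, end_addr2;
    int need_subpage = 0;

    CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                  need_subpage);
    return need_subpage && start_addr2 == 0x100 && end_addr2 == 0x2ff;
}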
2352 /* register physical memory. 'size' must be a multiple of the target
2353 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2354 io memory page. The address used when calling the IO function is
2355 the offset from the start of the region, plus region_offset. Both
2356 start_addr and region_offset are rounded down to a page boundary
2357 before calculating this offset. This should not be a problem unless
2358 the low bits of start_addr and region_offset differ. */
2359 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2360 ram_addr_t size,
2361 ram_addr_t phys_offset,
2362 ram_addr_t region_offset)
2364 target_phys_addr_t addr, end_addr;
2365 PhysPageDesc *p;
2366 CPUState *env;
2367 ram_addr_t orig_size = size;
2368 void *subpage;
2370 #ifdef CONFIG_KQEMU
2371 /* XXX: should not depend on cpu context */
2372 env = first_cpu;
2373 if (env->kqemu_enabled) {
2374 kqemu_set_phys_mem(start_addr, size, phys_offset);
2376 #endif
2377 if (kvm_enabled())
2378 kvm_set_phys_mem(start_addr, size, phys_offset);
2380 if (phys_offset == IO_MEM_UNASSIGNED) {
2381 region_offset = start_addr;
2383 region_offset &= TARGET_PAGE_MASK;
2384 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2385 end_addr = start_addr + (target_phys_addr_t)size;
2386 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2387 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2388 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2389 ram_addr_t orig_memory = p->phys_offset;
2390 target_phys_addr_t start_addr2, end_addr2;
2391 int need_subpage = 0;
2393 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2394 need_subpage);
2395 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2396 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2397 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2398 &p->phys_offset, orig_memory,
2399 p->region_offset);
2400 } else {
2401 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2402 >> IO_MEM_SHIFT];
2404 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2405 region_offset);
2406 p->region_offset = 0;
2407 } else {
2408 p->phys_offset = phys_offset;
2409 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2410 (phys_offset & IO_MEM_ROMD))
2411 phys_offset += TARGET_PAGE_SIZE;
2413 } else {
2414 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2415 p->phys_offset = phys_offset;
2416 p->region_offset = region_offset;
2417 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2418 (phys_offset & IO_MEM_ROMD)) {
2419 phys_offset += TARGET_PAGE_SIZE;
2420 } else {
2421 target_phys_addr_t start_addr2, end_addr2;
2422 int need_subpage = 0;
2424 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2425 end_addr2, need_subpage);
2427 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2428 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2429 &p->phys_offset, IO_MEM_UNASSIGNED,
2430 addr & TARGET_PAGE_MASK);
2431 subpage_register(subpage, start_addr2, end_addr2,
2432 phys_offset, region_offset);
2433 p->region_offset = 0;
2437 region_offset += TARGET_PAGE_SIZE;
2440 /* since each CPU stores ram addresses in its TLB cache, we must
2441 reset the modified entries */
2442 /* XXX: slow! */
2443 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2444 tlb_flush(env, 1);
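/* Illustrative sketch only (not part of the build): the usual board-level
   calling sequence for the function above, via the
   cpu_register_physical_memory() wrapper (region_offset == 0).  The base
   address and RAM size are made up. */
static void example_register_main_ram(void)
{
    ram_addr_t ram_size = 128 * 1024 * 1024;
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* Plain RAM: phys_offset is the allocation offset with the low
       IO_MEM_* bits clear. */
    cpu_register_physical_memory(0x00000000, ram_size,
                                 ram_offset | IO_MEM_RAM);
}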
2448 /* XXX: temporary until new memory mapping API */
2449 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2451 PhysPageDesc *p;
2453 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2454 if (!p)
2455 return IO_MEM_UNASSIGNED;
2456 return p->phys_offset;
2459 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2461 if (kvm_enabled())
2462 kvm_coalesce_mmio_region(addr, size);
2465 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2467 if (kvm_enabled())
2468 kvm_uncoalesce_mmio_region(addr, size);
2471 #ifdef CONFIG_KQEMU
2472 /* XXX: better than nothing */
2473 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2475 ram_addr_t addr;
2476 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2477 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2478 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2479 abort();
2481 addr = last_ram_offset;
2482 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2483 return addr;
2485 #endif
2487 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2489 RAMBlock *new_block;
2491 #ifdef CONFIG_KQEMU
2492 if (kqemu_phys_ram_base) {
2493 return kqemu_ram_alloc(size);
2495 #endif
2497 size = TARGET_PAGE_ALIGN(size);
2498 new_block = qemu_malloc(sizeof(*new_block));
2500 new_block->host = qemu_vmalloc(size);
2501 new_block->offset = last_ram_offset;
2502 new_block->length = size;
2504 new_block->next = ram_blocks;
2505 ram_blocks = new_block;
2507 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2508 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2509 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2510 0xff, size >> TARGET_PAGE_BITS);
2512 last_ram_offset += size;
2514 if (kvm_enabled())
2515 kvm_setup_guest_memory(new_block->host, size);
2517 return new_block->offset;
2520 void qemu_ram_free(ram_addr_t addr)
2522 /* TODO: implement this. */
2525 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2526 With the exception of the softmmu code in this file, this should
2527 only be used for local memory (e.g. video ram) that the device owns,
2528 and knows it isn't going to access beyond the end of the block.
2530 It should not be used for general purpose DMA.
2531 Use cpu_physical_memory_map/cpu_physical_memory_rw instead. */
2533 void *qemu_get_ram_ptr(ram_addr_t addr)
2535 RAMBlock *prev;
2536 RAMBlock **prevp;
2537 RAMBlock *block;
2539 #ifdef CONFIG_KQEMU
2540 if (kqemu_phys_ram_base) {
2541 return kqemu_phys_ram_base + addr;
2543 #endif
2545 prev = NULL;
2546 prevp = &ram_blocks;
2547 block = ram_blocks;
2548 while (block && (block->offset > addr
2549 || block->offset + block->length <= addr)) {
2550 if (prev)
2551 prevp = &prev->next;
2552 prev = block;
2553 block = block->next;
2555 if (!block) {
2556 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2557 abort();
2559 /* Move this entry to the start of the list. */
2560 if (prev) {
2561 prev->next = block->next;
2562 block->next = *prevp;
2563 *prevp = block;
2565 return block->host + (addr - block->offset);
2568 /* Some of the softmmu routines need to translate from a host pointer
2569 (typically a TLB entry) back to a ram offset. */
2570 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2572 RAMBlock *prev;
2573 RAMBlock **prevp;
2574 RAMBlock *block;
2575 uint8_t *host = ptr;
2577 #ifdef CONFIG_KQEMU
2578 if (kqemu_phys_ram_base) {
2579 return host - kqemu_phys_ram_base;
2581 #endif
2583 prev = NULL;
2584 prevp = &ram_blocks;
2585 block = ram_blocks;
2586 while (block && (block->host > host
2587 || block->host + block->length <= host)) {
2588 if (prev)
2589 prevp = &prev->next;
2590 prev = block;
2591 block = block->next;
2593 if (!block) {
2594 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2595 abort();
2597 return block->offset + (host - block->host);
2600 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2602 #ifdef DEBUG_UNASSIGNED
2603 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2604 #endif
2605 #if defined(TARGET_SPARC)
2606 do_unassigned_access(addr, 0, 0, 0, 1);
2607 #endif
2608 return 0;
2611 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2613 #ifdef DEBUG_UNASSIGNED
2614 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2615 #endif
2616 #if defined(TARGET_SPARC)
2617 do_unassigned_access(addr, 0, 0, 0, 2);
2618 #endif
2619 return 0;
2622 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2624 #ifdef DEBUG_UNASSIGNED
2625 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2626 #endif
2627 #if defined(TARGET_SPARC)
2628 do_unassigned_access(addr, 0, 0, 0, 4);
2629 #endif
2630 return 0;
2633 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2635 #ifdef DEBUG_UNASSIGNED
2636 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2637 #endif
2638 #if defined(TARGET_SPARC)
2639 do_unassigned_access(addr, 1, 0, 0, 1);
2640 #endif
2643 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2645 #ifdef DEBUG_UNASSIGNED
2646 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2647 #endif
2648 #if defined(TARGET_SPARC)
2649 do_unassigned_access(addr, 1, 0, 0, 2);
2650 #endif
2653 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2655 #ifdef DEBUG_UNASSIGNED
2656 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2657 #endif
2658 #if defined(TARGET_SPARC)
2659 do_unassigned_access(addr, 1, 0, 0, 4);
2660 #endif
2663 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2664 unassigned_mem_readb,
2665 unassigned_mem_readw,
2666 unassigned_mem_readl,
2669 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2670 unassigned_mem_writeb,
2671 unassigned_mem_writew,
2672 unassigned_mem_writel,
2675 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2676 uint32_t val)
2678 int dirty_flags;
2679 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2680 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2681 #if !defined(CONFIG_USER_ONLY)
2682 tb_invalidate_phys_page_fast(ram_addr, 1);
2683 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2684 #endif
2686 stb_p(qemu_get_ram_ptr(ram_addr), val);
2687 #ifdef CONFIG_KQEMU
2688 if (cpu_single_env->kqemu_enabled &&
2689 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2690 kqemu_modify_page(cpu_single_env, ram_addr);
2691 #endif
2692 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2693 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2694 /* we remove the notdirty callback only if the code has been
2695 flushed */
2696 if (dirty_flags == 0xff)
2697 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2700 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2701 uint32_t val)
2703 int dirty_flags;
2704 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2705 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2706 #if !defined(CONFIG_USER_ONLY)
2707 tb_invalidate_phys_page_fast(ram_addr, 2);
2708 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2709 #endif
2711 stw_p(qemu_get_ram_ptr(ram_addr), val);
2712 #ifdef CONFIG_KQEMU
2713 if (cpu_single_env->kqemu_enabled &&
2714 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2715 kqemu_modify_page(cpu_single_env, ram_addr);
2716 #endif
2717 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2718 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2719 /* we remove the notdirty callback only if the code has been
2720 flushed */
2721 if (dirty_flags == 0xff)
2722 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2725 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2726 uint32_t val)
2728 int dirty_flags;
2729 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2730 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2731 #if !defined(CONFIG_USER_ONLY)
2732 tb_invalidate_phys_page_fast(ram_addr, 4);
2733 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2734 #endif
2736 stl_p(qemu_get_ram_ptr(ram_addr), val);
2737 #ifdef CONFIG_KQEMU
2738 if (cpu_single_env->kqemu_enabled &&
2739 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2740 kqemu_modify_page(cpu_single_env, ram_addr);
2741 #endif
2742 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2743 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2744 /* we remove the notdirty callback only if the code has been
2745 flushed */
2746 if (dirty_flags == 0xff)
2747 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2750 static CPUReadMemoryFunc *error_mem_read[3] = {
2751 NULL, /* never used */
2752 NULL, /* never used */
2753 NULL, /* never used */
2756 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2757 notdirty_mem_writeb,
2758 notdirty_mem_writew,
2759 notdirty_mem_writel,
2762 /* Generate a debug exception if a watchpoint has been hit. */
2763 static void check_watchpoint(int offset, int len_mask, int flags)
2765 CPUState *env = cpu_single_env;
2766 target_ulong pc, cs_base;
2767 TranslationBlock *tb;
2768 target_ulong vaddr;
2769 CPUWatchpoint *wp;
2770 int cpu_flags;
2772 if (env->watchpoint_hit) {
2773 /* We re-entered the check after replacing the TB. Now raise
2774 * the debug interrupt so that it will trigger after the
2775 * current instruction. */
2776 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2777 return;
2779 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2780 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2781 if ((vaddr == (wp->vaddr & len_mask) ||
2782 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2783 wp->flags |= BP_WATCHPOINT_HIT;
2784 if (!env->watchpoint_hit) {
2785 env->watchpoint_hit = wp;
2786 tb = tb_find_pc(env->mem_io_pc);
2787 if (!tb) {
2788 cpu_abort(env, "check_watchpoint: could not find TB for "
2789 "pc=%p", (void *)env->mem_io_pc);
2791 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2792 tb_phys_invalidate(tb, -1);
2793 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2794 env->exception_index = EXCP_DEBUG;
2795 } else {
2796 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2797 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2799 cpu_resume_from_signal(env, NULL);
2801 } else {
2802 wp->flags &= ~BP_WATCHPOINT_HIT;
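/* Illustrative sketch only (not part of the build): how a debugger front
   end arms the mechanism that check_watchpoint() services.  The watched
   address is assumed to be 4-byte aligned; the helper name is made up. */
static int example_set_write_watchpoint(CPUState *env1, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* cpu_watchpoint_insert() records the watchpoint and flushes the
       TLB page so that accesses go through the watch_mem_* handlers
       below. */
    return cpu_watchpoint_insert(env1, addr, 4, BP_MEM_WRITE, &wp);
}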
2807 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2808 so these check for a hit then pass through to the normal out-of-line
2809 phys routines. */
2810 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2812 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2813 return ldub_phys(addr);
2816 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2818 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2819 return lduw_phys(addr);
2822 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2824 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2825 return ldl_phys(addr);
2828 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2829 uint32_t val)
2831 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2832 stb_phys(addr, val);
2835 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2836 uint32_t val)
2838 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2839 stw_phys(addr, val);
2842 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2843 uint32_t val)
2845 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2846 stl_phys(addr, val);
2849 static CPUReadMemoryFunc *watch_mem_read[3] = {
2850 watch_mem_readb,
2851 watch_mem_readw,
2852 watch_mem_readl,
2855 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2856 watch_mem_writeb,
2857 watch_mem_writew,
2858 watch_mem_writel,
2861 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2862 unsigned int len)
2864 uint32_t ret;
2865 unsigned int idx;
2867 idx = SUBPAGE_IDX(addr);
2868 #if defined(DEBUG_SUBPAGE)
2869 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2870 mmio, len, addr, idx);
2871 #endif
2872 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2873 addr + mmio->region_offset[idx][0][len]);
2875 return ret;
2878 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2879 uint32_t value, unsigned int len)
2881 unsigned int idx;
2883 idx = SUBPAGE_IDX(addr);
2884 #if defined(DEBUG_SUBPAGE)
2885 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2886 mmio, len, addr, idx, value);
2887 #endif
2888 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2889 addr + mmio->region_offset[idx][1][len],
2890 value);
2893 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2895 #if defined(DEBUG_SUBPAGE)
2896 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2897 #endif
2899 return subpage_readlen(opaque, addr, 0);
2902 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2903 uint32_t value)
2905 #if defined(DEBUG_SUBPAGE)
2906 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2907 #endif
2908 subpage_writelen(opaque, addr, value, 0);
2911 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2913 #if defined(DEBUG_SUBPAGE)
2914 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2915 #endif
2917 return subpage_readlen(opaque, addr, 1);
2920 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2921 uint32_t value)
2923 #if defined(DEBUG_SUBPAGE)
2924 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2925 #endif
2926 subpage_writelen(opaque, addr, value, 1);
2929 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2931 #if defined(DEBUG_SUBPAGE)
2932 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2933 #endif
2935 return subpage_readlen(opaque, addr, 2);
2938 static void subpage_writel (void *opaque,
2939 target_phys_addr_t addr, uint32_t value)
2941 #if defined(DEBUG_SUBPAGE)
2942 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2943 #endif
2944 subpage_writelen(opaque, addr, value, 2);
2947 static CPUReadMemoryFunc *subpage_read[] = {
2948 &subpage_readb,
2949 &subpage_readw,
2950 &subpage_readl,
2953 static CPUWriteMemoryFunc *subpage_write[] = {
2954 &subpage_writeb,
2955 &subpage_writew,
2956 &subpage_writel,
2959 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2960 ram_addr_t memory, ram_addr_t region_offset)
2962 int idx, eidx;
2963 unsigned int i;
2965 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2966 return -1;
2967 idx = SUBPAGE_IDX(start);
2968 eidx = SUBPAGE_IDX(end);
2969 #if defined(DEBUG_SUBPAGE)
2970 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2971 mmio, start, end, idx, eidx, memory);
2972 #endif
2973 memory >>= IO_MEM_SHIFT;
2974 for (; idx <= eidx; idx++) {
2975 for (i = 0; i < 4; i++) {
2976 if (io_mem_read[memory][i]) {
2977 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2978 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2979 mmio->region_offset[idx][0][i] = region_offset;
2981 if (io_mem_write[memory][i]) {
2982 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2983 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2984 mmio->region_offset[idx][1][i] = region_offset;
2989 return 0;
2992 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2993 ram_addr_t orig_memory, ram_addr_t region_offset)
2995 subpage_t *mmio;
2996 int subpage_memory;
2998 mmio = qemu_mallocz(sizeof(subpage_t));
3000 mmio->base = base;
3001 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3002 #if defined(DEBUG_SUBPAGE)
3003 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3004 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3005 #endif
3006 *phys = subpage_memory | IO_MEM_SUBPAGE;
3007 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3008 region_offset);
3010 return mmio;
3013 static int get_free_io_mem_idx(void)
3015 int i;
3017 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3018 if (!io_mem_used[i]) {
3019 io_mem_used[i] = 1;
3020 return i;
3023 return -1;
3026 static void io_mem_init(void)
3028 int i;
3030 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3031 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3032 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3033 for (i=0; i<5; i++)
3034 io_mem_used[i] = 1;
3036 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3037 watch_mem_write, NULL);
3038 #ifdef CONFIG_KQEMU
3039 if (kqemu_phys_ram_base) {
3040 /* alloc dirty bits array */
3041 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3042 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3044 #endif
3047 /* mem_read and mem_write are arrays of functions containing the
3048 function to access byte (index 0), word (index 1) and dword (index
3049 2). A function can be omitted by passing a NULL pointer.
3050 If io_index is non-zero, the corresponding io zone is
3051 modified. If it is zero, a new io zone is allocated. The return
3052 value can be used with cpu_register_physical_memory(); -1 is
3053 returned on error. */
3054 int cpu_register_io_memory(int io_index,
3055 CPUReadMemoryFunc **mem_read,
3056 CPUWriteMemoryFunc **mem_write,
3057 void *opaque)
3059 int i, subwidth = 0;
3061 if (io_index <= 0) {
3062 io_index = get_free_io_mem_idx();
3063 if (io_index == -1)
3064 return io_index;
3065 } else {
3066 if (io_index >= IO_MEM_NB_ENTRIES)
3067 return -1;
3070 for(i = 0;i < 3; i++) {
3071 if (!mem_read[i] || !mem_write[i])
3072 subwidth = IO_MEM_SUBWIDTH;
3073 io_mem_read[io_index][i] = mem_read[i];
3074 io_mem_write[io_index][i] = mem_write[i];
3076 io_mem_opaque[io_index] = opaque;
3077 return (io_index << IO_MEM_SHIFT) | subwidth;
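/* Illustrative sketch only (not part of the build): the registration
   pattern a simple MMIO device would use with the function above.  The
   device state, callbacks and base address are all made up. */
typedef struct ExampleDevState {
    uint32_t reg;
} ExampleDevState;

static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->reg;                  /* single register, mirrored */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    ExampleDevState *s = opaque;
    s->reg = val;
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL,                           /* omitted: IO_MEM_SUBWIDTH gets set */
    NULL,
    example_dev_readl,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL,
    NULL,
    example_dev_writel,
};

static void example_dev_map(ExampleDevState *s, target_phys_addr_t base)
{
    /* io_index 0 asks for a fresh slot; the returned token goes straight
       into the phys_offset argument of the memory registration call. */
    int io = cpu_register_io_memory(0, example_dev_read,
                                    example_dev_write, s);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}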
3080 void cpu_unregister_io_memory(int io_table_address)
3082 int i;
3083 int io_index = io_table_address >> IO_MEM_SHIFT;
3085 for (i=0;i < 3; i++) {
3086 io_mem_read[io_index][i] = unassigned_mem_read[i];
3087 io_mem_write[io_index][i] = unassigned_mem_write[i];
3089 io_mem_opaque[io_index] = NULL;
3090 io_mem_used[io_index] = 0;
3093 #endif /* !defined(CONFIG_USER_ONLY) */
3095 /* physical memory access (slow version, mainly for debug) */
3096 #if defined(CONFIG_USER_ONLY)
3097 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3098 int len, int is_write)
3100 int l, flags;
3101 target_ulong page;
3102 void * p;
3104 while (len > 0) {
3105 page = addr & TARGET_PAGE_MASK;
3106 l = (page + TARGET_PAGE_SIZE) - addr;
3107 if (l > len)
3108 l = len;
3109 flags = page_get_flags(page);
3110 if (!(flags & PAGE_VALID))
3111 return;
3112 if (is_write) {
3113 if (!(flags & PAGE_WRITE))
3114 return;
3115 /* XXX: this code should not depend on lock_user */
3116 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3117 /* FIXME - should this return an error rather than just fail? */
3118 return;
3119 memcpy(p, buf, l);
3120 unlock_user(p, addr, l);
3121 } else {
3122 if (!(flags & PAGE_READ))
3123 return;
3124 /* XXX: this code should not depend on lock_user */
3125 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3126 /* FIXME - should this return an error rather than just fail? */
3127 return;
3128 memcpy(buf, p, l);
3129 unlock_user(p, addr, 0);
3131 len -= l;
3132 buf += l;
3133 addr += l;
3137 #else
3138 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3139 int len, int is_write)
3141 int l, io_index;
3142 uint8_t *ptr;
3143 uint32_t val;
3144 target_phys_addr_t page;
3145 unsigned long pd;
3146 PhysPageDesc *p;
3148 while (len > 0) {
3149 page = addr & TARGET_PAGE_MASK;
3150 l = (page + TARGET_PAGE_SIZE) - addr;
3151 if (l > len)
3152 l = len;
3153 p = phys_page_find(page >> TARGET_PAGE_BITS);
3154 if (!p) {
3155 pd = IO_MEM_UNASSIGNED;
3156 } else {
3157 pd = p->phys_offset;
3160 if (is_write) {
3161 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3162 target_phys_addr_t addr1 = addr;
3163 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3164 if (p)
3165 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3166 /* XXX: could force cpu_single_env to NULL to avoid
3167 potential bugs */
3168 if (l >= 4 && ((addr1 & 3) == 0)) {
3169 /* 32 bit write access */
3170 val = ldl_p(buf);
3171 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3172 l = 4;
3173 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3174 /* 16 bit write access */
3175 val = lduw_p(buf);
3176 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3177 l = 2;
3178 } else {
3179 /* 8 bit write access */
3180 val = ldub_p(buf);
3181 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3182 l = 1;
3184 } else {
3185 unsigned long addr1;
3186 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3187 /* RAM case */
3188 ptr = qemu_get_ram_ptr(addr1);
3189 memcpy(ptr, buf, l);
3190 if (!cpu_physical_memory_is_dirty(addr1)) {
3191 /* invalidate code */
3192 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3193 /* set dirty bit */
3194 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3195 (0xff & ~CODE_DIRTY_FLAG);
3197 /* qemu doesn't execute guest code directly, but kvm does,
3198 so flush the instruction caches */
3199 if (kvm_enabled())
3200 flush_icache_range((unsigned long)ptr,
3201 ((unsigned long)ptr)+l);
3203 } else {
3204 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3205 !(pd & IO_MEM_ROMD)) {
3206 target_phys_addr_t addr1 = addr;
3207 /* I/O case */
3208 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3209 if (p)
3210 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3211 if (l >= 4 && ((addr1 & 3) == 0)) {
3212 /* 32 bit read access */
3213 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3214 stl_p(buf, val);
3215 l = 4;
3216 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3217 /* 16 bit read access */
3218 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3219 stw_p(buf, val);
3220 l = 2;
3221 } else {
3222 /* 8 bit read access */
3223 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3224 stb_p(buf, val);
3225 l = 1;
3227 } else {
3228 /* RAM case */
3229 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3230 (addr & ~TARGET_PAGE_MASK);
3231 memcpy(buf, ptr, l);
3234 len -= l;
3235 buf += l;
3236 addr += l;
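/* Illustrative sketch only (not part of the build): the
   cpu_physical_memory_read()/write() convenience wrappers reduce to the
   call above; shown here with a made-up address and buffer. */
static void example_patch_guest_phys(target_phys_addr_t addr)
{
    uint8_t buf[16];

    cpu_physical_memory_read(addr, buf, sizeof(buf));   /* is_write = 0 */
    buf[0] ^= 1;
    cpu_physical_memory_write(addr, buf, sizeof(buf));  /* is_write = 1 */
}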
3240 /* used for ROM loading : can write in RAM and ROM */
3241 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3242 const uint8_t *buf, int len)
3244 int l;
3245 uint8_t *ptr;
3246 target_phys_addr_t page;
3247 unsigned long pd;
3248 PhysPageDesc *p;
3250 while (len > 0) {
3251 page = addr & TARGET_PAGE_MASK;
3252 l = (page + TARGET_PAGE_SIZE) - addr;
3253 if (l > len)
3254 l = len;
3255 p = phys_page_find(page >> TARGET_PAGE_BITS);
3256 if (!p) {
3257 pd = IO_MEM_UNASSIGNED;
3258 } else {
3259 pd = p->phys_offset;
3262 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3263 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3264 !(pd & IO_MEM_ROMD)) {
3265 /* do nothing */
3266 } else {
3267 unsigned long addr1;
3268 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3269 /* ROM/RAM case */
3270 ptr = qemu_get_ram_ptr(addr1);
3271 memcpy(ptr, buf, l);
3273 len -= l;
3274 buf += l;
3275 addr += l;
3279 typedef struct {
3280 void *buffer;
3281 target_phys_addr_t addr;
3282 target_phys_addr_t len;
3283 } BounceBuffer;
3285 static BounceBuffer bounce;
3287 typedef struct MapClient {
3288 void *opaque;
3289 void (*callback)(void *opaque);
3290 LIST_ENTRY(MapClient) link;
3291 } MapClient;
3293 static LIST_HEAD(map_client_list, MapClient) map_client_list
3294 = LIST_HEAD_INITIALIZER(map_client_list);
3296 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3298 MapClient *client = qemu_malloc(sizeof(*client));
3300 client->opaque = opaque;
3301 client->callback = callback;
3302 LIST_INSERT_HEAD(&map_client_list, client, link);
3303 return client;
3306 void cpu_unregister_map_client(void *_client)
3308 MapClient *client = (MapClient *)_client;
3310 LIST_REMOVE(client, link);
3313 static void cpu_notify_map_clients(void)
3315 MapClient *client;
3317 while (!LIST_EMPTY(&map_client_list)) {
3318 client = LIST_FIRST(&map_client_list);
3319 client->callback(client->opaque);
3320 LIST_REMOVE(client, link);
3324 /* Map a physical memory region into a host virtual address.
3325 * May map a subset of the requested range, given by and returned in *plen.
3326 * May return NULL if resources needed to perform the mapping are exhausted.
3327 * Use only for reads OR writes - not for read-modify-write operations.
3328 * Use cpu_register_map_client() to know when retrying the map operation is
3329 * likely to succeed. */
3331 void *cpu_physical_memory_map(target_phys_addr_t addr,
3332 target_phys_addr_t *plen,
3333 int is_write)
3335 target_phys_addr_t len = *plen;
3336 target_phys_addr_t done = 0;
3337 int l;
3338 uint8_t *ret = NULL;
3339 uint8_t *ptr;
3340 target_phys_addr_t page;
3341 unsigned long pd;
3342 PhysPageDesc *p;
3343 unsigned long addr1;
3345 while (len > 0) {
3346 page = addr & TARGET_PAGE_MASK;
3347 l = (page + TARGET_PAGE_SIZE) - addr;
3348 if (l > len)
3349 l = len;
3350 p = phys_page_find(page >> TARGET_PAGE_BITS);
3351 if (!p) {
3352 pd = IO_MEM_UNASSIGNED;
3353 } else {
3354 pd = p->phys_offset;
3357 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3358 if (done || bounce.buffer) {
3359 break;
3361 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3362 bounce.addr = addr;
3363 bounce.len = l;
3364 if (!is_write) {
3365 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3367 ptr = bounce.buffer;
3368 } else {
3369 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3370 ptr = qemu_get_ram_ptr(addr1);
3372 if (!done) {
3373 ret = ptr;
3374 } else if (ret + done != ptr) {
3375 break;
3378 len -= l;
3379 addr += l;
3380 done += l;
3382 *plen = done;
3383 return ret;
3386 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3387 * Will also mark the memory as dirty if is_write == 1. access_len gives
3388 * the amount of memory that was actually read or written by the caller. */
3390 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3391 int is_write, target_phys_addr_t access_len)
3393 if (buffer != bounce.buffer) {
3394 if (is_write) {
3395 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3396 while (access_len) {
3397 unsigned l;
3398 l = TARGET_PAGE_SIZE;
3399 if (l > access_len)
3400 l = access_len;
3401 if (!cpu_physical_memory_is_dirty(addr1)) {
3402 /* invalidate code */
3403 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3404 /* set dirty bit */
3405 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3406 (0xff & ~CODE_DIRTY_FLAG);
3408 addr1 += l;
3409 access_len -= l;
3412 return;
3414 if (is_write) {
3415 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3417 qemu_free(bounce.buffer);
3418 bounce.buffer = NULL;
3419 cpu_notify_map_clients();
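/* Illustrative sketch only (not part of the build): the zero-copy DMA
   pattern the map/unmap pair above is meant for.  Retrying through
   cpu_register_map_client() is left out; the transfer length is made up. */
static void example_dma_fill(target_phys_addr_t addr, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Mapping resources (e.g. the single bounce buffer) are busy:
           register a map client and retry from its callback. */
        return;
    }
    memset(host, 0, plen);          /* the device fills the buffer */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}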
3422 /* warning: addr must be aligned */
3423 uint32_t ldl_phys(target_phys_addr_t addr)
3425 int io_index;
3426 uint8_t *ptr;
3427 uint32_t val;
3428 unsigned long pd;
3429 PhysPageDesc *p;
3431 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3432 if (!p) {
3433 pd = IO_MEM_UNASSIGNED;
3434 } else {
3435 pd = p->phys_offset;
3438 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3439 !(pd & IO_MEM_ROMD)) {
3440 /* I/O case */
3441 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3442 if (p)
3443 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3444 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3445 } else {
3446 /* RAM case */
3447 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3448 (addr & ~TARGET_PAGE_MASK);
3449 val = ldl_p(ptr);
3451 return val;
3454 /* warning: addr must be aligned */
3455 uint64_t ldq_phys(target_phys_addr_t addr)
3457 int io_index;
3458 uint8_t *ptr;
3459 uint64_t val;
3460 unsigned long pd;
3461 PhysPageDesc *p;
3463 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3464 if (!p) {
3465 pd = IO_MEM_UNASSIGNED;
3466 } else {
3467 pd = p->phys_offset;
3470 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3471 !(pd & IO_MEM_ROMD)) {
3472 /* I/O case */
3473 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3474 if (p)
3475 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3476 #ifdef TARGET_WORDS_BIGENDIAN
3477 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3478 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3479 #else
3480 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3481 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3482 #endif
3483 } else {
3484 /* RAM case */
3485 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3486 (addr & ~TARGET_PAGE_MASK);
3487 val = ldq_p(ptr);
3489 return val;
3492 /* XXX: optimize */
3493 uint32_t ldub_phys(target_phys_addr_t addr)
3495 uint8_t val;
3496 cpu_physical_memory_read(addr, &val, 1);
3497 return val;
3500 /* XXX: optimize */
3501 uint32_t lduw_phys(target_phys_addr_t addr)
3503 uint16_t val;
3504 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3505 return tswap16(val);
3508 /* warning: addr must be aligned. The ram page is not marked as dirty
3509 and the code inside is not invalidated. It is useful if the dirty
3510 bits are used to track modified PTEs */
3511 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3513 int io_index;
3514 uint8_t *ptr;
3515 unsigned long pd;
3516 PhysPageDesc *p;
3518 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3519 if (!p) {
3520 pd = IO_MEM_UNASSIGNED;
3521 } else {
3522 pd = p->phys_offset;
3525 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3526 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3527 if (p)
3528 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3529 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3530 } else {
3531 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3532 ptr = qemu_get_ram_ptr(addr1);
3533 stl_p(ptr, val);
3535 if (unlikely(in_migration)) {
3536 if (!cpu_physical_memory_is_dirty(addr1)) {
3537 /* invalidate code */
3538 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3539 /* set dirty bit */
3540 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3541 (0xff & ~CODE_DIRTY_FLAG);
3547 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3549 int io_index;
3550 uint8_t *ptr;
3551 unsigned long pd;
3552 PhysPageDesc *p;
3554 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3555 if (!p) {
3556 pd = IO_MEM_UNASSIGNED;
3557 } else {
3558 pd = p->phys_offset;
3561 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3562 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3563 if (p)
3564 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3565 #ifdef TARGET_WORDS_BIGENDIAN
3566 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3567 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3568 #else
3569 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3570 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3571 #endif
3572 } else {
3573 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3574 (addr & ~TARGET_PAGE_MASK);
3575 stq_p(ptr, val);
3579 /* warning: addr must be aligned */
3580 void stl_phys(target_phys_addr_t addr, uint32_t val)
3582 int io_index;
3583 uint8_t *ptr;
3584 unsigned long pd;
3585 PhysPageDesc *p;
3587 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3588 if (!p) {
3589 pd = IO_MEM_UNASSIGNED;
3590 } else {
3591 pd = p->phys_offset;
3594 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3595 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3596 if (p)
3597 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3598 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3599 } else {
3600 unsigned long addr1;
3601 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3602 /* RAM case */
3603 ptr = qemu_get_ram_ptr(addr1);
3604 stl_p(ptr, val);
3605 if (!cpu_physical_memory_is_dirty(addr1)) {
3606 /* invalidate code */
3607 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3608 /* set dirty bit */
3609 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3610 (0xff & ~CODE_DIRTY_FLAG);
3615 /* XXX: optimize */
3616 void stb_phys(target_phys_addr_t addr, uint32_t val)
3618 uint8_t v = val;
3619 cpu_physical_memory_write(addr, &v, 1);
3622 /* XXX: optimize */
3623 void stw_phys(target_phys_addr_t addr, uint32_t val)
3625 uint16_t v = tswap16(val);
3626 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3629 /* XXX: optimize */
3630 void stq_phys(target_phys_addr_t addr, uint64_t val)
3632 val = tswap64(val);
3633 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
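/* Illustrative sketch only (not part of the build): a typical use of the
   ld*_phys/st*_phys helpers above, e.g. updating a guest page-table
   entry at a made-up physical address.  The bit value is arbitrary. */
static uint32_t example_touch_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                    /* set an accessed-style bit */
    /* The _notdirty variant keeps the page out of dirty tracking, which
       is how target MMU code usually writes PTEs back. */
    stl_phys_notdirty(pte_addr, pte);
    return pte;
}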
3636 #endif
3638 /* virtual memory access for debug (includes writing to ROM) */
3639 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3640 uint8_t *buf, int len, int is_write)
3642 int l;
3643 target_phys_addr_t phys_addr;
3644 target_ulong page;
3646 while (len > 0) {
3647 page = addr & TARGET_PAGE_MASK;
3648 phys_addr = cpu_get_phys_page_debug(env, page);
3649 /* if no physical page mapped, return an error */
3650 if (phys_addr == -1)
3651 return -1;
3652 l = (page + TARGET_PAGE_SIZE) - addr;
3653 if (l > len)
3654 l = len;
3655 phys_addr += (addr & ~TARGET_PAGE_MASK);
3656 #if !defined(CONFIG_USER_ONLY)
3657 if (is_write)
3658 cpu_physical_memory_write_rom(phys_addr, buf, l);
3659 else
3660 #endif
3661 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3662 len -= l;
3663 buf += l;
3664 addr += l;
3666 return 0;
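/* Illustrative sketch only (not part of the build): a gdbstub-style use
   of the debug accessor above, reading guest virtual memory of one CPU.
   The helper name and buffer handling are made up. */
static int example_read_guest_buf(CPUState *env1, target_ulong vaddr,
                                  uint8_t *buf, int len)
{
    if (cpu_memory_rw_debug(env1, vaddr, buf, len, 0) < 0)
        return -1;                  /* some page in the range is unmapped */
    return 0;
}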
3669 /* in deterministic execution mode, instructions doing device I/O
3670 must be at the end of the TB */
3671 void cpu_io_recompile(CPUState *env, void *retaddr)
3673 TranslationBlock *tb;
3674 uint32_t n, cflags;
3675 target_ulong pc, cs_base;
3676 uint64_t flags;
3678 tb = tb_find_pc((unsigned long)retaddr);
3679 if (!tb) {
3680 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3681 retaddr);
3683 n = env->icount_decr.u16.low + tb->icount;
3684 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3685 /* Calculate how many instructions had been executed before the fault
3686 occurred. */
3687 n = n - env->icount_decr.u16.low;
3688 /* Generate a new TB ending on the I/O insn. */
3689 n++;
3690 /* On MIPS and SH, delay slot instructions can only be restarted if
3691 they were already the first instruction in the TB. If this is not
3692 the first instruction in a TB then re-execute the preceding
3693 branch. */
3694 #if defined(TARGET_MIPS)
3695 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3696 env->active_tc.PC -= 4;
3697 env->icount_decr.u16.low++;
3698 env->hflags &= ~MIPS_HFLAG_BMASK;
3700 #elif defined(TARGET_SH4)
3701 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3702 && n > 1) {
3703 env->pc -= 2;
3704 env->icount_decr.u16.low++;
3705 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3707 #endif
3708 /* This should never happen. */
3709 if (n > CF_COUNT_MASK)
3710 cpu_abort(env, "TB too big during recompile");
3712 cflags = n | CF_LAST_IO;
3713 pc = tb->pc;
3714 cs_base = tb->cs_base;
3715 flags = tb->flags;
3716 tb_phys_invalidate(tb, -1);
3717 /* FIXME: In theory this could raise an exception. In practice
3718 we have already translated the block once so it's probably ok. */
3719 tb_gen_code(env, pc, cs_base, flags, cflags);
3720 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3721 the first in the TB) then we end up generating a whole new TB and
3722 repeating the fault, which is horribly inefficient.
3723 Better would be to execute just this insn uncached, or generate a
3724 second new TB. */
3725 cpu_resume_from_signal(env, NULL);
3728 void dump_exec_info(FILE *f,
3729 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3731 int i, target_code_size, max_target_code_size;
3732 int direct_jmp_count, direct_jmp2_count, cross_page;
3733 TranslationBlock *tb;
3735 target_code_size = 0;
3736 max_target_code_size = 0;
3737 cross_page = 0;
3738 direct_jmp_count = 0;
3739 direct_jmp2_count = 0;
3740 for(i = 0; i < nb_tbs; i++) {
3741 tb = &tbs[i];
3742 target_code_size += tb->size;
3743 if (tb->size > max_target_code_size)
3744 max_target_code_size = tb->size;
3745 if (tb->page_addr[1] != -1)
3746 cross_page++;
3747 if (tb->tb_next_offset[0] != 0xffff) {
3748 direct_jmp_count++;
3749 if (tb->tb_next_offset[1] != 0xffff) {
3750 direct_jmp2_count++;
3754 /* XXX: avoid using doubles? */
3755 cpu_fprintf(f, "Translation buffer state:\n");
3756 cpu_fprintf(f, "gen code size %ld/%ld\n",
3757 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3758 cpu_fprintf(f, "TB count %d/%d\n",
3759 nb_tbs, code_gen_max_blocks);
3760 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3761 nb_tbs ? target_code_size / nb_tbs : 0,
3762 max_target_code_size);
3763 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3764 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3765 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3766 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3767 cross_page,
3768 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3769 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3770 direct_jmp_count,
3771 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3772 direct_jmp2_count,
3773 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3774 cpu_fprintf(f, "\nStatistics:\n");
3775 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3776 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3777 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3778 tcg_dump_info(f, cpu_fprintf);
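/* Illustrative sketch only (not part of the build): dump_exec_info() is
   normally wired to the monitor; any fprintf-compatible callback works,
   for instance: */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}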
3781 #if !defined(CONFIG_USER_ONLY)
3783 #define MMUSUFFIX _cmmu
3784 #define GETPC() NULL
3785 #define env cpu_single_env
3786 #define SOFTMMU_CODE_ACCESS
3788 #define SHIFT 0
3789 #include "softmmu_template.h"
3791 #define SHIFT 1
3792 #include "softmmu_template.h"
3794 #define SHIFT 2
3795 #include "softmmu_template.h"
3797 #define SHIFT 3
3798 #include "softmmu_template.h"
3800 #undef env
3802 #endif