Add qemu_unregister_reset
[qemu/aliguori-queue.git] / exec.c
blob d6e5d3c0c043e66706ed2f3fe78678297022ef54
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
38 #include "tcg.h"
39 #include "hw/hw.h"
40 #include "osdep.h"
41 #include "kvm.h"
42 #if defined(CONFIG_USER_ONLY)
43 #include <qemu.h>
44 #endif
46 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_FLUSH
48 //#define DEBUG_TLB
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
60 #undef DEBUG_TB_CHECK
61 #endif
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #else
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
81 #endif
83 static TranslationBlock *tbs;
84 int code_gen_max_blocks;
85 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86 static int nb_tbs;
87 /* any access to the tbs or the page table must use this lock */
88 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90 #if defined(__arm__) || defined(__sparc_v9__)
91 /* The prologue must be reachable with a direct jump. ARM and Sparc64
92 have limited branch ranges (possibly also PPC) so place it in a
93 section close to code segment. */
94 #define code_gen_section \
95 __attribute__((__section__(".gen_code"))) \
96 __attribute__((aligned (32)))
97 #elif defined(_WIN32)
98 /* Maximum alignment for Win32 is 16. */
99 #define code_gen_section \
100 __attribute__((aligned (16)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
113 #if !defined(CONFIG_USER_ONLY)
114 int phys_ram_fd;
115 uint8_t *phys_ram_dirty;
116 static int in_migration;
118 typedef struct RAMBlock {
119 uint8_t *host;
120 ram_addr_t offset;
121 ram_addr_t length;
122 struct RAMBlock *next;
123 } RAMBlock;
125 static RAMBlock *ram_blocks;
126 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
127 then we can no longer assume contiguous ram offsets, and external uses
128 of this variable will break. */
129 ram_addr_t last_ram_offset;
130 #endif
132 CPUState *first_cpu;
133 /* current CPU in the current thread. It is only valid inside
134 cpu_exec() */
135 CPUState *cpu_single_env;
136 /* 0 = Do not count executed instructions.
137 1 = Precise instruction counting.
138 2 = Adaptive rate instruction counting. */
139 int use_icount = 0;
140 /* Current instruction counter. While executing translated code this may
141 include some instructions that have not yet been executed. */
142 int64_t qemu_icount;
144 typedef struct PageDesc {
145 /* list of TBs intersecting this ram page */
146 TranslationBlock *first_tb;
147 /* in order to optimize self modifying code, we count the number
148 of code write accesses to a given page and switch to a bitmap past a threshold */
149 unsigned int code_write_count;
150 uint8_t *code_bitmap;
151 #if defined(CONFIG_USER_ONLY)
152 unsigned long flags;
153 #endif
154 } PageDesc;
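/* A PageDesc tracks the TBs that intersect one target page: first_tb heads
   a list whose pointers carry the TB's page slot (0 or 1) in their low bits,
   and code_write_count/code_bitmap implement the self-modifying-code fast
   path used by tb_invalidate_phys_page_fast(). */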
156 typedef struct PhysPageDesc {
157 /* offset in host memory of the page + io_index in the low bits */
158 ram_addr_t phys_offset;
159 ram_addr_t region_offset;
160 } PhysPageDesc;
162 #define L2_BITS 10
163 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
164 /* XXX: this is a temporary hack for alpha target.
165 * In the future, this is to be replaced by a multi-level table
166 * to actually be able to handle the complete 64 bits address space.
168 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
169 #else
170 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
171 #endif
173 #define L1_SIZE (1 << L1_BITS)
174 #define L2_SIZE (1 << L2_BITS)
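/* Two-level page tables: l1_map (and, with one extra level for wide physical
   addresses, l1_phys_map) hold L1_SIZE pointers to lazily allocated leaf
   arrays of L2_SIZE descriptors; a page index is split into
   (index >> L2_BITS) for the L1 slot and (index & (L2_SIZE - 1)) for the
   leaf entry, see page_find_alloc() and phys_page_find_alloc(). */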
176 unsigned long qemu_real_host_page_size;
177 unsigned long qemu_host_page_bits;
178 unsigned long qemu_host_page_size;
179 unsigned long qemu_host_page_mask;
181 /* XXX: for system emulation, it could just be an array */
182 static PageDesc *l1_map[L1_SIZE];
183 static PhysPageDesc **l1_phys_map;
185 #if !defined(CONFIG_USER_ONLY)
186 static void io_mem_init(void);
188 /* io memory support */
189 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
190 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
191 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
192 static char io_mem_used[IO_MEM_NB_ENTRIES];
193 static int io_mem_watch;
194 #endif
196 /* log support */
197 static const char *logfilename = "/tmp/qemu.log";
198 FILE *logfile;
199 int loglevel;
200 static int log_append = 0;
202 /* statistics */
203 static int tlb_flush_count;
204 static int tb_flush_count;
205 static int tb_phys_invalidate_count;
207 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
208 typedef struct subpage_t {
209 target_phys_addr_t base;
210 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
211 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
212 void *opaque[TARGET_PAGE_SIZE][2][4];
213 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
214 } subpage_t;
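/* A subpage_t splits a single target page into separately dispatched I/O
   regions: SUBPAGE_IDX(addr) selects the slot for the offset within the
   page, and each slot has its own read/write handlers, opaque pointers and
   region offsets per access size. */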
216 #ifdef _WIN32
217 static void map_exec(void *addr, long size)
219 DWORD old_protect;
220 VirtualProtect(addr, size,
221 PAGE_EXECUTE_READWRITE, &old_protect);
224 #else
225 static void map_exec(void *addr, long size)
227 unsigned long start, end, page_size;
229 page_size = getpagesize();
230 start = (unsigned long)addr;
231 start &= ~(page_size - 1);
233 end = (unsigned long)addr + size;
234 end += page_size - 1;
235 end &= ~(page_size - 1);
237 mprotect((void *)start, end - start,
238 PROT_READ | PROT_WRITE | PROT_EXEC);
240 #endif
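/* Both map_exec() variants above mark an address range readable, writable
   and executable (VirtualProtect on Win32, mprotect elsewhere); it is used
   below for the code generation buffer and prologue. */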
242 static void page_init(void)
244 /* NOTE: we can always suppose that qemu_host_page_size >=
245 TARGET_PAGE_SIZE */
246 #ifdef _WIN32
248 SYSTEM_INFO system_info;
250 GetSystemInfo(&system_info);
251 qemu_real_host_page_size = system_info.dwPageSize;
253 #else
254 qemu_real_host_page_size = getpagesize();
255 #endif
256 if (qemu_host_page_size == 0)
257 qemu_host_page_size = qemu_real_host_page_size;
258 if (qemu_host_page_size < TARGET_PAGE_SIZE)
259 qemu_host_page_size = TARGET_PAGE_SIZE;
260 qemu_host_page_bits = 0;
261 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
262 qemu_host_page_bits++;
263 qemu_host_page_mask = ~(qemu_host_page_size - 1);
264 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
265 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
267 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
269 long long startaddr, endaddr;
270 FILE *f;
271 int n;
273 mmap_lock();
274 last_brk = (unsigned long)sbrk(0);
275 f = fopen("/proc/self/maps", "r");
276 if (f) {
277 do {
278 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
279 if (n == 2) {
280 startaddr = MIN(startaddr,
281 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282 endaddr = MIN(endaddr,
283 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
284 page_set_flags(startaddr & TARGET_PAGE_MASK,
285 TARGET_PAGE_ALIGN(endaddr),
286 PAGE_RESERVED);
288 } while (!feof(f));
289 fclose(f);
291 mmap_unlock();
293 #endif
296 static inline PageDesc **page_l1_map(target_ulong index)
298 #if TARGET_LONG_BITS > 32
299 /* Host memory outside guest VM. For 32-bit targets we have already
300 excluded high addresses. */
301 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
302 return NULL;
303 #endif
304 return &l1_map[index >> L2_BITS];
307 static inline PageDesc *page_find_alloc(target_ulong index)
309 PageDesc **lp, *p;
310 lp = page_l1_map(index);
311 if (!lp)
312 return NULL;
314 p = *lp;
315 if (!p) {
316 /* allocate if not found */
317 #if defined(CONFIG_USER_ONLY)
318 size_t len = sizeof(PageDesc) * L2_SIZE;
319 /* Don't use qemu_malloc because it may recurse. */
320 p = mmap(0, len, PROT_READ | PROT_WRITE,
321 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
322 *lp = p;
323 if (h2g_valid(p)) {
324 unsigned long addr = h2g(p);
325 page_set_flags(addr & TARGET_PAGE_MASK,
326 TARGET_PAGE_ALIGN(addr + len),
327 PAGE_RESERVED);
329 #else
330 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
331 *lp = p;
332 #endif
334 return p + (index & (L2_SIZE - 1));
337 static inline PageDesc *page_find(target_ulong index)
339 PageDesc **lp, *p;
340 lp = page_l1_map(index);
341 if (!lp)
342 return NULL;
344 p = *lp;
345 if (!p)
346 return 0;
347 return p + (index & (L2_SIZE - 1));
350 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
352 void **lp, **p;
353 PhysPageDesc *pd;
355 p = (void **)l1_phys_map;
356 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
358 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
359 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
360 #endif
361 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
362 p = *lp;
363 if (!p) {
364 /* allocate if not found */
365 if (!alloc)
366 return NULL;
367 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
368 memset(p, 0, sizeof(void *) * L1_SIZE);
369 *lp = p;
371 #endif
372 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
373 pd = *lp;
374 if (!pd) {
375 int i;
376 /* allocate if not found */
377 if (!alloc)
378 return NULL;
379 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
380 *lp = pd;
381 for (i = 0; i < L2_SIZE; i++) {
382 pd[i].phys_offset = IO_MEM_UNASSIGNED;
383 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
386 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
389 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
391 return phys_page_find_alloc(index, 0);
394 #if !defined(CONFIG_USER_ONLY)
395 static void tlb_protect_code(ram_addr_t ram_addr);
396 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
397 target_ulong vaddr);
398 #define mmap_lock() do { } while(0)
399 #define mmap_unlock() do { } while(0)
400 #endif
402 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
404 #if defined(CONFIG_USER_ONLY)
405 /* Currently it is not recommended to allocate big chunks of data in
406 user mode. It will change when a dedicated libc is used. */
407 #define USE_STATIC_CODE_GEN_BUFFER
408 #endif
410 #ifdef USE_STATIC_CODE_GEN_BUFFER
411 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
412 #endif
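/* code_gen_alloc() picks the translation buffer: the static buffer when
   USE_STATIC_CODE_GEN_BUFFER is defined, otherwise a region sized from
   tb_size (defaulting to DEFAULT_CODE_GEN_BUFFER_SIZE in user mode or
   ram_size / 4 in system mode) and placed with host-specific constraints so
   generated code can reach the prologue with direct branches (MAP_32BIT on
   x86_64 Linux, fixed low addresses on sparc64/arm). */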
414 static void code_gen_alloc(unsigned long tb_size)
416 #ifdef USE_STATIC_CODE_GEN_BUFFER
417 code_gen_buffer = static_code_gen_buffer;
418 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
419 map_exec(code_gen_buffer, code_gen_buffer_size);
420 #else
421 code_gen_buffer_size = tb_size;
422 if (code_gen_buffer_size == 0) {
423 #if defined(CONFIG_USER_ONLY)
424 /* in user mode, phys_ram_size is not meaningful */
425 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
426 #else
427 /* XXX: needs adjustments */
428 code_gen_buffer_size = (unsigned long)(ram_size / 4);
429 #endif
431 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
432 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
433 /* The code gen buffer location may have constraints depending on
434 the host cpu and OS */
435 #if defined(__linux__)
437 int flags;
438 void *start = NULL;
440 flags = MAP_PRIVATE | MAP_ANONYMOUS;
441 #if defined(__x86_64__)
442 flags |= MAP_32BIT;
443 /* Cannot map more than that */
444 if (code_gen_buffer_size > (800 * 1024 * 1024))
445 code_gen_buffer_size = (800 * 1024 * 1024);
446 #elif defined(__sparc_v9__)
447 // Map the buffer below 2G, so we can use direct calls and branches
448 flags |= MAP_FIXED;
449 start = (void *) 0x60000000UL;
450 if (code_gen_buffer_size > (512 * 1024 * 1024))
451 code_gen_buffer_size = (512 * 1024 * 1024);
452 #elif defined(__arm__)
453 /* Map the buffer below 32M, so we can use direct calls and branches */
454 flags |= MAP_FIXED;
455 start = (void *) 0x01000000UL;
456 if (code_gen_buffer_size > 16 * 1024 * 1024)
457 code_gen_buffer_size = 16 * 1024 * 1024;
458 #endif
459 code_gen_buffer = mmap(start, code_gen_buffer_size,
460 PROT_WRITE | PROT_READ | PROT_EXEC,
461 flags, -1, 0);
462 if (code_gen_buffer == MAP_FAILED) {
463 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
464 exit(1);
467 #elif defined(__FreeBSD__) || defined(__DragonFly__)
469 int flags;
470 void *addr = NULL;
471 flags = MAP_PRIVATE | MAP_ANONYMOUS;
472 #if defined(__x86_64__)
473 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
474 * 0x40000000 is free */
475 flags |= MAP_FIXED;
476 addr = (void *)0x40000000;
477 /* Cannot map more than that */
478 if (code_gen_buffer_size > (800 * 1024 * 1024))
479 code_gen_buffer_size = (800 * 1024 * 1024);
480 #endif
481 code_gen_buffer = mmap(addr, code_gen_buffer_size,
482 PROT_WRITE | PROT_READ | PROT_EXEC,
483 flags, -1, 0);
484 if (code_gen_buffer == MAP_FAILED) {
485 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
486 exit(1);
489 #else
490 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
491 map_exec(code_gen_buffer, code_gen_buffer_size);
492 #endif
493 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
494 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
495 code_gen_buffer_max_size = code_gen_buffer_size -
496 code_gen_max_block_size();
497 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
498 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
501 /* Must be called before using the QEMU cpus. 'tb_size' is the size
502 (in bytes) allocated to the translation buffer. Zero means default
503 size. */
504 void cpu_exec_init_all(unsigned long tb_size)
506 cpu_gen_init();
507 code_gen_alloc(tb_size);
508 code_gen_ptr = code_gen_buffer;
509 page_init();
510 #if !defined(CONFIG_USER_ONLY)
511 io_mem_init();
512 #endif
515 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
517 #define CPU_COMMON_SAVE_VERSION 1
519 static void cpu_common_save(QEMUFile *f, void *opaque)
521 CPUState *env = opaque;
523 cpu_synchronize_state(env, 0);
525 qemu_put_be32s(f, &env->halted);
526 qemu_put_be32s(f, &env->interrupt_request);
529 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
531 CPUState *env = opaque;
533 if (version_id != CPU_COMMON_SAVE_VERSION)
534 return -EINVAL;
536 qemu_get_be32s(f, &env->halted);
537 qemu_get_be32s(f, &env->interrupt_request);
538 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
539 version_id is increased. */
540 env->interrupt_request &= ~0x01;
541 tlb_flush(env, 1);
542 cpu_synchronize_state(env, 1);
544 return 0;
546 #endif
548 CPUState *qemu_get_cpu(int cpu)
550 CPUState *env = first_cpu;
552 while (env) {
553 if (env->cpu_index == cpu)
554 break;
555 env = env->next_cpu;
558 return env;
561 void cpu_exec_init(CPUState *env)
563 CPUState **penv;
564 int cpu_index;
566 #if defined(CONFIG_USER_ONLY)
567 cpu_list_lock();
568 #endif
569 env->next_cpu = NULL;
570 penv = &first_cpu;
571 cpu_index = 0;
572 while (*penv != NULL) {
573 penv = &(*penv)->next_cpu;
574 cpu_index++;
576 env->cpu_index = cpu_index;
577 env->numa_node = 0;
578 TAILQ_INIT(&env->breakpoints);
579 TAILQ_INIT(&env->watchpoints);
580 *penv = env;
581 #if defined(CONFIG_USER_ONLY)
582 cpu_list_unlock();
583 #endif
584 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
585 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
586 cpu_common_save, cpu_common_load, env);
587 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
588 cpu_save, cpu_load, env);
589 #endif
592 static inline void invalidate_page_bitmap(PageDesc *p)
594 if (p->code_bitmap) {
595 qemu_free(p->code_bitmap);
596 p->code_bitmap = NULL;
598 p->code_write_count = 0;
601 /* set to NULL all the 'first_tb' fields in all PageDescs */
602 static void page_flush_tb(void)
604 int i, j;
605 PageDesc *p;
607 for(i = 0; i < L1_SIZE; i++) {
608 p = l1_map[i];
609 if (p) {
610 for(j = 0; j < L2_SIZE; j++) {
611 p->first_tb = NULL;
612 invalidate_page_bitmap(p);
613 p++;
619 /* flush all the translation blocks */
620 /* XXX: tb_flush is currently not thread safe */
621 void tb_flush(CPUState *env1)
623 CPUState *env;
624 #if defined(DEBUG_FLUSH)
625 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
626 (unsigned long)(code_gen_ptr - code_gen_buffer),
627 nb_tbs, nb_tbs > 0 ?
628 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
629 #endif
630 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
631 cpu_abort(env1, "Internal error: code buffer overflow\n");
633 nb_tbs = 0;
635 for(env = first_cpu; env != NULL; env = env->next_cpu) {
636 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
639 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
640 page_flush_tb();
642 code_gen_ptr = code_gen_buffer;
643 /* XXX: flush processor icache at this point if cache flush is
644 expensive */
645 tb_flush_count++;
648 #ifdef DEBUG_TB_CHECK
650 static void tb_invalidate_check(target_ulong address)
652 TranslationBlock *tb;
653 int i;
654 address &= TARGET_PAGE_MASK;
655 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
656 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
657 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
658 address >= tb->pc + tb->size)) {
659 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
660 address, (long)tb->pc, tb->size);
666 /* verify that all the pages have correct rights for code */
667 static void tb_page_check(void)
669 TranslationBlock *tb;
670 int i, flags1, flags2;
672 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
673 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
674 flags1 = page_get_flags(tb->pc);
675 flags2 = page_get_flags(tb->pc + tb->size - 1);
676 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
677 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
678 (long)tb->pc, tb->size, flags1, flags2);
684 static void tb_jmp_check(TranslationBlock *tb)
686 TranslationBlock *tb1;
687 unsigned int n1;
689 /* suppress any remaining jumps to this TB */
690 tb1 = tb->jmp_first;
691 for(;;) {
692 n1 = (long)tb1 & 3;
693 tb1 = (TranslationBlock *)((long)tb1 & ~3);
694 if (n1 == 2)
695 break;
696 tb1 = tb1->jmp_next[n1];
698 /* check end of list */
699 if (tb1 != tb) {
700 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
704 #endif
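/* The TB list pointers used below are tagged: in first_tb/page_next[] the
   low bits hold which of the TB's two pages the link belongs to (0 or 1),
   and in the jmp_first circular list the value 2 marks the list head, which
   is why the helpers mask with ~3 before dereferencing. */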
706 /* invalidate one TB */
707 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
708 int next_offset)
710 TranslationBlock *tb1;
711 for(;;) {
712 tb1 = *ptb;
713 if (tb1 == tb) {
714 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
715 break;
717 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
721 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
723 TranslationBlock *tb1;
724 unsigned int n1;
726 for(;;) {
727 tb1 = *ptb;
728 n1 = (long)tb1 & 3;
729 tb1 = (TranslationBlock *)((long)tb1 & ~3);
730 if (tb1 == tb) {
731 *ptb = tb1->page_next[n1];
732 break;
734 ptb = &tb1->page_next[n1];
738 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
740 TranslationBlock *tb1, **ptb;
741 unsigned int n1;
743 ptb = &tb->jmp_next[n];
744 tb1 = *ptb;
745 if (tb1) {
746 /* find tb(n) in circular list */
747 for(;;) {
748 tb1 = *ptb;
749 n1 = (long)tb1 & 3;
750 tb1 = (TranslationBlock *)((long)tb1 & ~3);
751 if (n1 == n && tb1 == tb)
752 break;
753 if (n1 == 2) {
754 ptb = &tb1->jmp_first;
755 } else {
756 ptb = &tb1->jmp_next[n1];
759 /* now we can suppress tb(n) from the list */
760 *ptb = tb->jmp_next[n];
762 tb->jmp_next[n] = NULL;
766 /* reset the jump entry 'n' of a TB so that it is not chained to
767 another TB */
768 static inline void tb_reset_jump(TranslationBlock *tb, int n)
770 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
773 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
775 CPUState *env;
776 PageDesc *p;
777 unsigned int h, n1;
778 target_phys_addr_t phys_pc;
779 TranslationBlock *tb1, *tb2;
781 /* remove the TB from the hash list */
782 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
783 h = tb_phys_hash_func(phys_pc);
784 tb_remove(&tb_phys_hash[h], tb,
785 offsetof(TranslationBlock, phys_hash_next));
787 /* remove the TB from the page list */
788 if (tb->page_addr[0] != page_addr) {
789 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
790 tb_page_remove(&p->first_tb, tb);
791 invalidate_page_bitmap(p);
793 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
794 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
795 tb_page_remove(&p->first_tb, tb);
796 invalidate_page_bitmap(p);
799 tb_invalidated_flag = 1;
801 /* remove the TB from the hash list */
802 h = tb_jmp_cache_hash_func(tb->pc);
803 for(env = first_cpu; env != NULL; env = env->next_cpu) {
804 if (env->tb_jmp_cache[h] == tb)
805 env->tb_jmp_cache[h] = NULL;
808 /* suppress this TB from the two jump lists */
809 tb_jmp_remove(tb, 0);
810 tb_jmp_remove(tb, 1);
812 /* suppress any remaining jumps to this TB */
813 tb1 = tb->jmp_first;
814 for(;;) {
815 n1 = (long)tb1 & 3;
816 if (n1 == 2)
817 break;
818 tb1 = (TranslationBlock *)((long)tb1 & ~3);
819 tb2 = tb1->jmp_next[n1];
820 tb_reset_jump(tb1, n1);
821 tb1->jmp_next[n1] = NULL;
822 tb1 = tb2;
824 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
826 tb_phys_invalidate_count++;
829 static inline void set_bits(uint8_t *tab, int start, int len)
831 int end, mask, end1;
833 end = start + len;
834 tab += start >> 3;
835 mask = 0xff << (start & 7);
836 if ((start & ~7) == (end & ~7)) {
837 if (start < end) {
838 mask &= ~(0xff << (end & 7));
839 *tab |= mask;
841 } else {
842 *tab++ |= mask;
843 start = (start + 8) & ~7;
844 end1 = end & ~7;
845 while (start < end1) {
846 *tab++ = 0xff;
847 start += 8;
849 if (start < end) {
850 mask = ~(0xff << (end & 7));
851 *tab |= mask;
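/* set_bits() sets bits [start, start + len) in a byte-addressed bitmap.
   build_page_bitmap() below uses one bit per byte of the page, so
   code_bitmap records which byte offsets are covered by translated code;
   tb_invalidate_phys_page_fast() consults it before invalidating. */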
856 static void build_page_bitmap(PageDesc *p)
858 int n, tb_start, tb_end;
859 TranslationBlock *tb;
861 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
863 tb = p->first_tb;
864 while (tb != NULL) {
865 n = (long)tb & 3;
866 tb = (TranslationBlock *)((long)tb & ~3);
867 /* NOTE: this is subtle as a TB may span two physical pages */
868 if (n == 0) {
869 /* NOTE: tb_end may be after the end of the page, but
870 it is not a problem */
871 tb_start = tb->pc & ~TARGET_PAGE_MASK;
872 tb_end = tb_start + tb->size;
873 if (tb_end > TARGET_PAGE_SIZE)
874 tb_end = TARGET_PAGE_SIZE;
875 } else {
876 tb_start = 0;
877 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
879 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
880 tb = tb->page_next[n];
884 TranslationBlock *tb_gen_code(CPUState *env,
885 target_ulong pc, target_ulong cs_base,
886 int flags, int cflags)
888 TranslationBlock *tb;
889 uint8_t *tc_ptr;
890 target_ulong phys_pc, phys_page2, virt_page2;
891 int code_gen_size;
893 phys_pc = get_phys_addr_code(env, pc);
894 tb = tb_alloc(pc);
895 if (!tb) {
896 /* flush must be done */
897 tb_flush(env);
898 /* cannot fail at this point */
899 tb = tb_alloc(pc);
900 /* Don't forget to invalidate previous TB info. */
901 tb_invalidated_flag = 1;
903 tc_ptr = code_gen_ptr;
904 tb->tc_ptr = tc_ptr;
905 tb->cs_base = cs_base;
906 tb->flags = flags;
907 tb->cflags = cflags;
908 cpu_gen_code(env, tb, &code_gen_size);
909 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
911 /* check next page if needed */
912 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
913 phys_page2 = -1;
914 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
915 phys_page2 = get_phys_addr_code(env, virt_page2);
917 tb_link_phys(tb, phys_pc, phys_page2);
918 return tb;
921 /* invalidate all TBs which intersect with the target physical page
922 starting in range [start;end[. NOTE: start and end must refer to
923 the same physical page. 'is_cpu_write_access' should be true if called
924 from a real cpu write access: the virtual CPU will exit the current
925 TB if code is modified inside this TB. */
926 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
927 int is_cpu_write_access)
929 TranslationBlock *tb, *tb_next, *saved_tb;
930 CPUState *env = cpu_single_env;
931 target_ulong tb_start, tb_end;
932 PageDesc *p;
933 int n;
934 #ifdef TARGET_HAS_PRECISE_SMC
935 int current_tb_not_found = is_cpu_write_access;
936 TranslationBlock *current_tb = NULL;
937 int current_tb_modified = 0;
938 target_ulong current_pc = 0;
939 target_ulong current_cs_base = 0;
940 int current_flags = 0;
941 #endif /* TARGET_HAS_PRECISE_SMC */
943 p = page_find(start >> TARGET_PAGE_BITS);
944 if (!p)
945 return;
946 if (!p->code_bitmap &&
947 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
948 is_cpu_write_access) {
949 /* build code bitmap */
950 build_page_bitmap(p);
953 /* we remove all the TBs in the range [start, end[ */
954 /* XXX: see if in some cases it could be faster to invalidate all the code */
955 tb = p->first_tb;
956 while (tb != NULL) {
957 n = (long)tb & 3;
958 tb = (TranslationBlock *)((long)tb & ~3);
959 tb_next = tb->page_next[n];
960 /* NOTE: this is subtle as a TB may span two physical pages */
961 if (n == 0) {
962 /* NOTE: tb_end may be after the end of the page, but
963 it is not a problem */
964 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
965 tb_end = tb_start + tb->size;
966 } else {
967 tb_start = tb->page_addr[1];
968 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
970 if (!(tb_end <= start || tb_start >= end)) {
971 #ifdef TARGET_HAS_PRECISE_SMC
972 if (current_tb_not_found) {
973 current_tb_not_found = 0;
974 current_tb = NULL;
975 if (env->mem_io_pc) {
976 /* now we have a real cpu fault */
977 current_tb = tb_find_pc(env->mem_io_pc);
980 if (current_tb == tb &&
981 (current_tb->cflags & CF_COUNT_MASK) != 1) {
982 /* If we are modifying the current TB, we must stop
983 its execution. We could be more precise by checking
984 that the modification is after the current PC, but it
985 would require a specialized function to partially
986 restore the CPU state */
988 current_tb_modified = 1;
989 cpu_restore_state(current_tb, env,
990 env->mem_io_pc, NULL);
991 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
992 &current_flags);
994 #endif /* TARGET_HAS_PRECISE_SMC */
995 /* we need to do that to handle the case where a signal
996 occurs while doing tb_phys_invalidate() */
997 saved_tb = NULL;
998 if (env) {
999 saved_tb = env->current_tb;
1000 env->current_tb = NULL;
1002 tb_phys_invalidate(tb, -1);
1003 if (env) {
1004 env->current_tb = saved_tb;
1005 if (env->interrupt_request && env->current_tb)
1006 cpu_interrupt(env, env->interrupt_request);
1009 tb = tb_next;
1011 #if !defined(CONFIG_USER_ONLY)
1012 /* if no code remaining, no need to continue to use slow writes */
1013 if (!p->first_tb) {
1014 invalidate_page_bitmap(p);
1015 if (is_cpu_write_access) {
1016 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1019 #endif
1020 #ifdef TARGET_HAS_PRECISE_SMC
1021 if (current_tb_modified) {
1022 /* we generate a block containing just the instruction
1023 modifying the memory. It will ensure that it cannot modify
1024 itself */
1025 env->current_tb = NULL;
1026 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1027 cpu_resume_from_signal(env, NULL);
1029 #endif
1032 /* len must be <= 8 and start must be a multiple of len */
1033 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1035 PageDesc *p;
1036 int offset, b;
1037 #if 0
1038 if (1) {
1039 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1040 cpu_single_env->mem_io_vaddr, len,
1041 cpu_single_env->eip,
1042 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1044 #endif
1045 p = page_find(start >> TARGET_PAGE_BITS);
1046 if (!p)
1047 return;
1048 if (p->code_bitmap) {
1049 offset = start & ~TARGET_PAGE_MASK;
1050 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1051 if (b & ((1 << len) - 1))
1052 goto do_invalidate;
1053 } else {
1054 do_invalidate:
1055 tb_invalidate_phys_page_range(start, start + len, 1);
1059 #if !defined(CONFIG_SOFTMMU)
1060 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1061 unsigned long pc, void *puc)
1063 TranslationBlock *tb;
1064 PageDesc *p;
1065 int n;
1066 #ifdef TARGET_HAS_PRECISE_SMC
1067 TranslationBlock *current_tb = NULL;
1068 CPUState *env = cpu_single_env;
1069 int current_tb_modified = 0;
1070 target_ulong current_pc = 0;
1071 target_ulong current_cs_base = 0;
1072 int current_flags = 0;
1073 #endif
1075 addr &= TARGET_PAGE_MASK;
1076 p = page_find(addr >> TARGET_PAGE_BITS);
1077 if (!p)
1078 return;
1079 tb = p->first_tb;
1080 #ifdef TARGET_HAS_PRECISE_SMC
1081 if (tb && pc != 0) {
1082 current_tb = tb_find_pc(pc);
1084 #endif
1085 while (tb != NULL) {
1086 n = (long)tb & 3;
1087 tb = (TranslationBlock *)((long)tb & ~3);
1088 #ifdef TARGET_HAS_PRECISE_SMC
1089 if (current_tb == tb &&
1090 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1091 /* If we are modifying the current TB, we must stop
1092 its execution. We could be more precise by checking
1093 that the modification is after the current PC, but it
1094 would require a specialized function to partially
1095 restore the CPU state */
1097 current_tb_modified = 1;
1098 cpu_restore_state(current_tb, env, pc, puc);
1099 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1100 &current_flags);
1102 #endif /* TARGET_HAS_PRECISE_SMC */
1103 tb_phys_invalidate(tb, addr);
1104 tb = tb->page_next[n];
1106 p->first_tb = NULL;
1107 #ifdef TARGET_HAS_PRECISE_SMC
1108 if (current_tb_modified) {
1109 /* we generate a block containing just the instruction
1110 modifying the memory. It will ensure that it cannot modify
1111 itself */
1112 env->current_tb = NULL;
1113 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1114 cpu_resume_from_signal(env, puc);
1116 #endif
1118 #endif
1120 /* add the tb in the target page and protect it if necessary */
1121 static inline void tb_alloc_page(TranslationBlock *tb,
1122 unsigned int n, target_ulong page_addr)
1124 PageDesc *p;
1125 TranslationBlock *last_first_tb;
1127 tb->page_addr[n] = page_addr;
1128 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1129 tb->page_next[n] = p->first_tb;
1130 last_first_tb = p->first_tb;
1131 p->first_tb = (TranslationBlock *)((long)tb | n);
1132 invalidate_page_bitmap(p);
1134 #if defined(TARGET_HAS_SMC) || 1
1136 #if defined(CONFIG_USER_ONLY)
1137 if (p->flags & PAGE_WRITE) {
1138 target_ulong addr;
1139 PageDesc *p2;
1140 int prot;
1142 /* force the host page as non writable (writes will have a
1143 page fault + mprotect overhead) */
1144 page_addr &= qemu_host_page_mask;
1145 prot = 0;
1146 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1147 addr += TARGET_PAGE_SIZE) {
1149 p2 = page_find (addr >> TARGET_PAGE_BITS);
1150 if (!p2)
1151 continue;
1152 prot |= p2->flags;
1153 p2->flags &= ~PAGE_WRITE;
1154 page_get_flags(addr);
1156 mprotect(g2h(page_addr), qemu_host_page_size,
1157 (prot & PAGE_BITS) & ~PAGE_WRITE);
1158 #ifdef DEBUG_TB_INVALIDATE
1159 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1160 page_addr);
1161 #endif
1163 #else
1164 /* if some code is already present, then the pages are already
1165 protected. So we handle the case where only the first TB is
1166 allocated in a physical page */
1167 if (!last_first_tb) {
1168 tlb_protect_code(page_addr);
1170 #endif
1172 #endif /* TARGET_HAS_SMC */
1175 /* Allocate a new translation block. Flush the translation buffer if
1176 too many translation blocks or too much generated code. */
1177 TranslationBlock *tb_alloc(target_ulong pc)
1179 TranslationBlock *tb;
1181 if (nb_tbs >= code_gen_max_blocks ||
1182 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1183 return NULL;
1184 tb = &tbs[nb_tbs++];
1185 tb->pc = pc;
1186 tb->cflags = 0;
1187 return tb;
1190 void tb_free(TranslationBlock *tb)
1192 /* In practice this is mostly used for single-use temporary TBs.
1193 Ignore the hard cases and just back up if this TB happens to
1194 be the last one generated. */
1195 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1196 code_gen_ptr = tb->tc_ptr;
1197 nb_tbs--;
1201 /* add a new TB and link it to the physical page tables. phys_page2 is
1202 (-1) to indicate that only one page contains the TB. */
1203 void tb_link_phys(TranslationBlock *tb,
1204 target_ulong phys_pc, target_ulong phys_page2)
1206 unsigned int h;
1207 TranslationBlock **ptb;
1209 /* Grab the mmap lock to stop another thread invalidating this TB
1210 before we are done. */
1211 mmap_lock();
1212 /* add in the physical hash table */
1213 h = tb_phys_hash_func(phys_pc);
1214 ptb = &tb_phys_hash[h];
1215 tb->phys_hash_next = *ptb;
1216 *ptb = tb;
1218 /* add in the page list */
1219 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1220 if (phys_page2 != -1)
1221 tb_alloc_page(tb, 1, phys_page2);
1222 else
1223 tb->page_addr[1] = -1;
1225 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1226 tb->jmp_next[0] = NULL;
1227 tb->jmp_next[1] = NULL;
1229 /* init original jump addresses */
1230 if (tb->tb_next_offset[0] != 0xffff)
1231 tb_reset_jump(tb, 0);
1232 if (tb->tb_next_offset[1] != 0xffff)
1233 tb_reset_jump(tb, 1);
1235 #ifdef DEBUG_TB_CHECK
1236 tb_page_check();
1237 #endif
1238 mmap_unlock();
1241 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1242 tb[1].tc_ptr. Return NULL if not found */
1243 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1245 int m_min, m_max, m;
1246 unsigned long v;
1247 TranslationBlock *tb;
1249 if (nb_tbs <= 0)
1250 return NULL;
1251 if (tc_ptr < (unsigned long)code_gen_buffer ||
1252 tc_ptr >= (unsigned long)code_gen_ptr)
1253 return NULL;
1254 /* binary search (cf Knuth) */
1255 m_min = 0;
1256 m_max = nb_tbs - 1;
1257 while (m_min <= m_max) {
1258 m = (m_min + m_max) >> 1;
1259 tb = &tbs[m];
1260 v = (unsigned long)tb->tc_ptr;
1261 if (v == tc_ptr)
1262 return tb;
1263 else if (tc_ptr < v) {
1264 m_max = m - 1;
1265 } else {
1266 m_min = m + 1;
1269 return &tbs[m_max];
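/* The binary search above is valid because tbs[] is filled in generation
   order and each TB's tc_ptr is taken from the monotonically advancing
   code_gen_ptr, so tc_ptr values are sorted by index. */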
1272 static void tb_reset_jump_recursive(TranslationBlock *tb);
1274 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1276 TranslationBlock *tb1, *tb_next, **ptb;
1277 unsigned int n1;
1279 tb1 = tb->jmp_next[n];
1280 if (tb1 != NULL) {
1281 /* find head of list */
1282 for(;;) {
1283 n1 = (long)tb1 & 3;
1284 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1285 if (n1 == 2)
1286 break;
1287 tb1 = tb1->jmp_next[n1];
1289 /* we are now sure that tb jumps to tb1 */
1290 tb_next = tb1;
1292 /* remove tb from the jmp_first list */
1293 ptb = &tb_next->jmp_first;
1294 for(;;) {
1295 tb1 = *ptb;
1296 n1 = (long)tb1 & 3;
1297 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1298 if (n1 == n && tb1 == tb)
1299 break;
1300 ptb = &tb1->jmp_next[n1];
1302 *ptb = tb->jmp_next[n];
1303 tb->jmp_next[n] = NULL;
1305 /* suppress the jump to next tb in generated code */
1306 tb_reset_jump(tb, n);
1308 /* suppress jumps in the tb we could have jumped to */
1309 tb_reset_jump_recursive(tb_next);
1313 static void tb_reset_jump_recursive(TranslationBlock *tb)
1315 tb_reset_jump_recursive2(tb, 0);
1316 tb_reset_jump_recursive2(tb, 1);
1319 #if defined(TARGET_HAS_ICE)
1320 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1322 target_phys_addr_t addr;
1323 target_ulong pd;
1324 ram_addr_t ram_addr;
1325 PhysPageDesc *p;
1327 addr = cpu_get_phys_page_debug(env, pc);
1328 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1329 if (!p) {
1330 pd = IO_MEM_UNASSIGNED;
1331 } else {
1332 pd = p->phys_offset;
1334 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1335 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1337 #endif
1339 /* Add a watchpoint. */
1340 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1341 int flags, CPUWatchpoint **watchpoint)
1343 target_ulong len_mask = ~(len - 1);
1344 CPUWatchpoint *wp;
1346 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1347 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1348 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1349 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1350 return -EINVAL;
1352 wp = qemu_malloc(sizeof(*wp));
1354 wp->vaddr = addr;
1355 wp->len_mask = len_mask;
1356 wp->flags = flags;
1358 /* keep all GDB-injected watchpoints in front */
1359 if (flags & BP_GDB)
1360 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1361 else
1362 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1364 tlb_flush_page(env, addr);
1366 if (watchpoint)
1367 *watchpoint = wp;
1368 return 0;
1371 /* Remove a specific watchpoint. */
1372 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1373 int flags)
1375 target_ulong len_mask = ~(len - 1);
1376 CPUWatchpoint *wp;
1378 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1379 if (addr == wp->vaddr && len_mask == wp->len_mask
1380 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1381 cpu_watchpoint_remove_by_ref(env, wp);
1382 return 0;
1385 return -ENOENT;
1388 /* Remove a specific watchpoint by reference. */
1389 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1391 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1393 tlb_flush_page(env, watchpoint->vaddr);
1395 qemu_free(watchpoint);
1398 /* Remove all matching watchpoints. */
1399 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1401 CPUWatchpoint *wp, *next;
1403 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1404 if (wp->flags & mask)
1405 cpu_watchpoint_remove_by_ref(env, wp);
1409 /* Add a breakpoint. */
1410 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1411 CPUBreakpoint **breakpoint)
1413 #if defined(TARGET_HAS_ICE)
1414 CPUBreakpoint *bp;
1416 bp = qemu_malloc(sizeof(*bp));
1418 bp->pc = pc;
1419 bp->flags = flags;
1421 /* keep all GDB-injected breakpoints in front */
1422 if (flags & BP_GDB)
1423 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1424 else
1425 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1427 breakpoint_invalidate(env, pc);
1429 if (breakpoint)
1430 *breakpoint = bp;
1431 return 0;
1432 #else
1433 return -ENOSYS;
1434 #endif
1437 /* Remove a specific breakpoint. */
1438 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1440 #if defined(TARGET_HAS_ICE)
1441 CPUBreakpoint *bp;
1443 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1444 if (bp->pc == pc && bp->flags == flags) {
1445 cpu_breakpoint_remove_by_ref(env, bp);
1446 return 0;
1449 return -ENOENT;
1450 #else
1451 return -ENOSYS;
1452 #endif
1455 /* Remove a specific breakpoint by reference. */
1456 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1458 #if defined(TARGET_HAS_ICE)
1459 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1461 breakpoint_invalidate(env, breakpoint->pc);
1463 qemu_free(breakpoint);
1464 #endif
1467 /* Remove all matching breakpoints. */
1468 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1470 #if defined(TARGET_HAS_ICE)
1471 CPUBreakpoint *bp, *next;
1473 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1474 if (bp->flags & mask)
1475 cpu_breakpoint_remove_by_ref(env, bp);
1477 #endif
1480 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1481 CPU loop after each instruction */
1482 void cpu_single_step(CPUState *env, int enabled)
1484 #if defined(TARGET_HAS_ICE)
1485 if (env->singlestep_enabled != enabled) {
1486 env->singlestep_enabled = enabled;
1487 if (kvm_enabled())
1488 kvm_update_guest_debug(env, 0);
1489 else {
1490 /* must flush all the translated code to avoid inconsistencies */
1491 /* XXX: only flush what is necessary */
1492 tb_flush(env);
1495 #endif
1498 /* enable or disable low level logging */
1499 void cpu_set_log(int log_flags)
1501 loglevel = log_flags;
1502 if (loglevel && !logfile) {
1503 logfile = fopen(logfilename, log_append ? "a" : "w");
1504 if (!logfile) {
1505 perror(logfilename);
1506 _exit(1);
1508 #if !defined(CONFIG_SOFTMMU)
1509 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1511 static char logfile_buf[4096];
1512 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1514 #else
1515 setvbuf(logfile, NULL, _IOLBF, 0);
1516 #endif
1517 log_append = 1;
1519 if (!loglevel && logfile) {
1520 fclose(logfile);
1521 logfile = NULL;
1525 void cpu_set_log_filename(const char *filename)
1527 logfilename = strdup(filename);
1528 if (logfile) {
1529 fclose(logfile);
1530 logfile = NULL;
1532 cpu_set_log(loglevel);
1535 static void cpu_unlink_tb(CPUState *env)
1537 #if defined(USE_NPTL)
1538 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1539 problem and hope the cpu will stop of its own accord. For userspace
1540 emulation this often isn't actually as bad as it sounds. Often
1541 signals are used primarily to interrupt blocking syscalls. */
1542 #else
1543 TranslationBlock *tb;
1544 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1546 tb = env->current_tb;
1547 /* if the cpu is currently executing code, we must unlink it and
1548 all the potentially executing TB */
1549 if (tb && !testandset(&interrupt_lock)) {
1550 env->current_tb = NULL;
1551 tb_reset_jump_recursive(tb);
1552 resetlock(&interrupt_lock);
1554 #endif
1557 /* mask must never be zero, except for A20 change call */
1558 void cpu_interrupt(CPUState *env, int mask)
1560 int old_mask;
1562 old_mask = env->interrupt_request;
1563 env->interrupt_request |= mask;
1565 #ifndef CONFIG_USER_ONLY
1567 * If called from iothread context, wake the target cpu in
1568 * case it's halted.
1570 if (!qemu_cpu_self(env)) {
1571 qemu_cpu_kick(env);
1572 return;
1574 #endif
1576 if (use_icount) {
1577 env->icount_decr.u16.high = 0xffff;
1578 #ifndef CONFIG_USER_ONLY
1579 if (!can_do_io(env)
1580 && (mask & ~old_mask) != 0) {
1581 cpu_abort(env, "Raised interrupt while not in I/O function");
1583 #endif
1584 } else {
1585 cpu_unlink_tb(env);
1589 void cpu_reset_interrupt(CPUState *env, int mask)
1591 env->interrupt_request &= ~mask;
1594 void cpu_exit(CPUState *env)
1596 env->exit_request = 1;
1597 cpu_unlink_tb(env);
1600 const CPULogItem cpu_log_items[] = {
1601 { CPU_LOG_TB_OUT_ASM, "out_asm",
1602 "show generated host assembly code for each compiled TB" },
1603 { CPU_LOG_TB_IN_ASM, "in_asm",
1604 "show target assembly code for each compiled TB" },
1605 { CPU_LOG_TB_OP, "op",
1606 "show micro ops for each compiled TB" },
1607 { CPU_LOG_TB_OP_OPT, "op_opt",
1608 "show micro ops "
1609 #ifdef TARGET_I386
1610 "before eflags optimization and "
1611 #endif
1612 "after liveness analysis" },
1613 { CPU_LOG_INT, "int",
1614 "show interrupts/exceptions in short format" },
1615 { CPU_LOG_EXEC, "exec",
1616 "show trace before each executed TB (lots of logs)" },
1617 { CPU_LOG_TB_CPU, "cpu",
1618 "show CPU state before block translation" },
1619 #ifdef TARGET_I386
1620 { CPU_LOG_PCALL, "pcall",
1621 "show protected mode far calls/returns/exceptions" },
1622 { CPU_LOG_RESET, "cpu_reset",
1623 "show CPU state before CPU resets" },
1624 #endif
1625 #ifdef DEBUG_IOPORT
1626 { CPU_LOG_IOPORT, "ioport",
1627 "show all i/o ports accesses" },
1628 #endif
1629 { 0, NULL, NULL },
1632 static int cmp1(const char *s1, int n, const char *s2)
1634 if (strlen(s2) != n)
1635 return 0;
1636 return memcmp(s1, s2, n) == 0;
1639 /* takes a comma separated list of log masks. Return 0 if error. */
1640 int cpu_str_to_log_mask(const char *str)
1642 const CPULogItem *item;
1643 int mask;
1644 const char *p, *p1;
1646 p = str;
1647 mask = 0;
1648 for(;;) {
1649 p1 = strchr(p, ',');
1650 if (!p1)
1651 p1 = p + strlen(p);
1652 if(cmp1(p,p1-p,"all")) {
1653 for(item = cpu_log_items; item->mask != 0; item++) {
1654 mask |= item->mask;
1656 } else {
1657 for(item = cpu_log_items; item->mask != 0; item++) {
1658 if (cmp1(p, p1 - p, item->name))
1659 goto found;
1661 return 0;
1663 found:
1664 mask |= item->mask;
1665 if (*p1 != ',')
1666 break;
1667 p = p1 + 1;
1669 return mask;
1672 void cpu_abort(CPUState *env, const char *fmt, ...)
1674 va_list ap;
1675 va_list ap2;
1677 va_start(ap, fmt);
1678 va_copy(ap2, ap);
1679 fprintf(stderr, "qemu: fatal: ");
1680 vfprintf(stderr, fmt, ap);
1681 fprintf(stderr, "\n");
1682 #ifdef TARGET_I386
1683 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1684 #else
1685 cpu_dump_state(env, stderr, fprintf, 0);
1686 #endif
1687 if (qemu_log_enabled()) {
1688 qemu_log("qemu: fatal: ");
1689 qemu_log_vprintf(fmt, ap2);
1690 qemu_log("\n");
1691 #ifdef TARGET_I386
1692 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1693 #else
1694 log_cpu_state(env, 0);
1695 #endif
1696 qemu_log_flush();
1697 qemu_log_close();
1699 va_end(ap2);
1700 va_end(ap);
1701 abort();
1704 CPUState *cpu_copy(CPUState *env)
1706 CPUState *new_env = cpu_init(env->cpu_model_str);
1707 CPUState *next_cpu = new_env->next_cpu;
1708 int cpu_index = new_env->cpu_index;
1709 #if defined(TARGET_HAS_ICE)
1710 CPUBreakpoint *bp;
1711 CPUWatchpoint *wp;
1712 #endif
1714 memcpy(new_env, env, sizeof(CPUState));
1716 /* Preserve chaining and index. */
1717 new_env->next_cpu = next_cpu;
1718 new_env->cpu_index = cpu_index;
1720 /* Clone all break/watchpoints.
1721 Note: Once we support ptrace with hw-debug register access, make sure
1722 BP_CPU break/watchpoints are handled correctly on clone. */
1723 TAILQ_INIT(&new_env->breakpoints);
1724 TAILQ_INIT(&new_env->watchpoints);
1725 #if defined(TARGET_HAS_ICE)
1726 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1727 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1729 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1730 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1731 wp->flags, NULL);
1733 #endif
1735 return new_env;
1738 #if !defined(CONFIG_USER_ONLY)
1740 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1742 unsigned int i;
1744 /* Discard jump cache entries for any tb which might potentially
1745 overlap the flushed page. */
1746 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1747 memset (&env->tb_jmp_cache[i], 0,
1748 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1750 i = tb_jmp_cache_hash_page(addr);
1751 memset (&env->tb_jmp_cache[i], 0,
1752 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1755 /* NOTE: if flush_global is true, also flush global entries (not
1756 implemented yet) */
1757 void tlb_flush(CPUState *env, int flush_global)
1759 int i;
1761 #if defined(DEBUG_TLB)
1762 printf("tlb_flush:\n");
1763 #endif
1764 /* must reset current TB so that interrupts cannot modify the
1765 links while we are modifying them */
1766 env->current_tb = NULL;
1768 for(i = 0; i < CPU_TLB_SIZE; i++) {
1769 int mmu_idx;
1770 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1771 env->tlb_table[mmu_idx][i].addr_read = -1;
1772 env->tlb_table[mmu_idx][i].addr_write = -1;
1773 env->tlb_table[mmu_idx][i].addr_code = -1;
1777 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1779 #ifdef CONFIG_KQEMU
1780 if (env->kqemu_enabled) {
1781 kqemu_flush(env, flush_global);
1783 #endif
1784 tlb_flush_count++;
1787 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1789 if (addr == (tlb_entry->addr_read &
1790 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1791 addr == (tlb_entry->addr_write &
1792 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1793 addr == (tlb_entry->addr_code &
1794 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1795 tlb_entry->addr_read = -1;
1796 tlb_entry->addr_write = -1;
1797 tlb_entry->addr_code = -1;
1801 void tlb_flush_page(CPUState *env, target_ulong addr)
1803 int i;
1804 int mmu_idx;
1806 #if defined(DEBUG_TLB)
1807 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1808 #endif
1809 /* must reset current TB so that interrupts cannot modify the
1810 links while we are modifying them */
1811 env->current_tb = NULL;
1813 addr &= TARGET_PAGE_MASK;
1814 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1815 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1816 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1818 tlb_flush_jmp_cache(env, addr);
1820 #ifdef CONFIG_KQEMU
1821 if (env->kqemu_enabled) {
1822 kqemu_flush_page(env, addr);
1824 #endif
1827 /* update the TLBs so that writes to code in the virtual page 'addr'
1828 can be detected */
1829 static void tlb_protect_code(ram_addr_t ram_addr)
1831 cpu_physical_memory_reset_dirty(ram_addr,
1832 ram_addr + TARGET_PAGE_SIZE,
1833 CODE_DIRTY_FLAG);
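/* Clearing CODE_DIRTY_FLAG above forces writes to a page that contains
   translated code through the not-dirty slow path (the TLB entry gets
   TLB_NOTDIRTY), where the affected TBs can be invalidated;
   tlb_unprotect_code_phys() below sets the flag again once no TB remains
   on the page. */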
1836 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1837 tested for self modifying code */
1838 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1839 target_ulong vaddr)
1841 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1844 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1845 unsigned long start, unsigned long length)
1847 unsigned long addr;
1848 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1849 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1850 if ((addr - start) < length) {
1851 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1856 /* Note: start and end must be within the same ram block. */
1857 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1858 int dirty_flags)
1860 CPUState *env;
1861 unsigned long length, start1;
1862 int i, mask, len;
1863 uint8_t *p;
1865 start &= TARGET_PAGE_MASK;
1866 end = TARGET_PAGE_ALIGN(end);
1868 length = end - start;
1869 if (length == 0)
1870 return;
1871 len = length >> TARGET_PAGE_BITS;
1872 #ifdef CONFIG_KQEMU
1873 /* XXX: should not depend on cpu context */
1874 env = first_cpu;
1875 if (env->kqemu_enabled) {
1876 ram_addr_t addr;
1877 addr = start;
1878 for(i = 0; i < len; i++) {
1879 kqemu_set_notdirty(env, addr);
1880 addr += TARGET_PAGE_SIZE;
1883 #endif
1884 mask = ~dirty_flags;
1885 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1886 for(i = 0; i < len; i++)
1887 p[i] &= mask;
1889 /* we modify the TLB cache so that the dirty bit will be set again
1890 when accessing the range */
1891 start1 = (unsigned long)qemu_get_ram_ptr(start);
1892 /* Check that we don't span multiple blocks - this breaks the
1893 address comparisons below. */
1894 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1895 != (end - 1) - start) {
1896 abort();
1899 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1900 int mmu_idx;
1901 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1902 for(i = 0; i < CPU_TLB_SIZE; i++)
1903 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1904 start1, length);
1909 int cpu_physical_memory_set_dirty_tracking(int enable)
1911 in_migration = enable;
1912 if (kvm_enabled()) {
1913 return kvm_set_migration_log(enable);
1915 return 0;
1918 int cpu_physical_memory_get_dirty_tracking(void)
1920 return in_migration;
1923 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1924 target_phys_addr_t end_addr)
1926 int ret = 0;
1928 if (kvm_enabled())
1929 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1930 return ret;
1933 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1935 ram_addr_t ram_addr;
1936 void *p;
1938 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1939 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1940 + tlb_entry->addend);
1941 ram_addr = qemu_ram_addr_from_host(p);
1942 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1943 tlb_entry->addr_write |= TLB_NOTDIRTY;
1948 /* update the TLB according to the current state of the dirty bits */
1949 void cpu_tlb_update_dirty(CPUState *env)
1951 int i;
1952 int mmu_idx;
1953 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1954 for(i = 0; i < CPU_TLB_SIZE; i++)
1955 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1959 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1961 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1962 tlb_entry->addr_write = vaddr;
1965 /* update the TLB corresponding to virtual page vaddr
1966 so that it is no longer dirty */
1967 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1969 int i;
1970 int mmu_idx;
1972 vaddr &= TARGET_PAGE_MASK;
1973 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1974 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1975 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1978 /* add a new TLB entry. At most one entry for a given virtual address
1979 is permitted. Return 0 if OK or 2 if the page could not be mapped
1980 (can only happen in non SOFTMMU mode for I/O pages or pages
1981 conflicting with the host address space). */
1982 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1983 target_phys_addr_t paddr, int prot,
1984 int mmu_idx, int is_softmmu)
1986 PhysPageDesc *p;
1987 unsigned long pd;
1988 unsigned int index;
1989 target_ulong address;
1990 target_ulong code_address;
1991 target_phys_addr_t addend;
1992 int ret;
1993 CPUTLBEntry *te;
1994 CPUWatchpoint *wp;
1995 target_phys_addr_t iotlb;
1997 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1998 if (!p) {
1999 pd = IO_MEM_UNASSIGNED;
2000 } else {
2001 pd = p->phys_offset;
2003 #if defined(DEBUG_TLB)
2004 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2005 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2006 #endif
2008 ret = 0;
2009 address = vaddr;
2010 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2011 /* IO memory case (romd handled later) */
2012 address |= TLB_MMIO;
2014 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2015 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2016 /* Normal RAM. */
2017 iotlb = pd & TARGET_PAGE_MASK;
2018 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2019 iotlb |= IO_MEM_NOTDIRTY;
2020 else
2021 iotlb |= IO_MEM_ROM;
2022 } else {
2023 /* IO handlers are currently passed a physical address.
2024 It would be nice to pass an offset from the base address
2025 of that region. This would avoid having to special case RAM,
2026 and avoid full address decoding in every device.
2027 We can't use the high bits of pd for this because
2028 IO_MEM_ROMD uses these as a ram address. */
2029 iotlb = (pd & ~TARGET_PAGE_MASK);
2030 if (p) {
2031 iotlb += p->region_offset;
2032 } else {
2033 iotlb += paddr;
2037 code_address = address;
2038 /* Make accesses to pages with watchpoints go via the
2039 watchpoint trap routines. */
2040 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2041 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2042 iotlb = io_mem_watch + paddr;
2043 /* TODO: The memory case can be optimized by not trapping
2044 reads of pages with a write breakpoint. */
2045 address |= TLB_MMIO;
2049 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2050 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2051 te = &env->tlb_table[mmu_idx][index];
2052 te->addend = addend - vaddr;
2053 if (prot & PAGE_READ) {
2054 te->addr_read = address;
2055 } else {
2056 te->addr_read = -1;
2059 if (prot & PAGE_EXEC) {
2060 te->addr_code = code_address;
2061 } else {
2062 te->addr_code = -1;
2064 if (prot & PAGE_WRITE) {
2065 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2066 (pd & IO_MEM_ROMD)) {
2067 /* Write access calls the I/O callback. */
2068 te->addr_write = address | TLB_MMIO;
2069 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2070 !cpu_physical_memory_is_dirty(pd)) {
2071 te->addr_write = address | TLB_NOTDIRTY;
2072 } else {
2073 te->addr_write = address;
2075 } else {
2076 te->addr_write = -1;
2078 return ret;
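/* Editor's note (not part of the original file): a rough sketch of how the
   softmmu fast path consumes the entry filled in above.  The real code lives
   in softmmu_template.h and the TCG-generated inline checks; the names below
   are paraphrased, not exact:

       index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
       if ((addr & (TARGET_PAGE_MASK | (access_size - 1))) ==
           env->tlb_table[mmu_idx][index].addr_read)
           host_ptr = addr + env->tlb_table[mmu_idx][index].addend;   (RAM hit)
       else
           take the slow path, which uses env->iotlb[mmu_idx][index]
           to dispatch through io_mem_read/io_mem_write

   TLB_MMIO and TLB_NOTDIRTY are low bits that never appear in the masked
   guest address, so storing them in addr_read/addr_write/addr_code above
   forces such pages onto the slow path. */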
2081 #else
2083 void tlb_flush(CPUState *env, int flush_global)
2087 void tlb_flush_page(CPUState *env, target_ulong addr)
2091 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2092 target_phys_addr_t paddr, int prot,
2093 int mmu_idx, int is_softmmu)
2095 return 0;
2099 * Walks guest process memory "regions" one by one
2100 * and calls callback function 'fn' for each region.
2102 int walk_memory_regions(void *priv,
2103 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2105 unsigned long start, end;
2106 PageDesc *p = NULL;
2107 int i, j, prot, prot1;
2108 int rc = 0;
2110 start = end = -1;
2111 prot = 0;
2113 for (i = 0; i <= L1_SIZE; i++) {
2114 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2115 for (j = 0; j < L2_SIZE; j++) {
2116 prot1 = (p == NULL) ? 0 : p[j].flags;
2118 * "region" is one contiguous chunk of memory
2119 * that has the same protection flags set.
2121 if (prot1 != prot) {
2122 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2123 if (start != -1) {
2124 rc = (*fn)(priv, start, end, prot);
2125 /* callback can stop iteration by returning != 0 */
2126 if (rc != 0)
2127 return (rc);
2129 if (prot1 != 0)
2130 start = end;
2131 else
2132 start = -1;
2133 prot = prot1;
2135 if (p == NULL)
2136 break;
2139 return (rc);
2142 static int dump_region(void *priv, unsigned long start,
2143 unsigned long end, unsigned long prot)
2145 FILE *f = (FILE *)priv;
2147 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2148 start, end, end - start,
2149 ((prot & PAGE_READ) ? 'r' : '-'),
2150 ((prot & PAGE_WRITE) ? 'w' : '-'),
2151 ((prot & PAGE_EXEC) ? 'x' : '-'));
2153 return (0);
2156 /* dump memory mappings */
2157 void page_dump(FILE *f)
2159 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2160 "start", "end", "size", "prot");
2161 walk_memory_regions(f, dump_region);
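/* Editor's note: with the "%08lx-%08lx %08lx %c%c%c" format used by
   dump_region() above, each region prints as "start-end size prot", e.g.
       08048000-08050000 00008000 r-x
   for a hypothetical 0x8000-byte executable mapping. */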
2164 int page_get_flags(target_ulong address)
2166 PageDesc *p;
2168 p = page_find(address >> TARGET_PAGE_BITS);
2169 if (!p)
2170 return 0;
2171 return p->flags;
2174 /* modify the flags of a page and invalidate the code if
2175 necessary. The flag PAGE_WRITE_ORG is set automatically
2176 depending on PAGE_WRITE */
2177 void page_set_flags(target_ulong start, target_ulong end, int flags)
2179 PageDesc *p;
2180 target_ulong addr;
2182 /* mmap_lock should already be held. */
2183 start = start & TARGET_PAGE_MASK;
2184 end = TARGET_PAGE_ALIGN(end);
2185 if (flags & PAGE_WRITE)
2186 flags |= PAGE_WRITE_ORG;
2187 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2188 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2189 /* We may be called for host regions that are outside guest
2190 address space. */
2191 if (!p)
2192 return;
2193 /* if the write protection is set, then we invalidate the code
2194 inside */
2195 if (!(p->flags & PAGE_WRITE) &&
2196 (flags & PAGE_WRITE) &&
2197 p->first_tb) {
2198 tb_invalidate_phys_page(addr, 0, NULL);
2200 p->flags = flags;
2204 int page_check_range(target_ulong start, target_ulong len, int flags)
2206 PageDesc *p;
2207 target_ulong end;
2208 target_ulong addr;
2210 if (start + len < start)
2211 /* we've wrapped around */
2212 return -1;
2214 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2215 start = start & TARGET_PAGE_MASK;
2217 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2218 p = page_find(addr >> TARGET_PAGE_BITS);
2219 if( !p )
2220 return -1;
2221 if( !(p->flags & PAGE_VALID) )
2222 return -1;
2224 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2225 return -1;
2226 if (flags & PAGE_WRITE) {
2227 if (!(p->flags & PAGE_WRITE_ORG))
2228 return -1;
2229 /* unprotect the page if it was put read-only because it
2230 contains translated code */
2231 if (!(p->flags & PAGE_WRITE)) {
2232 if (!page_unprotect(addr, 0, NULL))
2233 return -1;
2235 return 0;
2238 return 0;
2241 /* called from signal handler: invalidate the code and unprotect the
2242 page. Return TRUE if the fault was successfully handled. */
2243 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2245 unsigned int page_index, prot, pindex;
2246 PageDesc *p, *p1;
2247 target_ulong host_start, host_end, addr;
2249 /* Technically this isn't safe inside a signal handler. However we
2250 know this only ever happens in a synchronous SEGV handler, so in
2251 practice it seems to be ok. */
2252 mmap_lock();
2254 host_start = address & qemu_host_page_mask;
2255 page_index = host_start >> TARGET_PAGE_BITS;
2256 p1 = page_find(page_index);
2257 if (!p1) {
2258 mmap_unlock();
2259 return 0;
2261 host_end = host_start + qemu_host_page_size;
2262 p = p1;
2263 prot = 0;
2264 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2265 prot |= p->flags;
2266 p++;
2268 /* if the page was really writable, then we change its
2269 protection back to writable */
2270 if (prot & PAGE_WRITE_ORG) {
2271 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2272 if (!(p1[pindex].flags & PAGE_WRITE)) {
2273 mprotect((void *)g2h(host_start), qemu_host_page_size,
2274 (prot & PAGE_BITS) | PAGE_WRITE);
2275 p1[pindex].flags |= PAGE_WRITE;
2276 /* and since the content will be modified, we must invalidate
2277 the corresponding translated code. */
2278 tb_invalidate_phys_page(address, pc, puc);
2279 #ifdef DEBUG_TB_CHECK
2280 tb_invalidate_check(address);
2281 #endif
2282 mmap_unlock();
2283 return 1;
2286 mmap_unlock();
2287 return 0;
2290 static inline void tlb_set_dirty(CPUState *env,
2291 unsigned long addr, target_ulong vaddr)
2294 #endif /* defined(CONFIG_USER_ONLY) */
2296 #if !defined(CONFIG_USER_ONLY)
2298 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2299 ram_addr_t memory, ram_addr_t region_offset);
2300 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2301 ram_addr_t orig_memory, ram_addr_t region_offset);
2302 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2303 need_subpage) \
2304 do { \
2305 if (addr > start_addr) \
2306 start_addr2 = 0; \
2307 else { \
2308 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2309 if (start_addr2 > 0) \
2310 need_subpage = 1; \
2313 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2314 end_addr2 = TARGET_PAGE_SIZE - 1; \
2315 else { \
2316 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2317 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2318 need_subpage = 1; \
2320 } while (0)
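/* Editor's note: a worked example of the macro above, assuming
   TARGET_PAGE_SIZE == 0x1000 and hypothetical addresses.  Registering
   orig_size = 0x100 bytes at start_addr = 0x10000100 gives, for
   addr == start_addr:
       start_addr2  = 0x100   (first covered byte offset within the page)
       end_addr2    = 0x1ff   (last covered byte offset within the page)
       need_subpage = 1
   so only offsets [0x100, 0x1ff] of that page are routed to the new handlers
   via subpage_register(); the rest of the page keeps whatever was registered
   there before (or stays unassigned). */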
2322 /* register physical memory. 'size' must be a multiple of the target
2323 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2324 io memory page. The address used when calling the IO function is
2325 the offset from the start of the region, plus region_offset. Both
2326 start_addr and region_offset are rounded down to a page boundary
2327 before calculating this offset. This should not be a problem unless
2328 the low bits of start_addr and region_offset differ. */
2329 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2330 ram_addr_t size,
2331 ram_addr_t phys_offset,
2332 ram_addr_t region_offset)
2334 target_phys_addr_t addr, end_addr;
2335 PhysPageDesc *p;
2336 CPUState *env;
2337 ram_addr_t orig_size = size;
2338 void *subpage;
2340 #ifdef CONFIG_KQEMU
2341 /* XXX: should not depend on cpu context */
2342 env = first_cpu;
2343 if (env->kqemu_enabled) {
2344 kqemu_set_phys_mem(start_addr, size, phys_offset);
2346 #endif
2347 if (kvm_enabled())
2348 kvm_set_phys_mem(start_addr, size, phys_offset);
2350 if (phys_offset == IO_MEM_UNASSIGNED) {
2351 region_offset = start_addr;
2353 region_offset &= TARGET_PAGE_MASK;
2354 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2355 end_addr = start_addr + (target_phys_addr_t)size;
2356 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2357 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2358 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2359 ram_addr_t orig_memory = p->phys_offset;
2360 target_phys_addr_t start_addr2, end_addr2;
2361 int need_subpage = 0;
2363 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2364 need_subpage);
2365 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2366 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2367 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2368 &p->phys_offset, orig_memory,
2369 p->region_offset);
2370 } else {
2371 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2372 >> IO_MEM_SHIFT];
2374 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2375 region_offset);
2376 p->region_offset = 0;
2377 } else {
2378 p->phys_offset = phys_offset;
2379 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2380 (phys_offset & IO_MEM_ROMD))
2381 phys_offset += TARGET_PAGE_SIZE;
2383 } else {
2384 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2385 p->phys_offset = phys_offset;
2386 p->region_offset = region_offset;
2387 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2388 (phys_offset & IO_MEM_ROMD)) {
2389 phys_offset += TARGET_PAGE_SIZE;
2390 } else {
2391 target_phys_addr_t start_addr2, end_addr2;
2392 int need_subpage = 0;
2394 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2395 end_addr2, need_subpage);
2397 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2398 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2399 &p->phys_offset, IO_MEM_UNASSIGNED,
2400 addr & TARGET_PAGE_MASK);
2401 subpage_register(subpage, start_addr2, end_addr2,
2402 phys_offset, region_offset);
2403 p->region_offset = 0;
2407 region_offset += TARGET_PAGE_SIZE;
2410 /* since each CPU stores ram addresses in its TLB cache, we must
2411 reset the modified entries */
2412 /* XXX: slow ! */
2413 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2414 tlb_flush(env, 1);
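/* Editor's note (illustrative sketch, not part of the original file): board
   code normally reaches this function through the region_offset == 0 wrapper
   cpu_register_physical_memory() declared outside this file.  A hypothetical
   board setup might look like: */
#if 0
static void example_board_memory_setup(ram_addr_t ram_size, int mmio_io_index)
{
    /* back guest-physical [0, ram_size) with host RAM */
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

    /* route one page at a hypothetical MMIO base to the handlers behind
       mmio_io_index (obtained from cpu_register_io_memory()) */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, mmio_io_index);
}
#endif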
2418 /* XXX: temporary until new memory mapping API */
2419 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2421 PhysPageDesc *p;
2423 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2424 if (!p)
2425 return IO_MEM_UNASSIGNED;
2426 return p->phys_offset;
2429 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2431 if (kvm_enabled())
2432 kvm_coalesce_mmio_region(addr, size);
2435 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2437 if (kvm_enabled())
2438 kvm_uncoalesce_mmio_region(addr, size);
2441 #ifdef CONFIG_KQEMU
2442 /* XXX: better than nothing */
2443 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2445 ram_addr_t addr;
2446 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2447 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2448 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2449 abort();
2451 addr = last_ram_offset;
2452 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2453 return addr;
2455 #endif
2457 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2459 RAMBlock *new_block;
2461 #ifdef CONFIG_KQEMU
2462 if (kqemu_phys_ram_base) {
2463 return kqemu_ram_alloc(size);
2465 #endif
2467 size = TARGET_PAGE_ALIGN(size);
2468 new_block = qemu_malloc(sizeof(*new_block));
2470 new_block->host = qemu_vmalloc(size);
2471 new_block->offset = last_ram_offset;
2472 new_block->length = size;
2474 new_block->next = ram_blocks;
2475 ram_blocks = new_block;
2477 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2478 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2479 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2480 0xff, size >> TARGET_PAGE_BITS);
2482 last_ram_offset += size;
2484 if (kvm_enabled())
2485 kvm_setup_guest_memory(new_block->host, size);
2487 return new_block->offset;
2490 void qemu_ram_free(ram_addr_t addr)
2492 /* TODO: implement this. */
2495 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2496 With the exception of the softmmu code in this file, this should
2497 only be used for local memory (e.g. video ram) that the device owns,
2498 and knows it isn't going to access beyond the end of the block.
2500 It should not be used for general purpose DMA.
2501 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2503 void *qemu_get_ram_ptr(ram_addr_t addr)
2505 RAMBlock *prev;
2506 RAMBlock **prevp;
2507 RAMBlock *block;
2509 #ifdef CONFIG_KQEMU
2510 if (kqemu_phys_ram_base) {
2511 return kqemu_phys_ram_base + addr;
2513 #endif
2515 prev = NULL;
2516 prevp = &ram_blocks;
2517 block = ram_blocks;
2518 while (block && (block->offset > addr
2519 || block->offset + block->length <= addr)) {
2520 if (prev)
2521 prevp = &prev->next;
2522 prev = block;
2523 block = block->next;
2525 if (!block) {
2526 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2527 abort();
2529 /* Move this entry to the start of the list. */
2530 if (prev) {
2531 prev->next = block->next;
2532 block->next = *prevp;
2533 *prevp = block;
2535 return block->host + (addr - block->offset);
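/* Editor's note: illustrative only.  A device that owns a block of guest RAM
   (e.g. video memory) typically keeps the ram_addr_t returned by
   qemu_ram_alloc() and converts it on demand, roughly:

       ram_addr_t vram_offset = qemu_ram_alloc(vram_size);
       uint8_t *vram = qemu_get_ram_ptr(vram_offset);

   As the comment above notes, general-purpose DMA should instead go through
   cpu_physical_memory_map()/cpu_physical_memory_rw(). */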
2538 /* Some of the softmmu routines need to translate from a host pointer
2539 (typically a TLB entry) back to a ram offset. */
2540 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2542 RAMBlock *prev;
2543 RAMBlock **prevp;
2544 RAMBlock *block;
2545 uint8_t *host = ptr;
2547 #ifdef CONFIG_KQEMU
2548 if (kqemu_phys_ram_base) {
2549 return host - kqemu_phys_ram_base;
2551 #endif
2553 prev = NULL;
2554 prevp = &ram_blocks;
2555 block = ram_blocks;
2556 while (block && (block->host > host
2557 || block->host + block->length <= host)) {
2558 if (prev)
2559 prevp = &prev->next;
2560 prev = block;
2561 block = block->next;
2563 if (!block) {
2564 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2565 abort();
2567 return block->offset + (host - block->host);
2570 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2572 #ifdef DEBUG_UNASSIGNED
2573 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2574 #endif
2575 #if defined(TARGET_SPARC)
2576 do_unassigned_access(addr, 0, 0, 0, 1);
2577 #endif
2578 return 0;
2581 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2583 #ifdef DEBUG_UNASSIGNED
2584 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2585 #endif
2586 #if defined(TARGET_SPARC)
2587 do_unassigned_access(addr, 0, 0, 0, 2);
2588 #endif
2589 return 0;
2592 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2594 #ifdef DEBUG_UNASSIGNED
2595 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2596 #endif
2597 #if defined(TARGET_SPARC)
2598 do_unassigned_access(addr, 0, 0, 0, 4);
2599 #endif
2600 return 0;
2603 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2605 #ifdef DEBUG_UNASSIGNED
2606 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2607 #endif
2608 #if defined(TARGET_SPARC)
2609 do_unassigned_access(addr, 1, 0, 0, 1);
2610 #endif
2613 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2615 #ifdef DEBUG_UNASSIGNED
2616 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2617 #endif
2618 #if defined(TARGET_SPARC)
2619 do_unassigned_access(addr, 1, 0, 0, 2);
2620 #endif
2623 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2625 #ifdef DEBUG_UNASSIGNED
2626 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2627 #endif
2628 #if defined(TARGET_SPARC)
2629 do_unassigned_access(addr, 1, 0, 0, 4);
2630 #endif
2633 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2634 unassigned_mem_readb,
2635 unassigned_mem_readw,
2636 unassigned_mem_readl,
2639 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2640 unassigned_mem_writeb,
2641 unassigned_mem_writew,
2642 unassigned_mem_writel,
2645 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2646 uint32_t val)
2648 int dirty_flags;
2649 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2650 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2651 #if !defined(CONFIG_USER_ONLY)
2652 tb_invalidate_phys_page_fast(ram_addr, 1);
2653 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2654 #endif
2656 stb_p(qemu_get_ram_ptr(ram_addr), val);
2657 #ifdef CONFIG_KQEMU
2658 if (cpu_single_env->kqemu_enabled &&
2659 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2660 kqemu_modify_page(cpu_single_env, ram_addr);
2661 #endif
2662 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2663 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2664 /* we remove the notdirty callback only if the code has been
2665 flushed */
2666 if (dirty_flags == 0xff)
2667 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2670 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2671 uint32_t val)
2673 int dirty_flags;
2674 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2675 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2676 #if !defined(CONFIG_USER_ONLY)
2677 tb_invalidate_phys_page_fast(ram_addr, 2);
2678 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2679 #endif
2681 stw_p(qemu_get_ram_ptr(ram_addr), val);
2682 #ifdef CONFIG_KQEMU
2683 if (cpu_single_env->kqemu_enabled &&
2684 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2685 kqemu_modify_page(cpu_single_env, ram_addr);
2686 #endif
2687 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2688 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2689 /* we remove the notdirty callback only if the code has been
2690 flushed */
2691 if (dirty_flags == 0xff)
2692 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2695 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2696 uint32_t val)
2698 int dirty_flags;
2699 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2700 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2701 #if !defined(CONFIG_USER_ONLY)
2702 tb_invalidate_phys_page_fast(ram_addr, 4);
2703 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2704 #endif
2706 stl_p(qemu_get_ram_ptr(ram_addr), val);
2707 #ifdef CONFIG_KQEMU
2708 if (cpu_single_env->kqemu_enabled &&
2709 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2710 kqemu_modify_page(cpu_single_env, ram_addr);
2711 #endif
2712 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2713 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2714 /* we remove the notdirty callback only if the code has been
2715 flushed */
2716 if (dirty_flags == 0xff)
2717 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2720 static CPUReadMemoryFunc *error_mem_read[3] = {
2721 NULL, /* never used */
2722 NULL, /* never used */
2723 NULL, /* never used */
2726 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2727 notdirty_mem_writeb,
2728 notdirty_mem_writew,
2729 notdirty_mem_writel,
2732 /* Generate a debug exception if a watchpoint has been hit. */
2733 static void check_watchpoint(int offset, int len_mask, int flags)
2735 CPUState *env = cpu_single_env;
2736 target_ulong pc, cs_base;
2737 TranslationBlock *tb;
2738 target_ulong vaddr;
2739 CPUWatchpoint *wp;
2740 int cpu_flags;
2742 if (env->watchpoint_hit) {
2743 /* We re-entered the check after replacing the TB. Now raise
2744 * the debug interrupt so that it will trigger after the
2745 * current instruction. */
2746 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2747 return;
2749 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2750 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2751 if ((vaddr == (wp->vaddr & len_mask) ||
2752 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2753 wp->flags |= BP_WATCHPOINT_HIT;
2754 if (!env->watchpoint_hit) {
2755 env->watchpoint_hit = wp;
2756 tb = tb_find_pc(env->mem_io_pc);
2757 if (!tb) {
2758 cpu_abort(env, "check_watchpoint: could not find TB for "
2759 "pc=%p", (void *)env->mem_io_pc);
2761 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2762 tb_phys_invalidate(tb, -1);
2763 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2764 env->exception_index = EXCP_DEBUG;
2765 } else {
2766 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2767 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2769 cpu_resume_from_signal(env, NULL);
2771 } else {
2772 wp->flags &= ~BP_WATCHPOINT_HIT;
2777 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2778 so these check for a hit then pass through to the normal out-of-line
2779 phys routines. */
2780 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2782 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2783 return ldub_phys(addr);
2786 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2788 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2789 return lduw_phys(addr);
2792 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2794 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2795 return ldl_phys(addr);
2798 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2799 uint32_t val)
2801 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2802 stb_phys(addr, val);
2805 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2806 uint32_t val)
2808 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2809 stw_phys(addr, val);
2812 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2813 uint32_t val)
2815 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2816 stl_phys(addr, val);
2819 static CPUReadMemoryFunc *watch_mem_read[3] = {
2820 watch_mem_readb,
2821 watch_mem_readw,
2822 watch_mem_readl,
2825 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2826 watch_mem_writeb,
2827 watch_mem_writew,
2828 watch_mem_writel,
2831 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2832 unsigned int len)
2834 uint32_t ret;
2835 unsigned int idx;
2837 idx = SUBPAGE_IDX(addr);
2838 #if defined(DEBUG_SUBPAGE)
2839 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2840 mmio, len, addr, idx);
2841 #endif
2842 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2843 addr + mmio->region_offset[idx][0][len]);
2845 return ret;
2848 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2849 uint32_t value, unsigned int len)
2851 unsigned int idx;
2853 idx = SUBPAGE_IDX(addr);
2854 #if defined(DEBUG_SUBPAGE)
2855 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2856 mmio, len, addr, idx, value);
2857 #endif
2858 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2859 addr + mmio->region_offset[idx][1][len],
2860 value);
2863 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2865 #if defined(DEBUG_SUBPAGE)
2866 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2867 #endif
2869 return subpage_readlen(opaque, addr, 0);
2872 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2873 uint32_t value)
2875 #if defined(DEBUG_SUBPAGE)
2876 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2877 #endif
2878 subpage_writelen(opaque, addr, value, 0);
2881 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2883 #if defined(DEBUG_SUBPAGE)
2884 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2885 #endif
2887 return subpage_readlen(opaque, addr, 1);
2890 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2891 uint32_t value)
2893 #if defined(DEBUG_SUBPAGE)
2894 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2895 #endif
2896 subpage_writelen(opaque, addr, value, 1);
2899 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2901 #if defined(DEBUG_SUBPAGE)
2902 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2903 #endif
2905 return subpage_readlen(opaque, addr, 2);
2908 static void subpage_writel (void *opaque,
2909 target_phys_addr_t addr, uint32_t value)
2911 #if defined(DEBUG_SUBPAGE)
2912 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2913 #endif
2914 subpage_writelen(opaque, addr, value, 2);
2917 static CPUReadMemoryFunc *subpage_read[] = {
2918 &subpage_readb,
2919 &subpage_readw,
2920 &subpage_readl,
2923 static CPUWriteMemoryFunc *subpage_write[] = {
2924 &subpage_writeb,
2925 &subpage_writew,
2926 &subpage_writel,
2929 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2930 ram_addr_t memory, ram_addr_t region_offset)
2932 int idx, eidx;
2933 unsigned int i;
2935 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2936 return -1;
2937 idx = SUBPAGE_IDX(start);
2938 eidx = SUBPAGE_IDX(end);
2939 #if defined(DEBUG_SUBPAGE)
2940 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2941 mmio, start, end, idx, eidx, memory);
2942 #endif
2943 memory >>= IO_MEM_SHIFT;
2944 for (; idx <= eidx; idx++) {
2945 for (i = 0; i < 4; i++) {
2946 if (io_mem_read[memory][i]) {
2947 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2948 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2949 mmio->region_offset[idx][0][i] = region_offset;
2951 if (io_mem_write[memory][i]) {
2952 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2953 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2954 mmio->region_offset[idx][1][i] = region_offset;
2959 return 0;
2962 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2963 ram_addr_t orig_memory, ram_addr_t region_offset)
2965 subpage_t *mmio;
2966 int subpage_memory;
2968 mmio = qemu_mallocz(sizeof(subpage_t));
2970 mmio->base = base;
2971 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2972 #if defined(DEBUG_SUBPAGE)
2973 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2974 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2975 #endif
2976 *phys = subpage_memory | IO_MEM_SUBPAGE;
2977 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2978 region_offset);
2980 return mmio;
2983 static int get_free_io_mem_idx(void)
2985 int i;
2987 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2988 if (!io_mem_used[i]) {
2989 io_mem_used[i] = 1;
2990 return i;
2993 return -1;
2996 /* mem_read and mem_write are arrays of functions containing the
2997 function to access byte (index 0), word (index 1) and dword (index
2998 2). Functions can be omitted with a NULL function pointer.
2999 If io_index is non zero, the corresponding io zone is
3000 modified. If it is zero, a new io zone is allocated. The return
3001 value can be used with cpu_register_physical_memory(). (-1) is
3002 returned on error. */
3003 static int cpu_register_io_memory_fixed(int io_index,
3004 CPUReadMemoryFunc **mem_read,
3005 CPUWriteMemoryFunc **mem_write,
3006 void *opaque)
3008 int i, subwidth = 0;
3010 if (io_index <= 0) {
3011 io_index = get_free_io_mem_idx();
3012 if (io_index == -1)
3013 return io_index;
3014 } else {
3015 io_index >>= IO_MEM_SHIFT;
3016 if (io_index >= IO_MEM_NB_ENTRIES)
3017 return -1;
3020 for(i = 0;i < 3; i++) {
3021 if (!mem_read[i] || !mem_write[i])
3022 subwidth = IO_MEM_SUBWIDTH;
3023 io_mem_read[io_index][i] = mem_read[i];
3024 io_mem_write[io_index][i] = mem_write[i];
3026 io_mem_opaque[io_index] = opaque;
3027 return (io_index << IO_MEM_SHIFT) | subwidth;
3030 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3031 CPUWriteMemoryFunc **mem_write,
3032 void *opaque)
3034 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3037 void cpu_unregister_io_memory(int io_table_address)
3039 int i;
3040 int io_index = io_table_address >> IO_MEM_SHIFT;
3042 for (i=0;i < 3; i++) {
3043 io_mem_read[io_index][i] = unassigned_mem_read[i];
3044 io_mem_write[io_index][i] = unassigned_mem_write[i];
3046 io_mem_opaque[io_index] = NULL;
3047 io_mem_used[io_index] = 0;
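/* Editor's note (illustrative sketch, not part of the original file): a
   device typically registers its handlers as below; the "mydev_*" names and
   the base address are hypothetical.  Omitting the byte/word handlers makes
   cpu_register_io_memory_fixed() set IO_MEM_SUBWIDTH in the returned index. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    /* decode addr and return the corresponding register value */
    return 0;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* decode addr and update the device state behind opaque */
}

static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void mydev_map(void *dev_state)
{
    int io_index = cpu_register_io_memory(mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(0x20000000, TARGET_PAGE_SIZE, io_index);
}
#endif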
3050 static void io_mem_init(void)
3052 int i;
3054 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3055 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3056 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3057 for (i=0; i<5; i++)
3058 io_mem_used[i] = 1;
3060 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3061 watch_mem_write, NULL);
3062 #ifdef CONFIG_KQEMU
3063 if (kqemu_phys_ram_base) {
3064 /* alloc dirty bits array */
3065 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3066 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3068 #endif
3071 #endif /* !defined(CONFIG_USER_ONLY) */
3073 /* physical memory access (slow version, mainly for debug) */
3074 #if defined(CONFIG_USER_ONLY)
3075 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3076 int len, int is_write)
3078 int l, flags;
3079 target_ulong page;
3080 void * p;
3082 while (len > 0) {
3083 page = addr & TARGET_PAGE_MASK;
3084 l = (page + TARGET_PAGE_SIZE) - addr;
3085 if (l > len)
3086 l = len;
3087 flags = page_get_flags(page);
3088 if (!(flags & PAGE_VALID))
3089 return;
3090 if (is_write) {
3091 if (!(flags & PAGE_WRITE))
3092 return;
3093 /* XXX: this code should not depend on lock_user */
3094 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3095 /* FIXME - should this return an error rather than just fail? */
3096 return;
3097 memcpy(p, buf, l);
3098 unlock_user(p, addr, l);
3099 } else {
3100 if (!(flags & PAGE_READ))
3101 return;
3102 /* XXX: this code should not depend on lock_user */
3103 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3104 /* FIXME - should this return an error rather than just fail? */
3105 return;
3106 memcpy(buf, p, l);
3107 unlock_user(p, addr, 0);
3109 len -= l;
3110 buf += l;
3111 addr += l;
3115 #else
3116 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3117 int len, int is_write)
3119 int l, io_index;
3120 uint8_t *ptr;
3121 uint32_t val;
3122 target_phys_addr_t page;
3123 unsigned long pd;
3124 PhysPageDesc *p;
3126 while (len > 0) {
3127 page = addr & TARGET_PAGE_MASK;
3128 l = (page + TARGET_PAGE_SIZE) - addr;
3129 if (l > len)
3130 l = len;
3131 p = phys_page_find(page >> TARGET_PAGE_BITS);
3132 if (!p) {
3133 pd = IO_MEM_UNASSIGNED;
3134 } else {
3135 pd = p->phys_offset;
3138 if (is_write) {
3139 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3140 target_phys_addr_t addr1 = addr;
3141 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3142 if (p)
3143 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3144 /* XXX: could force cpu_single_env to NULL to avoid
3145 potential bugs */
3146 if (l >= 4 && ((addr1 & 3) == 0)) {
3147 /* 32 bit write access */
3148 val = ldl_p(buf);
3149 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3150 l = 4;
3151 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3152 /* 16 bit write access */
3153 val = lduw_p(buf);
3154 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3155 l = 2;
3156 } else {
3157 /* 8 bit write access */
3158 val = ldub_p(buf);
3159 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3160 l = 1;
3162 } else {
3163 unsigned long addr1;
3164 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3165 /* RAM case */
3166 ptr = qemu_get_ram_ptr(addr1);
3167 memcpy(ptr, buf, l);
3168 if (!cpu_physical_memory_is_dirty(addr1)) {
3169 /* invalidate code */
3170 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3171 /* set dirty bit */
3172 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3173 (0xff & ~CODE_DIRTY_FLAG);
3176 } else {
3177 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3178 !(pd & IO_MEM_ROMD)) {
3179 target_phys_addr_t addr1 = addr;
3180 /* I/O case */
3181 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3182 if (p)
3183 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3184 if (l >= 4 && ((addr1 & 3) == 0)) {
3185 /* 32 bit read access */
3186 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3187 stl_p(buf, val);
3188 l = 4;
3189 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3190 /* 16 bit read access */
3191 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3192 stw_p(buf, val);
3193 l = 2;
3194 } else {
3195 /* 8 bit read access */
3196 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3197 stb_p(buf, val);
3198 l = 1;
3200 } else {
3201 /* RAM case */
3202 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3203 (addr & ~TARGET_PAGE_MASK);
3204 memcpy(buf, ptr, l);
3207 len -= l;
3208 buf += l;
3209 addr += l;
3213 /* used for ROM loading : can write in RAM and ROM */
3214 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3215 const uint8_t *buf, int len)
3217 int l;
3218 uint8_t *ptr;
3219 target_phys_addr_t page;
3220 unsigned long pd;
3221 PhysPageDesc *p;
3223 while (len > 0) {
3224 page = addr & TARGET_PAGE_MASK;
3225 l = (page + TARGET_PAGE_SIZE) - addr;
3226 if (l > len)
3227 l = len;
3228 p = phys_page_find(page >> TARGET_PAGE_BITS);
3229 if (!p) {
3230 pd = IO_MEM_UNASSIGNED;
3231 } else {
3232 pd = p->phys_offset;
3235 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3236 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3237 !(pd & IO_MEM_ROMD)) {
3238 /* do nothing */
3239 } else {
3240 unsigned long addr1;
3241 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3242 /* ROM/RAM case */
3243 ptr = qemu_get_ram_ptr(addr1);
3244 memcpy(ptr, buf, l);
3246 len -= l;
3247 buf += l;
3248 addr += l;
3252 typedef struct {
3253 void *buffer;
3254 target_phys_addr_t addr;
3255 target_phys_addr_t len;
3256 } BounceBuffer;
3258 static BounceBuffer bounce;
3260 typedef struct MapClient {
3261 void *opaque;
3262 void (*callback)(void *opaque);
3263 LIST_ENTRY(MapClient) link;
3264 } MapClient;
3266 static LIST_HEAD(map_client_list, MapClient) map_client_list
3267 = LIST_HEAD_INITIALIZER(map_client_list);
3269 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3271 MapClient *client = qemu_malloc(sizeof(*client));
3273 client->opaque = opaque;
3274 client->callback = callback;
3275 LIST_INSERT_HEAD(&map_client_list, client, link);
3276 return client;
3279 void cpu_unregister_map_client(void *_client)
3281 MapClient *client = (MapClient *)_client;
3283 LIST_REMOVE(client, link);
3284 qemu_free(client);
3287 static void cpu_notify_map_clients(void)
3289 MapClient *client;
3291 while (!LIST_EMPTY(&map_client_list)) {
3292 client = LIST_FIRST(&map_client_list);
3293 client->callback(client->opaque);
3294 cpu_unregister_map_client(client);
3298 /* Map a physical memory region into a host virtual address.
3299 * May map a subset of the requested range, given by and returned in *plen.
3300 * May return NULL if resources needed to perform the mapping are exhausted.
3301 * Use only for reads OR writes - not for read-modify-write operations.
3302 * Use cpu_register_map_client() to know when retrying the map operation is
3303 * likely to succeed.
3305 void *cpu_physical_memory_map(target_phys_addr_t addr,
3306 target_phys_addr_t *plen,
3307 int is_write)
3309 target_phys_addr_t len = *plen;
3310 target_phys_addr_t done = 0;
3311 int l;
3312 uint8_t *ret = NULL;
3313 uint8_t *ptr;
3314 target_phys_addr_t page;
3315 unsigned long pd;
3316 PhysPageDesc *p;
3317 unsigned long addr1;
3319 while (len > 0) {
3320 page = addr & TARGET_PAGE_MASK;
3321 l = (page + TARGET_PAGE_SIZE) - addr;
3322 if (l > len)
3323 l = len;
3324 p = phys_page_find(page >> TARGET_PAGE_BITS);
3325 if (!p) {
3326 pd = IO_MEM_UNASSIGNED;
3327 } else {
3328 pd = p->phys_offset;
3331 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3332 if (done || bounce.buffer) {
3333 break;
3335 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3336 bounce.addr = addr;
3337 bounce.len = l;
3338 if (!is_write) {
3339 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3341 ptr = bounce.buffer;
3342 } else {
3343 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3344 ptr = qemu_get_ram_ptr(addr1);
3346 if (!done) {
3347 ret = ptr;
3348 } else if (ret + done != ptr) {
3349 break;
3352 len -= l;
3353 addr += l;
3354 done += l;
3356 *plen = done;
3357 return ret;
3360 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3361 * Will also mark the memory as dirty if is_write == 1. access_len gives
3362 * the amount of memory that was actually read or written by the caller.
3364 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3365 int is_write, target_phys_addr_t access_len)
3367 if (buffer != bounce.buffer) {
3368 if (is_write) {
3369 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3370 while (access_len) {
3371 unsigned l;
3372 l = TARGET_PAGE_SIZE;
3373 if (l > access_len)
3374 l = access_len;
3375 if (!cpu_physical_memory_is_dirty(addr1)) {
3376 /* invalidate code */
3377 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3378 /* set dirty bit */
3379 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3380 (0xff & ~CODE_DIRTY_FLAG);
3382 addr1 += l;
3383 access_len -= l;
3386 return;
3388 if (is_write) {
3389 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3391 qemu_free(bounce.buffer);
3392 bounce.buffer = NULL;
3393 cpu_notify_map_clients();
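/* Editor's note (illustrative sketch only; the device names are
   hypothetical): a typical DMA-style use of the map/unmap pair above.  Note
   that *plen may come back smaller than requested, so real code loops over
   the transfer. */
#if 0
static void mydev_retry_dma(void *opaque);   /* re-issues the transfer later */

static void mydev_dma_write(void *dev_state, target_phys_addr_t dma_addr,
                            const uint8_t *data, target_phys_addr_t size)
{
    target_phys_addr_t len = size;
    void *buf = cpu_physical_memory_map(dma_addr, &len, 1 /* is_write */);

    if (!buf) {
        /* mapping resources (e.g. the single bounce buffer) are busy:
           register a callback and retry once it is notified */
        cpu_register_map_client(dev_state, mydev_retry_dma);
        return;
    }
    memcpy(buf, data, len);
    cpu_physical_memory_unmap(buf, len, 1, len);
}
#endif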
3396 /* warning: addr must be aligned */
3397 uint32_t ldl_phys(target_phys_addr_t addr)
3399 int io_index;
3400 uint8_t *ptr;
3401 uint32_t val;
3402 unsigned long pd;
3403 PhysPageDesc *p;
3405 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3406 if (!p) {
3407 pd = IO_MEM_UNASSIGNED;
3408 } else {
3409 pd = p->phys_offset;
3412 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3413 !(pd & IO_MEM_ROMD)) {
3414 /* I/O case */
3415 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3416 if (p)
3417 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3418 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3419 } else {
3420 /* RAM case */
3421 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3422 (addr & ~TARGET_PAGE_MASK);
3423 val = ldl_p(ptr);
3425 return val;
3428 /* warning: addr must be aligned */
3429 uint64_t ldq_phys(target_phys_addr_t addr)
3431 int io_index;
3432 uint8_t *ptr;
3433 uint64_t val;
3434 unsigned long pd;
3435 PhysPageDesc *p;
3437 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3438 if (!p) {
3439 pd = IO_MEM_UNASSIGNED;
3440 } else {
3441 pd = p->phys_offset;
3444 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3445 !(pd & IO_MEM_ROMD)) {
3446 /* I/O case */
3447 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3448 if (p)
3449 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3450 #ifdef TARGET_WORDS_BIGENDIAN
3451 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3452 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3453 #else
3454 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3455 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3456 #endif
3457 } else {
3458 /* RAM case */
3459 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3460 (addr & ~TARGET_PAGE_MASK);
3461 val = ldq_p(ptr);
3463 return val;
3466 /* XXX: optimize */
3467 uint32_t ldub_phys(target_phys_addr_t addr)
3469 uint8_t val;
3470 cpu_physical_memory_read(addr, &val, 1);
3471 return val;
3474 /* XXX: optimize */
3475 uint32_t lduw_phys(target_phys_addr_t addr)
3477 uint16_t val;
3478 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3479 return tswap16(val);
3482 /* warning: addr must be aligned. The ram page is not marked as dirty
3483 and the code inside is not invalidated. It is useful if the dirty
3484 bits are used to track modified PTEs */
3485 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3487 int io_index;
3488 uint8_t *ptr;
3489 unsigned long pd;
3490 PhysPageDesc *p;
3492 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3493 if (!p) {
3494 pd = IO_MEM_UNASSIGNED;
3495 } else {
3496 pd = p->phys_offset;
3499 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3500 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3501 if (p)
3502 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3503 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3504 } else {
3505 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3506 ptr = qemu_get_ram_ptr(addr1);
3507 stl_p(ptr, val);
3509 if (unlikely(in_migration)) {
3510 if (!cpu_physical_memory_is_dirty(addr1)) {
3511 /* invalidate code */
3512 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3513 /* set dirty bit */
3514 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3515 (0xff & ~CODE_DIRTY_FLAG);
3521 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3523 int io_index;
3524 uint8_t *ptr;
3525 unsigned long pd;
3526 PhysPageDesc *p;
3528 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3529 if (!p) {
3530 pd = IO_MEM_UNASSIGNED;
3531 } else {
3532 pd = p->phys_offset;
3535 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3536 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3537 if (p)
3538 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3539 #ifdef TARGET_WORDS_BIGENDIAN
3540 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3541 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3542 #else
3543 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3544 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3545 #endif
3546 } else {
3547 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3548 (addr & ~TARGET_PAGE_MASK);
3549 stq_p(ptr, val);
3553 /* warning: addr must be aligned */
3554 void stl_phys(target_phys_addr_t addr, uint32_t val)
3556 int io_index;
3557 uint8_t *ptr;
3558 unsigned long pd;
3559 PhysPageDesc *p;
3561 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3562 if (!p) {
3563 pd = IO_MEM_UNASSIGNED;
3564 } else {
3565 pd = p->phys_offset;
3568 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3569 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3570 if (p)
3571 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3572 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3573 } else {
3574 unsigned long addr1;
3575 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3576 /* RAM case */
3577 ptr = qemu_get_ram_ptr(addr1);
3578 stl_p(ptr, val);
3579 if (!cpu_physical_memory_is_dirty(addr1)) {
3580 /* invalidate code */
3581 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3582 /* set dirty bit */
3583 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3584 (0xff & ~CODE_DIRTY_FLAG);
3589 /* XXX: optimize */
3590 void stb_phys(target_phys_addr_t addr, uint32_t val)
3592 uint8_t v = val;
3593 cpu_physical_memory_write(addr, &v, 1);
3596 /* XXX: optimize */
3597 void stw_phys(target_phys_addr_t addr, uint32_t val)
3599 uint16_t v = tswap16(val);
3600 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3603 /* XXX: optimize */
3604 void stq_phys(target_phys_addr_t addr, uint64_t val)
3606 val = tswap64(val);
3607 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3610 #endif
3612 /* virtual memory access for debug (includes writing to ROM) */
3613 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3614 uint8_t *buf, int len, int is_write)
3616 int l;
3617 target_phys_addr_t phys_addr;
3618 target_ulong page;
3620 while (len > 0) {
3621 page = addr & TARGET_PAGE_MASK;
3622 phys_addr = cpu_get_phys_page_debug(env, page);
3623 /* if no physical page mapped, return an error */
3624 if (phys_addr == -1)
3625 return -1;
3626 l = (page + TARGET_PAGE_SIZE) - addr;
3627 if (l > len)
3628 l = len;
3629 phys_addr += (addr & ~TARGET_PAGE_MASK);
3630 #if !defined(CONFIG_USER_ONLY)
3631 if (is_write)
3632 cpu_physical_memory_write_rom(phys_addr, buf, l);
3633 else
3634 #endif
3635 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3636 len -= l;
3637 buf += l;
3638 addr += l;
3640 return 0;
3643 /* in deterministic execution mode, instructions doing device I/Os
3644 must be at the end of the TB */
3645 void cpu_io_recompile(CPUState *env, void *retaddr)
3647 TranslationBlock *tb;
3648 uint32_t n, cflags;
3649 target_ulong pc, cs_base;
3650 uint64_t flags;
3652 tb = tb_find_pc((unsigned long)retaddr);
3653 if (!tb) {
3654 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3655 retaddr);
3657 n = env->icount_decr.u16.low + tb->icount;
3658 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3659 /* Calculate how many instructions had been executed before the fault
3660 occurred. */
3661 n = n - env->icount_decr.u16.low;
3662 /* Generate a new TB ending on the I/O insn. */
3663 n++;
3664 /* On MIPS and SH, delay slot instructions can only be restarted if
3665 they were already the first instruction in the TB. If this is not
3666 the first instruction in a TB then re-execute the preceding
3667 branch. */
3668 #if defined(TARGET_MIPS)
3669 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3670 env->active_tc.PC -= 4;
3671 env->icount_decr.u16.low++;
3672 env->hflags &= ~MIPS_HFLAG_BMASK;
3674 #elif defined(TARGET_SH4)
3675 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3676 && n > 1) {
3677 env->pc -= 2;
3678 env->icount_decr.u16.low++;
3679 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3681 #endif
3682 /* This should never happen. */
3683 if (n > CF_COUNT_MASK)
3684 cpu_abort(env, "TB too big during recompile");
3686 cflags = n | CF_LAST_IO;
3687 pc = tb->pc;
3688 cs_base = tb->cs_base;
3689 flags = tb->flags;
3690 tb_phys_invalidate(tb, -1);
3691 /* FIXME: In theory this could raise an exception. In practice
3692 we have already translated the block once so it's probably ok. */
3693 tb_gen_code(env, pc, cs_base, flags, cflags);
3694 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3695 the first in the TB) then we end up generating a whole new TB and
3696 repeating the fault, which is horribly inefficient.
3697 Better would be to execute just this insn uncached, or generate a
3698 second new TB. */
3699 cpu_resume_from_signal(env, NULL);
3702 void dump_exec_info(FILE *f,
3703 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3705 int i, target_code_size, max_target_code_size;
3706 int direct_jmp_count, direct_jmp2_count, cross_page;
3707 TranslationBlock *tb;
3709 target_code_size = 0;
3710 max_target_code_size = 0;
3711 cross_page = 0;
3712 direct_jmp_count = 0;
3713 direct_jmp2_count = 0;
3714 for(i = 0; i < nb_tbs; i++) {
3715 tb = &tbs[i];
3716 target_code_size += tb->size;
3717 if (tb->size > max_target_code_size)
3718 max_target_code_size = tb->size;
3719 if (tb->page_addr[1] != -1)
3720 cross_page++;
3721 if (tb->tb_next_offset[0] != 0xffff) {
3722 direct_jmp_count++;
3723 if (tb->tb_next_offset[1] != 0xffff) {
3724 direct_jmp2_count++;
3728 /* XXX: avoid using doubles ? */
3729 cpu_fprintf(f, "Translation buffer state:\n");
3730 cpu_fprintf(f, "gen code size %ld/%ld\n",
3731 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3732 cpu_fprintf(f, "TB count %d/%d\n",
3733 nb_tbs, code_gen_max_blocks);
3734 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3735 nb_tbs ? target_code_size / nb_tbs : 0,
3736 max_target_code_size);
3737 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3738 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3739 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3740 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3741 cross_page,
3742 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3743 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3744 direct_jmp_count,
3745 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3746 direct_jmp2_count,
3747 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3748 cpu_fprintf(f, "\nStatistics:\n");
3749 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3750 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3751 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3752 tcg_dump_info(f, cpu_fprintf);
3755 #if !defined(CONFIG_USER_ONLY)
3757 #define MMUSUFFIX _cmmu
3758 #define GETPC() NULL
3759 #define env cpu_single_env
3760 #define SOFTMMU_CODE_ACCESS
3762 #define SHIFT 0
3763 #include "softmmu_template.h"
3765 #define SHIFT 1
3766 #include "softmmu_template.h"
3768 #define SHIFT 2
3769 #include "softmmu_template.h"
3771 #define SHIFT 3
3772 #include "softmmu_template.h"
3774 #undef env
3776 #endif