Fix 32-bit overflow in parallels image support
[qemu-kvm/fedora.git] / exec.c
blob a0e220310bb21147ce16013772c74fdbfd37c462
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "cache-utils.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "osdep.h"
46 #include "kvm.h"
47 #if defined(CONFIG_USER_ONLY)
48 #include <qemu.h>
49 #endif
51 //#define DEBUG_TB_INVALIDATE
52 //#define DEBUG_FLUSH
53 //#define DEBUG_TLB
54 //#define DEBUG_UNASSIGNED
56 /* make various TB consistency checks */
57 //#define DEBUG_TB_CHECK
58 //#define DEBUG_TLB_CHECK
60 //#define DEBUG_IOPORT
61 //#define DEBUG_SUBPAGE
63 #if !defined(CONFIG_USER_ONLY)
64 /* TB consistency checks only implemented for usermode emulation. */
65 #undef DEBUG_TB_CHECK
66 #endif
68 /* Quick hack to enable KSM support */
69 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
71 #define SMC_BITMAP_USE_THRESHOLD 10
73 #if defined(TARGET_SPARC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 41
75 #elif defined(TARGET_SPARC)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 36
77 #elif defined(TARGET_ALPHA)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #define TARGET_VIRT_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_PPC64)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 42
82 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
83 #define TARGET_PHYS_ADDR_SPACE_BITS 42
84 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
85 #define TARGET_PHYS_ADDR_SPACE_BITS 36
86 #elif defined(TARGET_IA64)
87 #define TARGET_PHYS_ADDR_SPACE_BITS 36
88 #else
89 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
90 #define TARGET_PHYS_ADDR_SPACE_BITS 32
91 #endif
93 static TranslationBlock *tbs;
94 int code_gen_max_blocks;
95 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
96 static int nb_tbs;
97 /* any access to the tbs or the page table must use this lock */
98 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
100 #if defined(__arm__) || defined(__sparc_v9__)
101 /* The prologue must be reachable with a direct jump. ARM and Sparc64
102 have limited branch ranges (possibly also PPC) so place it in a
103 section close to code segment. */
104 #define code_gen_section \
105 __attribute__((__section__(".gen_code"))) \
106 __attribute__((aligned (32)))
107 #elif defined(_WIN32)
108 /* Maximum alignment for Win32 is 16. */
109 #define code_gen_section \
110 __attribute__((aligned (16)))
111 #else
112 #define code_gen_section \
113 __attribute__((aligned (32)))
114 #endif
116 uint8_t code_gen_prologue[1024] code_gen_section;
117 static uint8_t *code_gen_buffer;
118 static unsigned long code_gen_buffer_size;
119 /* threshold to flush the translated code buffer */
120 static unsigned long code_gen_buffer_max_size;
121 uint8_t *code_gen_ptr;
123 #if !defined(CONFIG_USER_ONLY)
124 int phys_ram_fd;
125 uint8_t *phys_ram_dirty;
126 uint8_t *bios_mem;
127 static int in_migration;
129 typedef struct RAMBlock {
130 uint8_t *host;
131 ram_addr_t offset;
132 ram_addr_t length;
133 struct RAMBlock *next;
134 } RAMBlock;
136 static RAMBlock *ram_blocks;
137 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
138 then we can no longer assume contiguous ram offsets, and external uses
139 of this variable will break. */
140 ram_addr_t last_ram_offset;
141 #endif
143 CPUState *first_cpu;
144 /* current CPU in the current thread. It is only valid inside
145 cpu_exec() */
146 CPUState *cpu_single_env;
147 /* 0 = Do not count executed instructions.
148 1 = Precise instruction counting.
149 2 = Adaptive rate instruction counting. */
150 int use_icount = 0;
151 /* Current instruction counter. While executing translated code this may
152 include some instructions that have not yet been executed. */
153 int64_t qemu_icount;
155 typedef struct PageDesc {
156 /* list of TBs intersecting this ram page */
157 TranslationBlock *first_tb;
158 /* in order to optimize self modifying code, we count the number
159 of lookups we do to a given page to use a bitmap */
160 unsigned int code_write_count;
161 uint8_t *code_bitmap;
162 #if defined(CONFIG_USER_ONLY)
163 unsigned long flags;
164 #endif
165 } PageDesc;
167 typedef struct PhysPageDesc {
168 /* offset in host memory of the page + io_index in the low bits */
169 ram_addr_t phys_offset;
170 ram_addr_t region_offset;
171 } PhysPageDesc;
173 #define L2_BITS 10
174 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
175 /* XXX: this is a temporary hack for alpha target.
176 * In the future, this is to be replaced by a multi-level table
177 * to actually be able to handle the complete 64-bit address space.
178 */
179 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
180 #else
181 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
182 #endif
184 #define L1_SIZE (1 << L1_BITS)
185 #define L2_SIZE (1 << L2_BITS)
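/* The PageDesc table below is a two-level radix tree: the top L1_BITS of a
   target page index select an l1_map slot, the low L2_BITS select the entry
   inside the second-level array.  A minimal sketch of the same index
   arithmetic used by page_find()/page_find_alloc() further down (the helper
   name is hypothetical and the code is illustrative only): */
#if 0
static PageDesc *example_page_lookup(PageDesc **l1, target_ulong page_index)
{
    PageDesc *l2 = l1[page_index >> L2_BITS];   /* first level: top L1_BITS */
    if (!l2)
        return NULL;                            /* second level not allocated */
    return &l2[page_index & (L2_SIZE - 1)];     /* index inside the L2 array */
}
#endif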
187 unsigned long qemu_real_host_page_size;
188 unsigned long qemu_host_page_bits;
189 unsigned long qemu_host_page_size;
190 unsigned long qemu_host_page_mask;
192 /* XXX: for system emulation, it could just be an array */
193 static PageDesc *l1_map[L1_SIZE];
194 static PhysPageDesc **l1_phys_map;
196 #if !defined(CONFIG_USER_ONLY)
197 static void io_mem_init(void);
199 /* io memory support */
200 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
201 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
202 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
203 static char io_mem_used[IO_MEM_NB_ENTRIES];
204 static int io_mem_watch;
205 #endif
207 /* log support */
208 static const char *logfilename = "/tmp/qemu.log";
209 FILE *logfile;
210 int loglevel;
211 static int log_append = 0;
213 /* statistics */
214 static int tlb_flush_count;
215 static int tb_flush_count;
216 static int tb_phys_invalidate_count;
218 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
219 typedef struct subpage_t {
220 target_phys_addr_t base;
221 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
222 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
223 void *opaque[TARGET_PAGE_SIZE][2][4];
224 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
225 } subpage_t;
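/* SUBPAGE_IDX() above is simply the offset of an address within its target
   page (addr & ~TARGET_PAGE_MASK).  A subpage_t records, per byte offset and
   per access width, which I/O handler and region offset apply.  Worked
   example (illustrative): a 4 KiB target page split at offset 0x800 between
   two devices keeps the first device's handlers in slots 0x000-0x7ff and the
   second device's in slots 0x800-0xfff, so both can share the one page. */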
227 #ifdef _WIN32
228 static void map_exec(void *addr, long size)
230 DWORD old_protect;
231 VirtualProtect(addr, size,
232 PAGE_EXECUTE_READWRITE, &old_protect);
235 #else
236 static void map_exec(void *addr, long size)
238 unsigned long start, end, page_size;
240 page_size = getpagesize();
241 start = (unsigned long)addr;
242 start &= ~(page_size - 1);
244 end = (unsigned long)addr + size;
245 end += page_size - 1;
246 end &= ~(page_size - 1);
248 mprotect((void *)start, end - start,
249 PROT_READ | PROT_WRITE | PROT_EXEC);
251 #endif
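/* map_exec() above widens the range to whole host pages before calling
   mprotect().  Worked example (illustrative, assuming a 4 KiB page size):
   for addr = 0x401234 and size = 0x100,
       start = 0x401234 & ~0xfff           = 0x401000
       end   = (0x401334 + 0xfff) & ~0xfff = 0x402000
   so exactly one page, 0x401000-0x402000, becomes read/write/exec. */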
253 static void page_init(void)
255 /* NOTE: we can always suppose that qemu_host_page_size >=
256 TARGET_PAGE_SIZE */
257 #ifdef _WIN32
259 SYSTEM_INFO system_info;
261 GetSystemInfo(&system_info);
262 qemu_real_host_page_size = system_info.dwPageSize;
264 #else
265 qemu_real_host_page_size = getpagesize();
266 #endif
267 if (qemu_host_page_size == 0)
268 qemu_host_page_size = qemu_real_host_page_size;
269 if (qemu_host_page_size < TARGET_PAGE_SIZE)
270 qemu_host_page_size = TARGET_PAGE_SIZE;
271 qemu_host_page_bits = 0;
272 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
273 qemu_host_page_bits++;
274 qemu_host_page_mask = ~(qemu_host_page_size - 1);
275 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
276 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
278 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
280 long long startaddr, endaddr;
281 FILE *f;
282 int n;
284 mmap_lock();
285 last_brk = (unsigned long)sbrk(0);
286 f = fopen("/proc/self/maps", "r");
287 if (f) {
288 do {
289 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
290 if (n == 2) {
291 startaddr = MIN(startaddr,
292 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
293 endaddr = MIN(endaddr,
294 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
295 page_set_flags(startaddr & TARGET_PAGE_MASK,
296 TARGET_PAGE_ALIGN(endaddr),
297 PAGE_RESERVED);
299 } while (!feof(f));
300 fclose(f);
302 mmap_unlock();
304 #endif
307 static inline PageDesc **page_l1_map(target_ulong index)
309 #if TARGET_LONG_BITS > 32
310 /* Host memory outside guest VM. For 32-bit targets we have already
311 excluded high addresses. */
312 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
313 return NULL;
314 #endif
315 return &l1_map[index >> L2_BITS];
318 static inline PageDesc *page_find_alloc(target_ulong index)
320 PageDesc **lp, *p;
321 lp = page_l1_map(index);
322 if (!lp)
323 return NULL;
325 p = *lp;
326 if (!p) {
327 /* allocate if not found */
328 #if defined(CONFIG_USER_ONLY)
329 size_t len = sizeof(PageDesc) * L2_SIZE;
330 /* Don't use qemu_malloc because it may recurse. */
331 p = mmap(NULL, len, PROT_READ | PROT_WRITE,
332 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
333 *lp = p;
334 if (h2g_valid(p)) {
335 unsigned long addr = h2g(p);
336 page_set_flags(addr & TARGET_PAGE_MASK,
337 TARGET_PAGE_ALIGN(addr + len),
338 PAGE_RESERVED);
340 #else
341 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
342 *lp = p;
343 #endif
345 return p + (index & (L2_SIZE - 1));
348 static inline PageDesc *page_find(target_ulong index)
350 PageDesc **lp, *p;
351 lp = page_l1_map(index);
352 if (!lp)
353 return NULL;
355 p = *lp;
356 if (!p) {
357 return NULL;
359 return p + (index & (L2_SIZE - 1));
362 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
364 void **lp, **p;
365 PhysPageDesc *pd;
367 p = (void **)l1_phys_map;
368 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
370 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
371 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
372 #endif
373 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
374 p = *lp;
375 if (!p) {
376 /* allocate if not found */
377 if (!alloc)
378 return NULL;
379 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
380 memset(p, 0, sizeof(void *) * L1_SIZE);
381 *lp = p;
383 #endif
384 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
385 pd = *lp;
386 if (!pd) {
387 int i;
388 /* allocate if not found */
389 if (!alloc)
390 return NULL;
391 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
392 *lp = pd;
393 for (i = 0; i < L2_SIZE; i++) {
394 pd[i].phys_offset = IO_MEM_UNASSIGNED;
395 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
398 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
401 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
403 return phys_page_find_alloc(index, 0);
406 #if !defined(CONFIG_USER_ONLY)
407 static void tlb_protect_code(ram_addr_t ram_addr);
408 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
409 target_ulong vaddr);
410 #define mmap_lock() do { } while(0)
411 #define mmap_unlock() do { } while(0)
412 #endif
414 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
416 #if defined(CONFIG_USER_ONLY)
417 /* Currently it is not recommended to allocate big chunks of data in
418 user mode. It will change when a dedicated libc is used */
419 #define USE_STATIC_CODE_GEN_BUFFER
420 #endif
422 #ifdef USE_STATIC_CODE_GEN_BUFFER
423 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
424 #endif
426 static void code_gen_alloc(unsigned long tb_size)
428 if (kvm_enabled())
429 return;
431 #ifdef USE_STATIC_CODE_GEN_BUFFER
432 code_gen_buffer = static_code_gen_buffer;
433 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
434 map_exec(code_gen_buffer, code_gen_buffer_size);
435 #else
436 code_gen_buffer_size = tb_size;
437 if (code_gen_buffer_size == 0) {
438 #if defined(CONFIG_USER_ONLY)
439 /* in user mode, phys_ram_size is not meaningful */
440 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
441 #else
442 /* XXX: needs adjustments */
443 code_gen_buffer_size = (unsigned long)(ram_size / 4);
444 #endif
446 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
447 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
448 /* The code gen buffer location may have constraints depending on
449 the host cpu and OS */
450 #if defined(__linux__)
452 int flags;
453 void *start = NULL;
455 flags = MAP_PRIVATE | MAP_ANONYMOUS;
456 #if defined(__x86_64__)
457 flags |= MAP_32BIT;
458 /* Cannot map more than that */
459 if (code_gen_buffer_size > (800 * 1024 * 1024))
460 code_gen_buffer_size = (800 * 1024 * 1024);
461 #elif defined(__sparc_v9__)
462 // Map the buffer below 2G, so we can use direct calls and branches
463 flags |= MAP_FIXED;
464 start = (void *) 0x60000000UL;
465 if (code_gen_buffer_size > (512 * 1024 * 1024))
466 code_gen_buffer_size = (512 * 1024 * 1024);
467 #elif defined(__arm__)
468 /* Map the buffer below 32M, so we can use direct calls and branches */
469 flags |= MAP_FIXED;
470 start = (void *) 0x01000000UL;
471 if (code_gen_buffer_size > 16 * 1024 * 1024)
472 code_gen_buffer_size = 16 * 1024 * 1024;
473 #endif
474 code_gen_buffer = mmap(start, code_gen_buffer_size,
475 PROT_WRITE | PROT_READ | PROT_EXEC,
476 flags, -1, 0);
477 if (code_gen_buffer == MAP_FAILED) {
478 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
479 exit(1);
482 #elif defined(__FreeBSD__) || defined(__DragonFly__)
484 int flags;
485 void *addr = NULL;
486 flags = MAP_PRIVATE | MAP_ANONYMOUS;
487 #if defined(__x86_64__)
488 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
489 * 0x40000000 is free */
490 flags |= MAP_FIXED;
491 addr = (void *)0x40000000;
492 /* Cannot map more than that */
493 if (code_gen_buffer_size > (800 * 1024 * 1024))
494 code_gen_buffer_size = (800 * 1024 * 1024);
495 #endif
496 code_gen_buffer = mmap(addr, code_gen_buffer_size,
497 PROT_WRITE | PROT_READ | PROT_EXEC,
498 flags, -1, 0);
499 if (code_gen_buffer == MAP_FAILED) {
500 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
501 exit(1);
504 #else
505 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
506 map_exec(code_gen_buffer, code_gen_buffer_size);
507 #endif
508 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
509 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
510 code_gen_buffer_max_size = code_gen_buffer_size -
511 code_gen_max_block_size();
512 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
513 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
516 /* Must be called before using the QEMU cpus. 'tb_size' is the size
517 (in bytes) allocated to the translation buffer. Zero means default
518 size. */
519 void cpu_exec_init_all(unsigned long tb_size)
521 cpu_gen_init();
522 code_gen_alloc(tb_size);
523 code_gen_ptr = code_gen_buffer;
524 page_init();
525 #if !defined(CONFIG_USER_ONLY)
526 io_mem_init();
527 #endif
530 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
532 #define CPU_COMMON_SAVE_VERSION 1
534 static void cpu_common_save(QEMUFile *f, void *opaque)
536 CPUState *env = opaque;
538 cpu_synchronize_state(env, 0);
540 qemu_put_be32s(f, &env->halted);
541 qemu_put_be32s(f, &env->interrupt_request);
544 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
546 CPUState *env = opaque;
548 if (version_id != CPU_COMMON_SAVE_VERSION)
549 return -EINVAL;
551 qemu_get_be32s(f, &env->halted);
552 qemu_get_be32s(f, &env->interrupt_request);
553 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
554 version_id is increased. */
555 env->interrupt_request &= ~0x01;
556 tlb_flush(env, 1);
557 cpu_synchronize_state(env, 1);
559 return 0;
561 #endif
563 CPUState *qemu_get_cpu(int cpu)
565 CPUState *env = first_cpu;
567 while (env) {
568 if (env->cpu_index == cpu)
569 break;
570 env = env->next_cpu;
573 return env;
576 void cpu_exec_init(CPUState *env)
578 CPUState **penv;
579 int cpu_index;
581 #if defined(CONFIG_USER_ONLY)
582 cpu_list_lock();
583 #endif
584 env->next_cpu = NULL;
585 penv = &first_cpu;
586 cpu_index = 0;
587 while (*penv != NULL) {
588 penv = &(*penv)->next_cpu;
589 cpu_index++;
591 env->cpu_index = cpu_index;
592 env->numa_node = 0;
593 TAILQ_INIT(&env->breakpoints);
594 TAILQ_INIT(&env->watchpoints);
595 #ifdef __WIN32
596 env->thread_id = GetCurrentProcessId();
597 #else
598 env->thread_id = getpid();
599 #endif
600 *penv = env;
601 #if defined(CONFIG_USER_ONLY)
602 cpu_list_unlock();
603 #endif
604 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
605 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
606 cpu_common_save, cpu_common_load, env);
607 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
608 cpu_save, cpu_load, env);
609 #endif
612 static inline void invalidate_page_bitmap(PageDesc *p)
614 if (p->code_bitmap) {
615 qemu_free(p->code_bitmap);
616 p->code_bitmap = NULL;
618 p->code_write_count = 0;
621 /* set to NULL all the 'first_tb' fields in all PageDescs */
622 static void page_flush_tb(void)
624 int i, j;
625 PageDesc *p;
627 for(i = 0; i < L1_SIZE; i++) {
628 p = l1_map[i];
629 if (p) {
630 for(j = 0; j < L2_SIZE; j++) {
631 p->first_tb = NULL;
632 invalidate_page_bitmap(p);
633 p++;
639 /* flush all the translation blocks */
640 /* XXX: tb_flush is currently not thread safe */
641 void tb_flush(CPUState *env1)
643 CPUState *env;
644 #if defined(DEBUG_FLUSH)
645 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
646 (unsigned long)(code_gen_ptr - code_gen_buffer),
647 nb_tbs, nb_tbs > 0 ?
648 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
649 #endif
650 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
651 cpu_abort(env1, "Internal error: code buffer overflow\n");
653 nb_tbs = 0;
655 for(env = first_cpu; env != NULL; env = env->next_cpu) {
656 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
659 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
660 page_flush_tb();
662 code_gen_ptr = code_gen_buffer;
663 /* XXX: flush processor icache at this point if cache flush is
664 expensive */
665 tb_flush_count++;
668 #ifdef DEBUG_TB_CHECK
670 static void tb_invalidate_check(target_ulong address)
672 TranslationBlock *tb;
673 int i;
674 address &= TARGET_PAGE_MASK;
675 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
676 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
677 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
678 address >= tb->pc + tb->size)) {
679 printf("ERROR invalidate: address=" TARGET_FMT_lx
680 " PC=%08lx size=%04x\n",
681 address, (long)tb->pc, tb->size);
687 /* verify that all the pages have correct rights for code */
688 static void tb_page_check(void)
690 TranslationBlock *tb;
691 int i, flags1, flags2;
693 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
694 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
695 flags1 = page_get_flags(tb->pc);
696 flags2 = page_get_flags(tb->pc + tb->size - 1);
697 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
698 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
699 (long)tb->pc, tb->size, flags1, flags2);
705 #endif
707 /* invalidate one TB */
708 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
709 int next_offset)
711 TranslationBlock *tb1;
712 for(;;) {
713 tb1 = *ptb;
714 if (tb1 == tb) {
715 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
716 break;
718 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
722 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
724 TranslationBlock *tb1;
725 unsigned int n1;
727 for(;;) {
728 tb1 = *ptb;
729 n1 = (long)tb1 & 3;
730 tb1 = (TranslationBlock *)((long)tb1 & ~3);
731 if (tb1 == tb) {
732 *ptb = tb1->page_next[n1];
733 break;
735 ptb = &tb1->page_next[n1];
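/* The TB lists manipulated here keep a 2-bit tag in the low bits of each
   pointer: for the per-page lists the tag is the index (0 or 1) of the page
   inside the next TB, and the value 2 marks the terminator of the circular
   jmp_first list used below.  Illustrative helpers (hypothetical names,
   relying on TranslationBlock being at least 4-byte aligned): */
#if 0
static inline TranslationBlock *tb_untag(TranslationBlock *tagged)
{
    return (TranslationBlock *)((long)tagged & ~3);      /* strip the tag bits */
}

static inline unsigned int tb_tag(TranslationBlock *tagged)
{
    return (unsigned int)((long)tagged & 3);             /* 0, 1 or 2 */
}

static inline TranslationBlock *tb_with_tag(TranslationBlock *tb, unsigned int n)
{
    return (TranslationBlock *)((long)tb | n);           /* pack tag into pointer */
}
#endif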
739 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
741 TranslationBlock *tb1, **ptb;
742 unsigned int n1;
744 ptb = &tb->jmp_next[n];
745 tb1 = *ptb;
746 if (tb1) {
747 /* find tb(n) in circular list */
748 for(;;) {
749 tb1 = *ptb;
750 n1 = (long)tb1 & 3;
751 tb1 = (TranslationBlock *)((long)tb1 & ~3);
752 if (n1 == n && tb1 == tb)
753 break;
754 if (n1 == 2) {
755 ptb = &tb1->jmp_first;
756 } else {
757 ptb = &tb1->jmp_next[n1];
760 /* now we can remove tb(n) from the list */
761 *ptb = tb->jmp_next[n];
763 tb->jmp_next[n] = NULL;
767 /* reset the jump entry 'n' of a TB so that it is not chained to
768 another TB */
769 static inline void tb_reset_jump(TranslationBlock *tb, int n)
771 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
774 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
776 CPUState *env;
777 PageDesc *p;
778 unsigned int h, n1;
779 target_phys_addr_t phys_pc;
780 TranslationBlock *tb1, *tb2;
782 /* remove the TB from the hash list */
783 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
784 h = tb_phys_hash_func(phys_pc);
785 tb_remove(&tb_phys_hash[h], tb,
786 offsetof(TranslationBlock, phys_hash_next));
788 /* remove the TB from the page list */
789 if (tb->page_addr[0] != page_addr) {
790 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
791 tb_page_remove(&p->first_tb, tb);
792 invalidate_page_bitmap(p);
794 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
795 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
796 tb_page_remove(&p->first_tb, tb);
797 invalidate_page_bitmap(p);
800 tb_invalidated_flag = 1;
802 /* remove the TB from the hash list */
803 h = tb_jmp_cache_hash_func(tb->pc);
804 for(env = first_cpu; env != NULL; env = env->next_cpu) {
805 if (env->tb_jmp_cache[h] == tb)
806 env->tb_jmp_cache[h] = NULL;
809 /* remove this TB from the two jump lists */
810 tb_jmp_remove(tb, 0);
811 tb_jmp_remove(tb, 1);
813 /* suppress any remaining jumps to this TB */
814 tb1 = tb->jmp_first;
815 for(;;) {
816 n1 = (long)tb1 & 3;
817 if (n1 == 2)
818 break;
819 tb1 = (TranslationBlock *)((long)tb1 & ~3);
820 tb2 = tb1->jmp_next[n1];
821 tb_reset_jump(tb1, n1);
822 tb1->jmp_next[n1] = NULL;
823 tb1 = tb2;
825 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
827 tb_phys_invalidate_count++;
830 static inline void set_bits(uint8_t *tab, int start, int len)
832 int end, mask, end1;
834 end = start + len;
835 tab += start >> 3;
836 mask = 0xff << (start & 7);
837 if ((start & ~7) == (end & ~7)) {
838 if (start < end) {
839 mask &= ~(0xff << (end & 7));
840 *tab |= mask;
842 } else {
843 *tab++ |= mask;
844 start = (start + 8) & ~7;
845 end1 = end & ~7;
846 while (start < end1) {
847 *tab++ = 0xff;
848 start += 8;
850 if (start < end) {
851 mask = ~(0xff << (end & 7));
852 *tab |= mask;
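/* Worked example for set_bits() above (illustrative): start = 5, len = 12
   marks bits 5..16 of the bitmap:
       tab[0] |= 0xe0   (bits 5-7, partial first byte)
       tab[1]  = 0xff   (bits 8-15, whole middle byte)
       tab[2] |= 0x01   (bit 16, partial last byte)
   build_page_bitmap() below uses this to record which byte ranges of a page
   are covered by translated code. */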
857 static void build_page_bitmap(PageDesc *p)
859 int n, tb_start, tb_end;
860 TranslationBlock *tb;
862 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
864 tb = p->first_tb;
865 while (tb != NULL) {
866 n = (long)tb & 3;
867 tb = (TranslationBlock *)((long)tb & ~3);
868 /* NOTE: this is subtle as a TB may span two physical pages */
869 if (n == 0) {
870 /* NOTE: tb_end may be after the end of the page, but
871 it is not a problem */
872 tb_start = tb->pc & ~TARGET_PAGE_MASK;
873 tb_end = tb_start + tb->size;
874 if (tb_end > TARGET_PAGE_SIZE)
875 tb_end = TARGET_PAGE_SIZE;
876 } else {
877 tb_start = 0;
878 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
880 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
881 tb = tb->page_next[n];
885 TranslationBlock *tb_gen_code(CPUState *env,
886 target_ulong pc, target_ulong cs_base,
887 int flags, int cflags)
889 TranslationBlock *tb;
890 uint8_t *tc_ptr;
891 target_ulong phys_pc, phys_page2, virt_page2;
892 int code_gen_size;
894 phys_pc = get_phys_addr_code(env, pc);
895 tb = tb_alloc(pc);
896 if (!tb) {
897 /* flush must be done */
898 tb_flush(env);
899 /* cannot fail at this point */
900 tb = tb_alloc(pc);
901 /* Don't forget to invalidate previous TB info. */
902 tb_invalidated_flag = 1;
904 tc_ptr = code_gen_ptr;
905 tb->tc_ptr = tc_ptr;
906 tb->cs_base = cs_base;
907 tb->flags = flags;
908 tb->cflags = cflags;
909 cpu_gen_code(env, tb, &code_gen_size);
910 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
912 /* check next page if needed */
913 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
914 phys_page2 = -1;
915 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
916 phys_page2 = get_phys_addr_code(env, virt_page2);
918 tb_link_phys(tb, phys_pc, phys_page2);
919 return tb;
922 /* invalidate all TBs which intersect with the target physical page
923 starting in range [start, end). NOTE: start and end must refer to
924 the same physical page. 'is_cpu_write_access' should be true if called
925 from a real cpu write access: the virtual CPU will exit the current
926 TB if code is modified inside this TB. */
927 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
928 int is_cpu_write_access)
930 TranslationBlock *tb, *tb_next, *saved_tb;
931 CPUState *env = cpu_single_env;
932 target_ulong tb_start, tb_end;
933 PageDesc *p;
934 int n;
935 #ifdef TARGET_HAS_PRECISE_SMC
936 int current_tb_not_found = is_cpu_write_access;
937 TranslationBlock *current_tb = NULL;
938 int current_tb_modified = 0;
939 target_ulong current_pc = 0;
940 target_ulong current_cs_base = 0;
941 int current_flags = 0;
942 #endif /* TARGET_HAS_PRECISE_SMC */
944 p = page_find(start >> TARGET_PAGE_BITS);
945 if (!p)
946 return;
947 if (!p->code_bitmap &&
948 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
949 is_cpu_write_access) {
950 /* build code bitmap */
951 build_page_bitmap(p);
954 /* we remove all the TBs in the range [start, end) */
955 /* XXX: see if in some cases it could be faster to invalidate all the code */
956 tb = p->first_tb;
957 while (tb != NULL) {
958 n = (long)tb & 3;
959 tb = (TranslationBlock *)((long)tb & ~3);
960 tb_next = tb->page_next[n];
961 /* NOTE: this is subtle as a TB may span two physical pages */
962 if (n == 0) {
963 /* NOTE: tb_end may be after the end of the page, but
964 it is not a problem */
965 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
966 tb_end = tb_start + tb->size;
967 } else {
968 tb_start = tb->page_addr[1];
969 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
971 if (!(tb_end <= start || tb_start >= end)) {
972 #ifdef TARGET_HAS_PRECISE_SMC
973 if (current_tb_not_found) {
974 current_tb_not_found = 0;
975 current_tb = NULL;
976 if (env->mem_io_pc) {
977 /* now we have a real cpu fault */
978 current_tb = tb_find_pc(env->mem_io_pc);
981 if (current_tb == tb &&
982 (current_tb->cflags & CF_COUNT_MASK) != 1) {
983 /* If we are modifying the current TB, we must stop
984 its execution. We could be more precise by checking
985 that the modification is after the current PC, but it
986 would require a specialized function to partially
987 restore the CPU state */
989 current_tb_modified = 1;
990 cpu_restore_state(current_tb, env,
991 env->mem_io_pc, NULL);
992 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
993 &current_flags);
995 #endif /* TARGET_HAS_PRECISE_SMC */
996 /* we need to do that to handle the case where a signal
997 occurs while doing tb_phys_invalidate() */
998 saved_tb = NULL;
999 if (env) {
1000 saved_tb = env->current_tb;
1001 env->current_tb = NULL;
1003 tb_phys_invalidate(tb, -1);
1004 if (env) {
1005 env->current_tb = saved_tb;
1006 if (env->interrupt_request && env->current_tb)
1007 cpu_interrupt(env, env->interrupt_request);
1010 tb = tb_next;
1012 #if !defined(CONFIG_USER_ONLY)
1013 /* if no code remaining, no need to continue to use slow writes */
1014 if (!p->first_tb) {
1015 invalidate_page_bitmap(p);
1016 if (is_cpu_write_access) {
1017 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1020 #endif
1021 #ifdef TARGET_HAS_PRECISE_SMC
1022 if (current_tb_modified) {
1023 /* we generate a block containing just the instruction
1024 modifying the memory. It will ensure that it cannot modify
1025 itself */
1026 env->current_tb = NULL;
1027 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1028 cpu_resume_from_signal(env, NULL);
1030 #endif
1033 /* len must be <= 8 and start must be a multiple of len */
1034 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1036 PageDesc *p;
1037 int offset, b;
1038 #if 0
1039 if (1) {
1040 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1041 cpu_single_env->mem_io_vaddr, len,
1042 cpu_single_env->eip,
1043 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1045 #endif
1046 p = page_find(start >> TARGET_PAGE_BITS);
1047 if (!p)
1048 return;
1049 if (p->code_bitmap) {
1050 offset = start & ~TARGET_PAGE_MASK;
1051 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1052 if (b & ((1 << len) - 1))
1053 goto do_invalidate;
1054 } else {
1055 do_invalidate:
1056 tb_invalidate_phys_page_range(start, start + len, 1);
1060 #if !defined(CONFIG_SOFTMMU)
1061 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1062 unsigned long pc, void *puc)
1064 TranslationBlock *tb;
1065 PageDesc *p;
1066 int n;
1067 #ifdef TARGET_HAS_PRECISE_SMC
1068 TranslationBlock *current_tb = NULL;
1069 CPUState *env = cpu_single_env;
1070 int current_tb_modified = 0;
1071 target_ulong current_pc = 0;
1072 target_ulong current_cs_base = 0;
1073 int current_flags = 0;
1074 #endif
1076 addr &= TARGET_PAGE_MASK;
1077 p = page_find(addr >> TARGET_PAGE_BITS);
1078 if (!p)
1079 return;
1080 tb = p->first_tb;
1081 #ifdef TARGET_HAS_PRECISE_SMC
1082 if (tb && pc != 0) {
1083 current_tb = tb_find_pc(pc);
1085 #endif
1086 while (tb != NULL) {
1087 n = (long)tb & 3;
1088 tb = (TranslationBlock *)((long)tb & ~3);
1089 #ifdef TARGET_HAS_PRECISE_SMC
1090 if (current_tb == tb &&
1091 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1092 /* If we are modifying the current TB, we must stop
1093 its execution. We could be more precise by checking
1094 that the modification is after the current PC, but it
1095 would require a specialized function to partially
1096 restore the CPU state */
1098 current_tb_modified = 1;
1099 cpu_restore_state(current_tb, env, pc, puc);
1100 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1101 &current_flags);
1103 #endif /* TARGET_HAS_PRECISE_SMC */
1104 tb_phys_invalidate(tb, addr);
1105 tb = tb->page_next[n];
1107 p->first_tb = NULL;
1108 #ifdef TARGET_HAS_PRECISE_SMC
1109 if (current_tb_modified) {
1110 /* we generate a block containing just the instruction
1111 modifying the memory. It will ensure that it cannot modify
1112 itself */
1113 env->current_tb = NULL;
1114 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1115 cpu_resume_from_signal(env, puc);
1117 #endif
1119 #endif
1121 /* add the tb in the target page and protect it if necessary */
1122 static inline void tb_alloc_page(TranslationBlock *tb,
1123 unsigned int n, target_ulong page_addr)
1125 PageDesc *p;
1126 TranslationBlock *last_first_tb;
1128 tb->page_addr[n] = page_addr;
1129 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1130 tb->page_next[n] = p->first_tb;
1131 last_first_tb = p->first_tb;
1132 p->first_tb = (TranslationBlock *)((long)tb | n);
1133 invalidate_page_bitmap(p);
1135 #if defined(TARGET_HAS_SMC) || 1
1137 #if defined(CONFIG_USER_ONLY)
1138 if (p->flags & PAGE_WRITE) {
1139 target_ulong addr;
1140 PageDesc *p2;
1141 int prot;
1143 /* force the host page as non writable (writes will have a
1144 page fault + mprotect overhead) */
1145 page_addr &= qemu_host_page_mask;
1146 prot = 0;
1147 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1148 addr += TARGET_PAGE_SIZE) {
1150 p2 = page_find (addr >> TARGET_PAGE_BITS);
1151 if (!p2)
1152 continue;
1153 prot |= p2->flags;
1154 p2->flags &= ~PAGE_WRITE;
1155 page_get_flags(addr);
1157 mprotect(g2h(page_addr), qemu_host_page_size,
1158 (prot & PAGE_BITS) & ~PAGE_WRITE);
1159 #ifdef DEBUG_TB_INVALIDATE
1160 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1161 page_addr);
1162 #endif
1164 #else
1165 /* if some code is already present, then the pages are already
1166 protected. So we handle the case where only the first TB is
1167 allocated in a physical page */
1168 if (!last_first_tb) {
1169 tlb_protect_code(page_addr);
1171 #endif
1173 #endif /* TARGET_HAS_SMC */
1176 /* Allocate a new translation block. Flush the translation buffer if
1177 too many translation blocks or too much generated code. */
1178 TranslationBlock *tb_alloc(target_ulong pc)
1180 TranslationBlock *tb;
1182 if (nb_tbs >= code_gen_max_blocks ||
1183 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1184 return NULL;
1185 tb = &tbs[nb_tbs++];
1186 tb->pc = pc;
1187 tb->cflags = 0;
1188 return tb;
1191 void tb_free(TranslationBlock *tb)
1193 /* In practice this is mostly used for single-use temporary TBs.
1194 Ignore the hard cases and just back up if this TB happens to
1195 be the last one generated. */
1196 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1197 code_gen_ptr = tb->tc_ptr;
1198 nb_tbs--;
1202 /* add a new TB and link it to the physical page tables. phys_page2 is
1203 (-1) to indicate that only one page contains the TB. */
1204 void tb_link_phys(TranslationBlock *tb,
1205 target_ulong phys_pc, target_ulong phys_page2)
1207 unsigned int h;
1208 TranslationBlock **ptb;
1210 /* Grab the mmap lock to stop another thread invalidating this TB
1211 before we are done. */
1212 mmap_lock();
1213 /* add in the physical hash table */
1214 h = tb_phys_hash_func(phys_pc);
1215 ptb = &tb_phys_hash[h];
1216 tb->phys_hash_next = *ptb;
1217 *ptb = tb;
1219 /* add in the page list */
1220 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1221 if (phys_page2 != -1)
1222 tb_alloc_page(tb, 1, phys_page2);
1223 else
1224 tb->page_addr[1] = -1;
1226 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1227 tb->jmp_next[0] = NULL;
1228 tb->jmp_next[1] = NULL;
1230 /* init original jump addresses */
1231 if (tb->tb_next_offset[0] != 0xffff)
1232 tb_reset_jump(tb, 0);
1233 if (tb->tb_next_offset[1] != 0xffff)
1234 tb_reset_jump(tb, 1);
1236 #ifdef DEBUG_TB_CHECK
1237 tb_page_check();
1238 #endif
1239 mmap_unlock();
1242 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1243 tb[1].tc_ptr. Return NULL if not found */
1244 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1246 int m_min, m_max, m;
1247 unsigned long v;
1248 TranslationBlock *tb;
1250 if (nb_tbs <= 0)
1251 return NULL;
1252 if (tc_ptr < (unsigned long)code_gen_buffer ||
1253 tc_ptr >= (unsigned long)code_gen_ptr)
1254 return NULL;
1255 /* binary search (cf Knuth) */
1256 m_min = 0;
1257 m_max = nb_tbs - 1;
1258 while (m_min <= m_max) {
1259 m = (m_min + m_max) >> 1;
1260 tb = &tbs[m];
1261 v = (unsigned long)tb->tc_ptr;
1262 if (v == tc_ptr)
1263 return tb;
1264 else if (tc_ptr < v) {
1265 m_max = m - 1;
1266 } else {
1267 m_min = m + 1;
1270 return &tbs[m_max];
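/* tb_find_pc() works because tbs[] is filled in the same order the code is
   emitted into code_gen_buffer, so the tc_ptr values are already sorted and
   a plain binary search suffices.  tb_invalidate_phys_page_range() above
   uses it to recover the currently executing TB from env->mem_io_pc when
   handling self-modifying code. */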
1273 static void tb_reset_jump_recursive(TranslationBlock *tb);
1275 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1277 TranslationBlock *tb1, *tb_next, **ptb;
1278 unsigned int n1;
1280 tb1 = tb->jmp_next[n];
1281 if (tb1 != NULL) {
1282 /* find head of list */
1283 for(;;) {
1284 n1 = (long)tb1 & 3;
1285 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1286 if (n1 == 2)
1287 break;
1288 tb1 = tb1->jmp_next[n1];
1290 /* we are now sure that tb jumps to tb1 */
1291 tb_next = tb1;
1293 /* remove tb from the jmp_first list */
1294 ptb = &tb_next->jmp_first;
1295 for(;;) {
1296 tb1 = *ptb;
1297 n1 = (long)tb1 & 3;
1298 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1299 if (n1 == n && tb1 == tb)
1300 break;
1301 ptb = &tb1->jmp_next[n1];
1303 *ptb = tb->jmp_next[n];
1304 tb->jmp_next[n] = NULL;
1306 /* suppress the jump to next tb in generated code */
1307 tb_reset_jump(tb, n);
1309 /* suppress jumps in the tb on which we could have jumped */
1310 tb_reset_jump_recursive(tb_next);
1314 static void tb_reset_jump_recursive(TranslationBlock *tb)
1316 tb_reset_jump_recursive2(tb, 0);
1317 tb_reset_jump_recursive2(tb, 1);
1320 #if defined(TARGET_HAS_ICE)
1321 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1323 target_phys_addr_t addr;
1324 target_ulong pd;
1325 ram_addr_t ram_addr;
1326 PhysPageDesc *p;
1328 addr = cpu_get_phys_page_debug(env, pc);
1329 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1330 if (!p) {
1331 pd = IO_MEM_UNASSIGNED;
1332 } else {
1333 pd = p->phys_offset;
1335 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1336 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1338 #endif
1340 /* Add a watchpoint. */
1341 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1342 int flags, CPUWatchpoint **watchpoint)
1344 target_ulong len_mask = ~(len - 1);
1345 CPUWatchpoint *wp;
1347 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1348 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1349 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1350 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1351 return -EINVAL;
1353 wp = qemu_malloc(sizeof(*wp));
1355 wp->vaddr = addr;
1356 wp->len_mask = len_mask;
1357 wp->flags = flags;
1359 /* keep all GDB-injected watchpoints in front */
1360 if (flags & BP_GDB)
1361 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1362 else
1363 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1365 tlb_flush_page(env, addr);
1367 if (watchpoint)
1368 *watchpoint = wp;
1369 return 0;
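/* Worked example for the sanity check above (illustrative): len = 4 gives
   len_mask = ~(target_ulong)3, so addr & ~len_mask tests the low two bits;
   addr = 0x1000 is accepted while addr = 0x1002 fails with -EINVAL, and
   len = 3 is rejected outright by the power-of-2 test. */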
1372 /* Remove a specific watchpoint. */
1373 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1374 int flags)
1376 target_ulong len_mask = ~(len - 1);
1377 CPUWatchpoint *wp;
1379 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1380 if (addr == wp->vaddr && len_mask == wp->len_mask
1381 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1382 cpu_watchpoint_remove_by_ref(env, wp);
1383 return 0;
1386 return -ENOENT;
1389 /* Remove a specific watchpoint by reference. */
1390 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1392 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1394 tlb_flush_page(env, watchpoint->vaddr);
1396 qemu_free(watchpoint);
1399 /* Remove all matching watchpoints. */
1400 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1402 CPUWatchpoint *wp, *next;
1404 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1405 if (wp->flags & mask)
1406 cpu_watchpoint_remove_by_ref(env, wp);
1410 /* Add a breakpoint. */
1411 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1412 CPUBreakpoint **breakpoint)
1414 #if defined(TARGET_HAS_ICE)
1415 CPUBreakpoint *bp;
1417 bp = qemu_malloc(sizeof(*bp));
1419 bp->pc = pc;
1420 bp->flags = flags;
1422 /* keep all GDB-injected breakpoints in front */
1423 if (flags & BP_GDB)
1424 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1425 else
1426 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1428 breakpoint_invalidate(env, pc);
1430 if (breakpoint)
1431 *breakpoint = bp;
1432 return 0;
1433 #else
1434 return -ENOSYS;
1435 #endif
1438 /* Remove a specific breakpoint. */
1439 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1441 #if defined(TARGET_HAS_ICE)
1442 CPUBreakpoint *bp;
1444 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1445 if (bp->pc == pc && bp->flags == flags) {
1446 cpu_breakpoint_remove_by_ref(env, bp);
1447 return 0;
1450 return -ENOENT;
1451 #else
1452 return -ENOSYS;
1453 #endif
1456 /* Remove a specific breakpoint by reference. */
1457 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1459 #if defined(TARGET_HAS_ICE)
1460 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1462 breakpoint_invalidate(env, breakpoint->pc);
1464 qemu_free(breakpoint);
1465 #endif
1468 /* Remove all matching breakpoints. */
1469 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1471 #if defined(TARGET_HAS_ICE)
1472 CPUBreakpoint *bp, *next;
1474 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1475 if (bp->flags & mask)
1476 cpu_breakpoint_remove_by_ref(env, bp);
1478 #endif
1481 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1482 CPU loop after each instruction */
1483 void cpu_single_step(CPUState *env, int enabled)
1485 #if defined(TARGET_HAS_ICE)
1486 if (env->singlestep_enabled != enabled) {
1487 env->singlestep_enabled = enabled;
1488 if (kvm_enabled())
1489 kvm_update_guest_debug(env, 0);
1490 else {
1491 /* must flush all the translated code to avoid inconsistencies */
1492 /* XXX: only flush what is necessary */
1493 tb_flush(env);
1496 #endif
1499 /* enable or disable low-level logging */
1500 void cpu_set_log(int log_flags)
1502 loglevel = log_flags;
1503 if (loglevel && !logfile) {
1504 logfile = fopen(logfilename, log_append ? "a" : "w");
1505 if (!logfile) {
1506 perror(logfilename);
1507 _exit(1);
1509 #if !defined(CONFIG_SOFTMMU)
1510 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1512 static char logfile_buf[4096];
1513 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1515 #else
1516 setvbuf(logfile, NULL, _IOLBF, 0);
1517 #endif
1518 log_append = 1;
1520 if (!loglevel && logfile) {
1521 fclose(logfile);
1522 logfile = NULL;
1526 void cpu_set_log_filename(const char *filename)
1528 logfilename = strdup(filename);
1529 if (logfile) {
1530 fclose(logfile);
1531 logfile = NULL;
1533 cpu_set_log(loglevel);
1536 static void cpu_unlink_tb(CPUState *env)
1538 #if defined(USE_NPTL)
1539 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1540 problem and hope the cpu will stop of its own accord. For userspace
1541 emulation this often isn't actually as bad as it sounds. Often
1542 signals are used primarily to interrupt blocking syscalls. */
1543 #else
1544 TranslationBlock *tb;
1545 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1547 tb = env->current_tb;
1548 /* if the cpu is currently executing code, we must unlink it and
1549 all the potentially executing TBs */
1550 if (tb && !testandset(&interrupt_lock)) {
1551 env->current_tb = NULL;
1552 tb_reset_jump_recursive(tb);
1553 resetlock(&interrupt_lock);
1555 #endif
1558 /* mask must never be zero, except for A20 change call */
1559 void cpu_interrupt(CPUState *env, int mask)
1561 int old_mask;
1563 old_mask = env->interrupt_request;
1564 env->interrupt_request |= mask;
1565 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1566 kvm_update_interrupt_request(env);
1568 #ifndef CONFIG_USER_ONLY
1569 /*
1570 * If called from iothread context, wake the target cpu in
1571 * case it's halted.
1572 */
1573 if (!qemu_cpu_self(env)) {
1574 qemu_cpu_kick(env);
1575 return;
1577 #endif
1579 if (use_icount) {
1580 env->icount_decr.u16.high = 0xffff;
1581 #ifndef CONFIG_USER_ONLY
1582 if (!can_do_io(env)
1583 && (mask & ~old_mask) != 0) {
1584 cpu_abort(env, "Raised interrupt while not in I/O function");
1586 #endif
1587 } else {
1588 cpu_unlink_tb(env);
1592 void cpu_reset_interrupt(CPUState *env, int mask)
1594 env->interrupt_request &= ~mask;
1597 void cpu_exit(CPUState *env)
1599 env->exit_request = 1;
1600 cpu_unlink_tb(env);
1603 const CPULogItem cpu_log_items[] = {
1604 { CPU_LOG_TB_OUT_ASM, "out_asm",
1605 "show generated host assembly code for each compiled TB" },
1606 { CPU_LOG_TB_IN_ASM, "in_asm",
1607 "show target assembly code for each compiled TB" },
1608 { CPU_LOG_TB_OP, "op",
1609 "show micro ops for each compiled TB" },
1610 { CPU_LOG_TB_OP_OPT, "op_opt",
1611 "show micro ops "
1612 #ifdef TARGET_I386
1613 "before eflags optimization and "
1614 #endif
1615 "after liveness analysis" },
1616 { CPU_LOG_INT, "int",
1617 "show interrupts/exceptions in short format" },
1618 { CPU_LOG_EXEC, "exec",
1619 "show trace before each executed TB (lots of logs)" },
1620 { CPU_LOG_TB_CPU, "cpu",
1621 "show CPU state before block translation" },
1622 #ifdef TARGET_I386
1623 { CPU_LOG_PCALL, "pcall",
1624 "show protected mode far calls/returns/exceptions" },
1625 { CPU_LOG_RESET, "cpu_reset",
1626 "show CPU state before CPU resets" },
1627 #endif
1628 #ifdef DEBUG_IOPORT
1629 { CPU_LOG_IOPORT, "ioport",
1630 "show all i/o ports accesses" },
1631 #endif
1632 { 0, NULL, NULL },
1635 static int cmp1(const char *s1, int n, const char *s2)
1637 if (strlen(s2) != n)
1638 return 0;
1639 return memcmp(s1, s2, n) == 0;
1642 /* takes a comma separated list of log masks. Return 0 if error. */
1643 int cpu_str_to_log_mask(const char *str)
1645 const CPULogItem *item;
1646 int mask;
1647 const char *p, *p1;
1649 p = str;
1650 mask = 0;
1651 for(;;) {
1652 p1 = strchr(p, ',');
1653 if (!p1)
1654 p1 = p + strlen(p);
1655 if(cmp1(p,p1-p,"all")) {
1656 for(item = cpu_log_items; item->mask != 0; item++) {
1657 mask |= item->mask;
1659 } else {
1660 for(item = cpu_log_items; item->mask != 0; item++) {
1661 if (cmp1(p, p1 - p, item->name))
1662 goto found;
1664 return 0;
1666 found:
1667 mask |= item->mask;
1668 if (*p1 != ',')
1669 break;
1670 p = p1 + 1;
1672 return mask;
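/* Usage sketch for cpu_str_to_log_mask() (illustrative): "in_asm,cpu"
   yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" ORs together every mask
   in cpu_log_items, and any unknown name makes the whole call return 0. */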
1675 void cpu_abort(CPUState *env, const char *fmt, ...)
1677 va_list ap;
1678 va_list ap2;
1680 va_start(ap, fmt);
1681 va_copy(ap2, ap);
1682 fprintf(stderr, "qemu: fatal: ");
1683 vfprintf(stderr, fmt, ap);
1684 fprintf(stderr, "\n");
1685 #ifdef TARGET_I386
1686 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1687 #else
1688 cpu_dump_state(env, stderr, fprintf, 0);
1689 #endif
1690 if (qemu_log_enabled()) {
1691 qemu_log("qemu: fatal: ");
1692 qemu_log_vprintf(fmt, ap2);
1693 qemu_log("\n");
1694 #ifdef TARGET_I386
1695 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1696 #else
1697 log_cpu_state(env, 0);
1698 #endif
1699 qemu_log_flush();
1700 qemu_log_close();
1702 va_end(ap2);
1703 va_end(ap);
1704 abort();
1707 CPUState *cpu_copy(CPUState *env)
1709 CPUState *new_env = cpu_init(env->cpu_model_str);
1710 CPUState *next_cpu = new_env->next_cpu;
1711 int cpu_index = new_env->cpu_index;
1712 #if defined(TARGET_HAS_ICE)
1713 CPUBreakpoint *bp;
1714 CPUWatchpoint *wp;
1715 #endif
1717 memcpy(new_env, env, sizeof(CPUState));
1719 /* Preserve chaining and index. */
1720 new_env->next_cpu = next_cpu;
1721 new_env->cpu_index = cpu_index;
1723 /* Clone all break/watchpoints.
1724 Note: Once we support ptrace with hw-debug register access, make sure
1725 BP_CPU break/watchpoints are handled correctly on clone. */
1726 TAILQ_INIT(&env->breakpoints);
1727 TAILQ_INIT(&env->watchpoints);
1728 #if defined(TARGET_HAS_ICE)
1729 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1730 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1732 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1733 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1734 wp->flags, NULL);
1736 #endif
1738 return new_env;
1741 #if !defined(CONFIG_USER_ONLY)
1743 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1745 unsigned int i;
1747 /* Discard jump cache entries for any tb which might potentially
1748 overlap the flushed page. */
1749 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1750 memset (&env->tb_jmp_cache[i], 0,
1751 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1753 i = tb_jmp_cache_hash_page(addr);
1754 memset (&env->tb_jmp_cache[i], 0,
1755 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1758 static CPUTLBEntry s_cputlb_empty_entry = {
1759 .addr_read = -1,
1760 .addr_write = -1,
1761 .addr_code = -1,
1762 .addend = -1,
1765 /* NOTE: if flush_global is true, also flush global entries (not
1766 implemented yet) */
1767 void tlb_flush(CPUState *env, int flush_global)
1769 int i;
1771 #if defined(DEBUG_TLB)
1772 printf("tlb_flush:\n");
1773 #endif
1774 /* must reset current TB so that interrupts cannot modify the
1775 links while we are modifying them */
1776 env->current_tb = NULL;
1778 for(i = 0; i < CPU_TLB_SIZE; i++) {
1779 int mmu_idx;
1780 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1781 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1785 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1787 #ifdef CONFIG_KQEMU
1788 if (env->kqemu_enabled) {
1789 kqemu_flush(env, flush_global);
1791 #endif
1792 tlb_flush_count++;
1795 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1797 if (addr == (tlb_entry->addr_read &
1798 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1799 addr == (tlb_entry->addr_write &
1800 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1801 addr == (tlb_entry->addr_code &
1802 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1803 *tlb_entry = s_cputlb_empty_entry;
1807 void tlb_flush_page(CPUState *env, target_ulong addr)
1809 int i;
1810 int mmu_idx;
1812 #if defined(DEBUG_TLB)
1813 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1814 #endif
1815 /* must reset current TB so that interrupts cannot modify the
1816 links while we are modifying them */
1817 env->current_tb = NULL;
1819 addr &= TARGET_PAGE_MASK;
1820 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1821 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1822 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1824 tlb_flush_jmp_cache(env, addr);
1826 #ifdef CONFIG_KQEMU
1827 if (env->kqemu_enabled) {
1828 kqemu_flush_page(env, addr);
1830 #endif
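/* The software TLB is a direct-mapped, CPU_TLB_SIZE-entry table per MMU
   mode, which is why tlb_flush_page() only needs to probe one slot per
   mmu_idx.  Minimal sketch of the slot computation (hypothetical helper
   name, illustrative only): */
#if 0
static inline CPUTLBEntry *example_tlb_slot(CPUState *env, int mmu_idx,
                                            target_ulong addr)
{
    int i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    return &env->tlb_table[mmu_idx][i];
}
#endif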
1833 /* update the TLBs so that writes to code in the virtual page 'addr'
1834 can be detected */
1835 static void tlb_protect_code(ram_addr_t ram_addr)
1837 cpu_physical_memory_reset_dirty(ram_addr,
1838 ram_addr + TARGET_PAGE_SIZE,
1839 CODE_DIRTY_FLAG);
1842 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1843 tested for self modifying code */
1844 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1845 target_ulong vaddr)
1847 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1850 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1851 unsigned long start, unsigned long length)
1853 unsigned long addr;
1854 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1855 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1856 if ((addr - start) < length) {
1857 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1862 /* Note: start and end must be within the same ram block. */
1863 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1864 int dirty_flags)
1866 CPUState *env;
1867 unsigned long length, start1;
1868 int i, mask, len;
1869 uint8_t *p;
1871 start &= TARGET_PAGE_MASK;
1872 end = TARGET_PAGE_ALIGN(end);
1874 length = end - start;
1875 if (length == 0)
1876 return;
1877 len = length >> TARGET_PAGE_BITS;
1878 #ifdef CONFIG_KQEMU
1879 /* XXX: should not depend on cpu context */
1880 env = first_cpu;
1881 if (env->kqemu_enabled) {
1882 ram_addr_t addr;
1883 addr = start;
1884 for(i = 0; i < len; i++) {
1885 kqemu_set_notdirty(env, addr);
1886 addr += TARGET_PAGE_SIZE;
1889 #endif
1890 mask = ~dirty_flags;
1891 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1892 for(i = 0; i < len; i++)
1893 p[i] &= mask;
1895 /* we modify the TLB cache so that the dirty bit will be set again
1896 when accessing the range */
1897 start1 = (unsigned long)qemu_get_ram_ptr(start);
1898 /* Check that we don't span multiple blocks - this breaks the
1899 address comparisons below. */
1900 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1901 != (end - 1) - start) {
1902 abort();
1905 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1906 int mmu_idx;
1907 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1908 for(i = 0; i < CPU_TLB_SIZE; i++)
1909 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1910 start1, length);
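/* phys_ram_dirty holds one byte per target page, with one bit per dirty
   "client" (CODE_DIRTY_FLAG is the bit used in this file; other bits are
   used by other clients such as the display and migration dirty tracking).
   The code above clears only the requested bits (p[i] &= ~dirty_flags) and
   then re-arms the write TLB entries with TLB_NOTDIRTY so the next guest
   store marks the page dirty again. */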
1915 int cpu_physical_memory_set_dirty_tracking(int enable)
1917 if (kvm_enabled()) {
1918 return kvm_set_migration_log(enable);
1920 return 0;
1923 int cpu_physical_memory_get_dirty_tracking(void)
1925 return in_migration;
1928 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1929 target_phys_addr_t end_addr)
1931 int ret = 0;
1933 if (kvm_enabled())
1934 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1935 return ret;
1938 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1940 ram_addr_t ram_addr;
1941 void *p;
1943 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1944 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1945 + tlb_entry->addend);
1946 ram_addr = qemu_ram_addr_from_host(p);
1947 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1948 tlb_entry->addr_write |= TLB_NOTDIRTY;
1953 /* update the TLB according to the current state of the dirty bits */
1954 void cpu_tlb_update_dirty(CPUState *env)
1956 int i;
1957 int mmu_idx;
1958 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1959 for(i = 0; i < CPU_TLB_SIZE; i++)
1960 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1964 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1966 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1967 tlb_entry->addr_write = vaddr;
1970 /* update the TLB corresponding to virtual page vaddr
1971 so that it is no longer dirty */
1972 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1974 int i;
1975 int mmu_idx;
1977 vaddr &= TARGET_PAGE_MASK;
1978 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1979 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1980 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1983 /* add a new TLB entry. At most one entry for a given virtual address
1984 is permitted. Return 0 if OK or 2 if the page could not be mapped
1985 (can only happen in non SOFTMMU mode for I/O pages or pages
1986 conflicting with the host address space). */
1987 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1988 target_phys_addr_t paddr, int prot,
1989 int mmu_idx, int is_softmmu)
1991 PhysPageDesc *p;
1992 unsigned long pd;
1993 unsigned int index;
1994 target_ulong address;
1995 target_ulong code_address;
1996 target_phys_addr_t addend;
1997 int ret;
1998 CPUTLBEntry *te;
1999 CPUWatchpoint *wp;
2000 target_phys_addr_t iotlb;
2002 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2003 if (!p) {
2004 pd = IO_MEM_UNASSIGNED;
2005 } else {
2006 pd = p->phys_offset;
2008 #if defined(DEBUG_TLB)
2009 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2010 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2011 #endif
2013 ret = 0;
2014 address = vaddr;
2015 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2016 /* IO memory case (romd handled later) */
2017 address |= TLB_MMIO;
2019 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2020 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2021 /* Normal RAM. */
2022 iotlb = pd & TARGET_PAGE_MASK;
2023 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2024 iotlb |= IO_MEM_NOTDIRTY;
2025 else
2026 iotlb |= IO_MEM_ROM;
2027 } else {
2028 /* IO handlers are currently passed a physical address.
2029 It would be nice to pass an offset from the base address
2030 of that region. This would avoid having to special case RAM,
2031 and avoid full address decoding in every device.
2032 We can't use the high bits of pd for this because
2033 IO_MEM_ROMD uses these as a ram address. */
2034 iotlb = (pd & ~TARGET_PAGE_MASK);
2035 if (p) {
2036 iotlb += p->region_offset;
2037 } else {
2038 iotlb += paddr;
2042 code_address = address;
2043 /* Make accesses to pages with watchpoints go via the
2044 watchpoint trap routines. */
2045 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2046 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2047 iotlb = io_mem_watch + paddr;
2048 /* TODO: The memory case can be optimized by not trapping
2049 reads of pages with a write breakpoint. */
2050 address |= TLB_MMIO;
2054 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2055 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2056 te = &env->tlb_table[mmu_idx][index];
2057 te->addend = addend - vaddr;
2058 if (prot & PAGE_READ) {
2059 te->addr_read = address;
2060 } else {
2061 te->addr_read = -1;
2064 if (prot & PAGE_EXEC) {
2065 te->addr_code = code_address;
2066 } else {
2067 te->addr_code = -1;
2069 if (prot & PAGE_WRITE) {
2070 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2071 (pd & IO_MEM_ROMD)) {
2072 /* Write access calls the I/O callback. */
2073 te->addr_write = address | TLB_MMIO;
2074 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2075 !cpu_physical_memory_is_dirty(pd)) {
2076 te->addr_write = address | TLB_NOTDIRTY;
2077 } else {
2078 te->addr_write = address;
2080 } else {
2081 te->addr_write = -1;
2083 return ret;
2086 #else
2088 void tlb_flush(CPUState *env, int flush_global)
2092 void tlb_flush_page(CPUState *env, target_ulong addr)
2096 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2097 target_phys_addr_t paddr, int prot,
2098 int mmu_idx, int is_softmmu)
2100 return 0;
2104 * Walks guest process memory "regions" one by one
2105 * and calls callback function 'fn' for each region.
2107 int walk_memory_regions(void *priv,
2108 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2110 unsigned long start, end;
2111 PageDesc *p = NULL;
2112 int i, j, prot, prot1;
2113 int rc = 0;
2115 start = end = -1;
2116 prot = 0;
2118 for (i = 0; i <= L1_SIZE; i++) {
2119 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2120 for (j = 0; j < L2_SIZE; j++) {
2121 prot1 = (p == NULL) ? 0 : p[j].flags;
2123 * "region" is one contiguous chunk of memory
2124 * that has the same protection flags set.
2126 if (prot1 != prot) {
2127 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2128 if (start != -1) {
2129 rc = (*fn)(priv, start, end, prot);
2130 /* callback can stop iteration by returning != 0 */
2131 if (rc != 0)
2132 return (rc);
2134 if (prot1 != 0)
2135 start = end;
2136 else
2137 start = -1;
2138 prot = prot1;
2140 if (p == NULL)
2141 break;
2144 return (rc);
2147 static int dump_region(void *priv, unsigned long start,
2148 unsigned long end, unsigned long prot)
2150 FILE *f = (FILE *)priv;
2152 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2153 start, end, end - start,
2154 ((prot & PAGE_READ) ? 'r' : '-'),
2155 ((prot & PAGE_WRITE) ? 'w' : '-'),
2156 ((prot & PAGE_EXEC) ? 'x' : '-'));
2158 return (0);
2161 /* dump memory mappings */
2162 void page_dump(FILE *f)
2164 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2165 "start", "end", "size", "prot");
2166 walk_memory_regions(f, dump_region);
2169 int page_get_flags(target_ulong address)
2171 PageDesc *p;
2173 p = page_find(address >> TARGET_PAGE_BITS);
2174 if (!p)
2175 return 0;
2176 return p->flags;
2179 /* modify the flags of a page and invalidate the code if
2180 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2181 depending on PAGE_WRITE */
2182 void page_set_flags(target_ulong start, target_ulong end, int flags)
2184 PageDesc *p;
2185 target_ulong addr;
2187 /* mmap_lock should already be held. */
2188 start = start & TARGET_PAGE_MASK;
2189 end = TARGET_PAGE_ALIGN(end);
2190 if (flags & PAGE_WRITE)
2191 flags |= PAGE_WRITE_ORG;
2192 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2193 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2194 /* We may be called for host regions that are outside guest
2195 address space. */
2196 if (!p)
2197 return;
2198 /* if the write protection is set, then we invalidate the code
2199 inside */
2200 if (!(p->flags & PAGE_WRITE) &&
2201 (flags & PAGE_WRITE) &&
2202 p->first_tb) {
2203 tb_invalidate_phys_page(addr, 0, NULL);
2205 p->flags = flags;
2209 int page_check_range(target_ulong start, target_ulong len, int flags)
2211 PageDesc *p;
2212 target_ulong end;
2213 target_ulong addr;
2215 if (start + len < start)
2216 /* we've wrapped around */
2217 return -1;
2219 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2220 start = start & TARGET_PAGE_MASK;
2222 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2223 p = page_find(addr >> TARGET_PAGE_BITS);
2224 if (!p)
2225 return -1;
2226 if (!(p->flags & PAGE_VALID))
2227 return -1;
2229 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2230 return -1;
2231 if (flags & PAGE_WRITE) {
2232 if (!(p->flags & PAGE_WRITE_ORG))
2233 return -1;
2234 /* unprotect the page if it was put read-only because it
2235 contains translated code */
2236 if (!(p->flags & PAGE_WRITE)) {
2237 if (!page_unprotect(addr, 0, NULL))
2238 return -1;
2240 return 0;
2243 return 0;
2246 /* called from signal handler: invalidate the code and unprotect the
2247 page. Return TRUE if the fault was successfully handled. */
2248 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2250 unsigned int page_index, prot, pindex;
2251 PageDesc *p, *p1;
2252 target_ulong host_start, host_end, addr;
2254 /* Technically this isn't safe inside a signal handler. However we
2255 know this only ever happens in a synchronous SEGV handler, so in
2256 practice it seems to be ok. */
2257 mmap_lock();
2259 host_start = address & qemu_host_page_mask;
2260 page_index = host_start >> TARGET_PAGE_BITS;
2261 p1 = page_find(page_index);
2262 if (!p1) {
2263 mmap_unlock();
2264 return 0;
2266 host_end = host_start + qemu_host_page_size;
2267 p = p1;
2268 prot = 0;
2269 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2270 prot |= p->flags;
2271 p++;
2273 /* if the page was really writable, then we change its
2274 protection back to writable */
2275 if (prot & PAGE_WRITE_ORG) {
2276 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2277 if (!(p1[pindex].flags & PAGE_WRITE)) {
2278 mprotect((void *)g2h(host_start), qemu_host_page_size,
2279 (prot & PAGE_BITS) | PAGE_WRITE);
2280 p1[pindex].flags |= PAGE_WRITE;
2281 /* and since the content will be modified, we must invalidate
2282 the corresponding translated code. */
2283 tb_invalidate_phys_page(address, pc, puc);
2284 #ifdef DEBUG_TB_CHECK
2285 tb_invalidate_check(address);
2286 #endif
2287 mmap_unlock();
2288 return 1;
2291 mmap_unlock();
2292 return 0;
2295 static inline void tlb_set_dirty(CPUState *env,
2296 unsigned long addr, target_ulong vaddr)
2299 #endif /* defined(CONFIG_USER_ONLY) */
2301 #if !defined(CONFIG_USER_ONLY)
2303 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2304 ram_addr_t memory, ram_addr_t region_offset);
2305 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2306 ram_addr_t orig_memory, ram_addr_t region_offset);
2307 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2308 need_subpage) \
2309 do { \
2310 if (addr > start_addr) \
2311 start_addr2 = 0; \
2312 else { \
2313 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2314 if (start_addr2 > 0) \
2315 need_subpage = 1; \
2318 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2319 end_addr2 = TARGET_PAGE_SIZE - 1; \
2320 else { \
2321 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2322 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2323 need_subpage = 1; \
2325 } while (0)
2327 /* register physical memory. 'size' must be a multiple of the target
2328 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2329 io memory page. The address used when calling the IO function is
2330 the offset from the start of the region, plus region_offset. Both
2331 start_addr and region_offset are rounded down to a page boundary
2332 before calculating this offset. This should not be a problem unless
2333 the low bits of start_addr and region_offset differ. */
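/* Illustrative sketch (not from the original source): most callers go
   through the cpu_register_physical_memory() wrapper, which passes a
   region_offset of 0, roughly:

       io = cpu_register_io_memory(my_dev_read, my_dev_write, my_dev_state);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   A non-zero, page-aligned region_offset lets a large region be registered
   piecewise while its handlers still see offsets relative to the whole
   region.  The handler names, base address and size here are
   hypothetical. */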
2334 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2335 ram_addr_t size,
2336 ram_addr_t phys_offset,
2337 ram_addr_t region_offset)
2339 target_phys_addr_t addr, end_addr;
2340 PhysPageDesc *p;
2341 CPUState *env;
2342 ram_addr_t orig_size = size;
2343 void *subpage;
2345 #ifdef CONFIG_KQEMU
2346 /* XXX: should not depend on cpu context */
2347 env = first_cpu;
2348 if (env->kqemu_enabled) {
2349 kqemu_set_phys_mem(start_addr, size, phys_offset);
2351 #endif
2352 if (kvm_enabled())
2353 kvm_set_phys_mem(start_addr, size, phys_offset);
2355 if (phys_offset == IO_MEM_UNASSIGNED) {
2356 region_offset = start_addr;
2358 region_offset &= TARGET_PAGE_MASK;
2359 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2360 end_addr = start_addr + (target_phys_addr_t)size;
2361 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2362 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2363 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2364 ram_addr_t orig_memory = p->phys_offset;
2365 target_phys_addr_t start_addr2, end_addr2;
2366 int need_subpage = 0;
2368 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2369 need_subpage);
2370 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2371 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2372 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2373 &p->phys_offset, orig_memory,
2374 p->region_offset);
2375 } else {
2376 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2377 >> IO_MEM_SHIFT];
2379 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2380 region_offset);
2381 p->region_offset = 0;
2382 } else {
2383 p->phys_offset = phys_offset;
2384 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2385 (phys_offset & IO_MEM_ROMD))
2386 phys_offset += TARGET_PAGE_SIZE;
2388 } else {
2389 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2390 p->phys_offset = phys_offset;
2391 p->region_offset = region_offset;
2392 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2393 (phys_offset & IO_MEM_ROMD)) {
2394 phys_offset += TARGET_PAGE_SIZE;
2395 } else {
2396 target_phys_addr_t start_addr2, end_addr2;
2397 int need_subpage = 0;
2399 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2400 end_addr2, need_subpage);
2402 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2403 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2404 &p->phys_offset, IO_MEM_UNASSIGNED,
2405 addr & TARGET_PAGE_MASK);
2406 subpage_register(subpage, start_addr2, end_addr2,
2407 phys_offset, region_offset);
2408 p->region_offset = 0;
2412 region_offset += TARGET_PAGE_SIZE;
2415 /* since each CPU stores ram addresses in its TLB cache, we must
2416 reset the modified entries */
2417 /* XXX: slow ! */
2418 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2419 tlb_flush(env, 1);
2423 /* XXX: temporary until new memory mapping API */
2424 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2426 PhysPageDesc *p;
2428 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2429 if (!p)
2430 return IO_MEM_UNASSIGNED;
2431 return p->phys_offset;
2434 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2436 if (kvm_enabled())
2437 kvm_coalesce_mmio_region(addr, size);
2440 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2442 if (kvm_enabled())
2443 kvm_uncoalesce_mmio_region(addr, size);
2446 #ifdef CONFIG_KQEMU
2447 /* XXX: better than nothing */
2448 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2450 ram_addr_t addr;
2451 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2452 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2453 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2454 abort();
2456 addr = last_ram_offset;
2457 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2458 return addr;
2460 #endif
2462 #ifdef __linux__
2464 #include <sys/vfs.h>
2466 #define HUGETLBFS_MAGIC 0x958458f6
2468 static long gethugepagesize(const char *path)
2470 struct statfs fs;
2471 int ret;
2473 do {
2474 ret = statfs(path, &fs);
2475 } while (ret != 0 && errno == EINTR);
2477 if (ret != 0) {
2478 perror("statfs");
2479 return 0;
2482 if (fs.f_type != HUGETLBFS_MAGIC)
2483 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2485 return fs.f_bsize;
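/* Note (added, not in the original source): on a hugetlbfs mount the
   statfs f_bsize field reports the huge page size backing the mount
   (commonly 2 MiB on x86-64), which is why it is returned here as the
   allocation granularity. */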
2488 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2490 char *filename;
2491 void *area;
2492 int fd;
2493 #ifdef MAP_POPULATE
2494 int flags;
2495 #endif
2496 unsigned long hpagesize;
2497 extern int mem_prealloc;
2499 if (!path) {
2500 return NULL;
2503 hpagesize = gethugepagesize(path);
2504 if (!hpagesize) {
2505 return NULL;
2508 if (memory < hpagesize) {
2509 return NULL;
2512 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2513 fprintf(stderr, "host lacks mmu notifiers, disabling --mem-path\n");
2514 return NULL;
2517 if (asprintf(&filename, "%s/kvm.XXXXXX", path) == -1) {
2518 return NULL;
2521 fd = mkstemp(filename);
2522 if (fd < 0) {
2523 perror("mkstemp");
2524 free(filename);
2525 return NULL;
2527 unlink(filename);
2528 free(filename);
2530 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2533 * ftruncate is not supported by hugetlbfs in older
2534 * hosts, so don't bother checking for errors.
2535 * If anything goes wrong with it under other filesystems,
2536 * mmap will fail.
2538 ftruncate(fd, memory);
2540 #ifdef MAP_POPULATE
2541 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2542 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2543 * to sidestep this quirk.
2545 flags = mem_prealloc ? MAP_POPULATE|MAP_SHARED : MAP_PRIVATE;
2546 area = mmap(0, memory, PROT_READ|PROT_WRITE, flags, fd, 0);
2547 #else
2548 area = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
2549 #endif
2550 if (area == MAP_FAILED) {
2551 perror("alloc_mem_area: can't mmap hugetlbfs pages");
2552 close(fd);
2553 return (NULL);
2555 return area;
2558 #else
2560 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2562 return NULL;
2565 #endif
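/* Note (added, not in the original source): file_ram_alloc() backs guest
   RAM with files on a hugetlbfs mount when a path is supplied via the
   mem_path extern below, which corresponds to a -mem-path style command
   line option, e.g. roughly:

       qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages ...

   The mount point shown is only an example and is host-specific. */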
2567 extern const char *mem_path;
2569 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2571 RAMBlock *new_block;
2573 #ifdef CONFIG_KQEMU
2574 if (kqemu_phys_ram_base) {
2575 return kqemu_ram_alloc(size);
2577 #endif
2579 size = TARGET_PAGE_ALIGN(size);
2580 new_block = qemu_malloc(sizeof(*new_block));
2582 new_block->host = file_ram_alloc(size, mem_path);
2583 if (!new_block->host) {
2584 new_block->host = qemu_vmalloc(size);
2585 #ifdef MADV_MERGEABLE
2586 madvise(new_block->host, size, MADV_MERGEABLE);
2587 #endif
2589 new_block->offset = last_ram_offset;
2590 new_block->length = size;
2592 new_block->next = ram_blocks;
2593 ram_blocks = new_block;
2595 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2596 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2597 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2598 0xff, size >> TARGET_PAGE_BITS);
2600 last_ram_offset += size;
2602 if (kvm_enabled())
2603 kvm_setup_guest_memory(new_block->host, size);
2605 return new_block->offset;
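/* Illustrative sketch (not from the original source): board code typically
   allocates guest RAM and then maps it into the guest physical address
   space, roughly:

       ram_addr_t ram_off = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(base, ram_size, ram_off | IO_MEM_RAM);

   base and ram_size are hypothetical; IO_MEM_RAM identifies the range as
   ordinary RAM rather than an I/O region. */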
2608 void qemu_ram_free(ram_addr_t addr)
2610 /* TODO: implement this. */
2613 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2614 With the exception of the softmmu code in this file, this should
2615 only be used for local memory (e.g. video ram) that the device owns,
2616 and knows it isn't going to access beyond the end of the block.
2618 It should not be used for general purpose DMA.
2619 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2621 void *qemu_get_ram_ptr(ram_addr_t addr)
2623 RAMBlock *prev;
2624 RAMBlock **prevp;
2625 RAMBlock *block;
2627 #ifdef CONFIG_KQEMU
2628 if (kqemu_phys_ram_base) {
2629 return kqemu_phys_ram_base + addr;
2631 #endif
2633 prev = NULL;
2634 prevp = &ram_blocks;
2635 block = ram_blocks;
2636 while (block && (block->offset > addr
2637 || block->offset + block->length <= addr)) {
2638 if (prev)
2639 prevp = &prev->next;
2640 prev = block;
2641 block = block->next;
2643 if (!block) {
2644 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2645 abort();
2647 /* Move this entry to the start of the list. */
2648 if (prev) {
2649 prev->next = block->next;
2650 block->next = *prevp;
2651 *prevp = block;
2653 return block->host + (addr - block->offset);
2656 /* Some of the softmmu routines need to translate from a host pointer
2657 (typically a TLB entry) back to a ram offset. */
2658 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2660 RAMBlock *prev;
2661 RAMBlock **prevp;
2662 RAMBlock *block;
2663 uint8_t *host = ptr;
2665 #ifdef CONFIG_KQEMU
2666 if (kqemu_phys_ram_base) {
2667 return host - kqemu_phys_ram_base;
2669 #endif
2671 prev = NULL;
2672 prevp = &ram_blocks;
2673 block = ram_blocks;
2674 while (block && (block->host > host
2675 || block->host + block->length <= host)) {
2676 if (prev)
2677 prevp = &prev->next;
2678 prev = block;
2679 block = block->next;
2681 if (!block) {
2682 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2683 abort();
2685 return block->offset + (host - block->host);
2688 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2690 #ifdef DEBUG_UNASSIGNED
2691 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2692 #endif
2693 #if defined(TARGET_SPARC)
2694 do_unassigned_access(addr, 0, 0, 0, 1);
2695 #endif
2696 return 0;
2699 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2701 #ifdef DEBUG_UNASSIGNED
2702 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2703 #endif
2704 #if defined(TARGET_SPARC)
2705 do_unassigned_access(addr, 0, 0, 0, 2);
2706 #endif
2707 return 0;
2710 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2712 #ifdef DEBUG_UNASSIGNED
2713 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2714 #endif
2715 #if defined(TARGET_SPARC)
2716 do_unassigned_access(addr, 0, 0, 0, 4);
2717 #endif
2718 return 0;
2721 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2723 #ifdef DEBUG_UNASSIGNED
2724 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2725 #endif
2726 #if defined(TARGET_SPARC)
2727 do_unassigned_access(addr, 1, 0, 0, 1);
2728 #endif
2731 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2733 #ifdef DEBUG_UNASSIGNED
2734 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2735 #endif
2736 #if defined(TARGET_SPARC)
2737 do_unassigned_access(addr, 1, 0, 0, 2);
2738 #endif
2741 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2743 #ifdef DEBUG_UNASSIGNED
2744 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2745 #endif
2746 #if defined(TARGET_SPARC)
2747 do_unassigned_access(addr, 1, 0, 0, 4);
2748 #endif
2751 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2752 unassigned_mem_readb,
2753 unassigned_mem_readw,
2754 unassigned_mem_readl,
2757 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2758 unassigned_mem_writeb,
2759 unassigned_mem_writew,
2760 unassigned_mem_writel,
2763 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2764 uint32_t val)
2766 int dirty_flags;
2767 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2768 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2769 #if !defined(CONFIG_USER_ONLY)
2770 tb_invalidate_phys_page_fast(ram_addr, 1);
2771 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2772 #endif
2774 stb_p(qemu_get_ram_ptr(ram_addr), val);
2775 #ifdef CONFIG_KQEMU
2776 if (cpu_single_env->kqemu_enabled &&
2777 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2778 kqemu_modify_page(cpu_single_env, ram_addr);
2779 #endif
2780 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2781 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2782 /* we remove the notdirty callback only if the code has been
2783 flushed */
2784 if (dirty_flags == 0xff)
2785 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2788 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2789 uint32_t val)
2791 int dirty_flags;
2792 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2793 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2794 #if !defined(CONFIG_USER_ONLY)
2795 tb_invalidate_phys_page_fast(ram_addr, 2);
2796 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2797 #endif
2799 stw_p(qemu_get_ram_ptr(ram_addr), val);
2800 #ifdef CONFIG_KQEMU
2801 if (cpu_single_env->kqemu_enabled &&
2802 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2803 kqemu_modify_page(cpu_single_env, ram_addr);
2804 #endif
2805 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2806 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2807 /* we remove the notdirty callback only if the code has been
2808 flushed */
2809 if (dirty_flags == 0xff)
2810 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2813 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2814 uint32_t val)
2816 int dirty_flags;
2817 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2818 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2819 #if !defined(CONFIG_USER_ONLY)
2820 tb_invalidate_phys_page_fast(ram_addr, 4);
2821 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2822 #endif
2824 stl_p(qemu_get_ram_ptr(ram_addr), val);
2825 #ifdef CONFIG_KQEMU
2826 if (cpu_single_env->kqemu_enabled &&
2827 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2828 kqemu_modify_page(cpu_single_env, ram_addr);
2829 #endif
2830 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2831 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2832 /* we remove the notdirty callback only if the code has been
2833 flushed */
2834 if (dirty_flags == 0xff)
2835 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2838 static CPUReadMemoryFunc *error_mem_read[3] = {
2839 NULL, /* never used */
2840 NULL, /* never used */
2841 NULL, /* never used */
2844 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2845 notdirty_mem_writeb,
2846 notdirty_mem_writew,
2847 notdirty_mem_writel,
2850 /* Generate a debug exception if a watchpoint has been hit. */
2851 static void check_watchpoint(int offset, int len_mask, int flags)
2853 CPUState *env = cpu_single_env;
2854 target_ulong pc, cs_base;
2855 TranslationBlock *tb;
2856 target_ulong vaddr;
2857 CPUWatchpoint *wp;
2858 int cpu_flags;
2860 if (env->watchpoint_hit) {
2861 /* We re-entered the check after replacing the TB. Now raise
2862 * the debug interrupt so that it will trigger after the
2863 * current instruction. */
2864 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2865 return;
2867 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2868 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2869 if ((vaddr == (wp->vaddr & len_mask) ||
2870 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2871 wp->flags |= BP_WATCHPOINT_HIT;
2872 if (!env->watchpoint_hit) {
2873 env->watchpoint_hit = wp;
2874 tb = tb_find_pc(env->mem_io_pc);
2875 if (!tb) {
2876 cpu_abort(env, "check_watchpoint: could not find TB for "
2877 "pc=%p", (void *)env->mem_io_pc);
2879 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2880 tb_phys_invalidate(tb, -1);
2881 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2882 env->exception_index = EXCP_DEBUG;
2883 } else {
2884 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2885 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2887 cpu_resume_from_signal(env, NULL);
2889 } else {
2890 wp->flags &= ~BP_WATCHPOINT_HIT;
2895 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2896 so these check for a hit then pass through to the normal out-of-line
2897 phys routines. */
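/* Illustrative sketch (not from the original source): watchpoints reach
   these handlers after something roughly like

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp);

   has forced the containing page's TLB entries through the TLB_MMIO and
   io_mem_watch path set up in tlb_set_page_exec() above.  The call shown
   is only a hypothetical example; see the watchpoint insertion code
   earlier in this file for the real interface. */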
2898 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2900 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2901 return ldub_phys(addr);
2904 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2906 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2907 return lduw_phys(addr);
2910 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2912 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2913 return ldl_phys(addr);
2916 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2917 uint32_t val)
2919 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2920 stb_phys(addr, val);
2923 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2924 uint32_t val)
2926 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2927 stw_phys(addr, val);
2930 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2931 uint32_t val)
2933 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2934 stl_phys(addr, val);
2937 static CPUReadMemoryFunc *watch_mem_read[3] = {
2938 watch_mem_readb,
2939 watch_mem_readw,
2940 watch_mem_readl,
2943 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2944 watch_mem_writeb,
2945 watch_mem_writew,
2946 watch_mem_writel,
2949 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2950 unsigned int len)
2952 uint32_t ret;
2953 unsigned int idx;
2955 idx = SUBPAGE_IDX(addr);
2956 #if defined(DEBUG_SUBPAGE)
2957 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2958 mmio, len, addr, idx);
2959 #endif
2960 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2961 addr + mmio->region_offset[idx][0][len]);
2963 return ret;
2966 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2967 uint32_t value, unsigned int len)
2969 unsigned int idx;
2971 idx = SUBPAGE_IDX(addr);
2972 #if defined(DEBUG_SUBPAGE)
2973 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2974 mmio, len, addr, idx, value);
2975 #endif
2976 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2977 addr + mmio->region_offset[idx][1][len],
2978 value);
2981 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2983 #if defined(DEBUG_SUBPAGE)
2984 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2985 #endif
2987 return subpage_readlen(opaque, addr, 0);
2990 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2991 uint32_t value)
2993 #if defined(DEBUG_SUBPAGE)
2994 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2995 #endif
2996 subpage_writelen(opaque, addr, value, 0);
2999 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3001 #if defined(DEBUG_SUBPAGE)
3002 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3003 #endif
3005 return subpage_readlen(opaque, addr, 1);
3008 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3009 uint32_t value)
3011 #if defined(DEBUG_SUBPAGE)
3012 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3013 #endif
3014 subpage_writelen(opaque, addr, value, 1);
3017 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3019 #if defined(DEBUG_SUBPAGE)
3020 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3021 #endif
3023 return subpage_readlen(opaque, addr, 2);
3026 static void subpage_writel (void *opaque,
3027 target_phys_addr_t addr, uint32_t value)
3029 #if defined(DEBUG_SUBPAGE)
3030 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3031 #endif
3032 subpage_writelen(opaque, addr, value, 2);
3035 static CPUReadMemoryFunc *subpage_read[] = {
3036 &subpage_readb,
3037 &subpage_readw,
3038 &subpage_readl,
3041 static CPUWriteMemoryFunc *subpage_write[] = {
3042 &subpage_writeb,
3043 &subpage_writew,
3044 &subpage_writel,
3047 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3048 ram_addr_t memory, ram_addr_t region_offset)
3050 int idx, eidx;
3051 unsigned int i;
3053 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3054 return -1;
3055 idx = SUBPAGE_IDX(start);
3056 eidx = SUBPAGE_IDX(end);
3057 #if defined(DEBUG_SUBPAGE)
3058 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3059 mmio, start, end, idx, eidx, memory);
3060 #endif
3061 memory >>= IO_MEM_SHIFT;
3062 for (; idx <= eidx; idx++) {
3063 for (i = 0; i < 4; i++) {
3064 if (io_mem_read[memory][i]) {
3065 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3066 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3067 mmio->region_offset[idx][0][i] = region_offset;
3069 if (io_mem_write[memory][i]) {
3070 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3071 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3072 mmio->region_offset[idx][1][i] = region_offset;
3077 return 0;
3080 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3081 ram_addr_t orig_memory, ram_addr_t region_offset)
3083 subpage_t *mmio;
3084 int subpage_memory;
3086 mmio = qemu_mallocz(sizeof(subpage_t));
3088 mmio->base = base;
3089 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3090 #if defined(DEBUG_SUBPAGE)
3091 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3092 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3093 #endif
3094 *phys = subpage_memory | IO_MEM_SUBPAGE;
3095 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3096 region_offset);
3098 return mmio;
3101 static int get_free_io_mem_idx(void)
3103 int i;
3105 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3106 if (!io_mem_used[i]) {
3107 io_mem_used[i] = 1;
3108 return i;
3111 return -1;
3114 /* mem_read and mem_write are arrays of functions containing the
3115 function to access byte (index 0), word (index 1) and dword (index
3116 2). Functions can be omitted with a NULL function pointer.
3117 If io_index is non-zero, the corresponding io zone is
3118 modified. If it is zero, a new io zone is allocated. The return
3119 value can be used with cpu_register_physical_memory(). (-1) is
3120 returned on error. */
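/* Illustrative sketch (not from the original source): a typical caller
   declares its access tables and registers them, roughly:

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };

       int io = cpu_register_io_memory(my_read, my_write, my_state);

   All of the my_ names are hypothetical.  Leaving an entry NULL marks the
   region as IO_MEM_SUBWIDTH, so accesses of that width are routed through
   the subpage machinery. */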
3121 static int cpu_register_io_memory_fixed(int io_index,
3122 CPUReadMemoryFunc **mem_read,
3123 CPUWriteMemoryFunc **mem_write,
3124 void *opaque)
3126 int i, subwidth = 0;
3128 if (io_index <= 0) {
3129 io_index = get_free_io_mem_idx();
3130 if (io_index == -1)
3131 return io_index;
3132 } else {
3133 io_index >>= IO_MEM_SHIFT;
3134 if (io_index >= IO_MEM_NB_ENTRIES)
3135 return -1;
3138 for(i = 0; i < 3; i++) {
3139 if (!mem_read[i] || !mem_write[i])
3140 subwidth = IO_MEM_SUBWIDTH;
3141 io_mem_read[io_index][i] = mem_read[i];
3142 io_mem_write[io_index][i] = mem_write[i];
3144 io_mem_opaque[io_index] = opaque;
3145 return (io_index << IO_MEM_SHIFT) | subwidth;
3148 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3149 CPUWriteMemoryFunc **mem_write,
3150 void *opaque)
3152 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3155 void cpu_unregister_io_memory(int io_table_address)
3157 int i;
3158 int io_index = io_table_address >> IO_MEM_SHIFT;
3160 for (i = 0; i < 3; i++) {
3161 io_mem_read[io_index][i] = unassigned_mem_read[i];
3162 io_mem_write[io_index][i] = unassigned_mem_write[i];
3164 io_mem_opaque[io_index] = NULL;
3165 io_mem_used[io_index] = 0;
3168 static void io_mem_init(void)
3170 int i;
3172 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3173 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3174 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3175 for (i = 0; i < 5; i++)
3176 io_mem_used[i] = 1;
3178 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3179 watch_mem_write, NULL);
3180 #ifdef CONFIG_KQEMU
3181 if (kqemu_phys_ram_base) {
3182 /* alloc dirty bits array */
3183 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3184 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3186 #endif
3189 #endif /* !defined(CONFIG_USER_ONLY) */
3191 /* physical memory access (slow version, mainly for debug) */
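/* Illustrative sketch (not from the original source): callers usually go
   through the convenience wrappers, e.g.

       uint8_t buf[16];
       cpu_physical_memory_read(addr, buf, sizeof(buf));
       cpu_physical_memory_write(addr, buf, sizeof(buf));

   which expand to cpu_physical_memory_rw() with is_write set to 0 and 1
   respectively. */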
3192 #if defined(CONFIG_USER_ONLY)
3193 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3194 int len, int is_write)
3196 int l, flags;
3197 target_ulong page;
3198 void * p;
3200 while (len > 0) {
3201 page = addr & TARGET_PAGE_MASK;
3202 l = (page + TARGET_PAGE_SIZE) - addr;
3203 if (l > len)
3204 l = len;
3205 flags = page_get_flags(page);
3206 if (!(flags & PAGE_VALID))
3207 return;
3208 if (is_write) {
3209 if (!(flags & PAGE_WRITE))
3210 return;
3211 /* XXX: this code should not depend on lock_user */
3212 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3213 /* FIXME - should this return an error rather than just fail? */
3214 return;
3215 memcpy(p, buf, l);
3216 unlock_user(p, addr, l);
3217 } else {
3218 if (!(flags & PAGE_READ))
3219 return;
3220 /* XXX: this code should not depend on lock_user */
3221 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3222 /* FIXME - should this return an error rather than just fail? */
3223 return;
3224 memcpy(buf, p, l);
3225 unlock_user(p, addr, 0);
3227 len -= l;
3228 buf += l;
3229 addr += l;
3233 #else
3234 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3235 int len, int is_write)
3237 int l, io_index;
3238 uint8_t *ptr;
3239 uint32_t val;
3240 target_phys_addr_t page;
3241 unsigned long pd;
3242 PhysPageDesc *p;
3244 while (len > 0) {
3245 page = addr & TARGET_PAGE_MASK;
3246 l = (page + TARGET_PAGE_SIZE) - addr;
3247 if (l > len)
3248 l = len;
3249 p = phys_page_find(page >> TARGET_PAGE_BITS);
3250 if (!p) {
3251 pd = IO_MEM_UNASSIGNED;
3252 } else {
3253 pd = p->phys_offset;
3256 if (is_write) {
3257 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3258 target_phys_addr_t addr1 = addr;
3259 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3260 if (p)
3261 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3262 /* XXX: could force cpu_single_env to NULL to avoid
3263 potential bugs */
3264 if (l >= 4 && ((addr1 & 3) == 0)) {
3265 /* 32 bit write access */
3266 val = ldl_p(buf);
3267 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3268 l = 4;
3269 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3270 /* 16 bit write access */
3271 val = lduw_p(buf);
3272 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3273 l = 2;
3274 } else {
3275 /* 8 bit write access */
3276 val = ldub_p(buf);
3277 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3278 l = 1;
3280 } else {
3281 unsigned long addr1;
3282 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3283 /* RAM case */
3284 ptr = qemu_get_ram_ptr(addr1);
3285 memcpy(ptr, buf, l);
3286 if (!cpu_physical_memory_is_dirty(addr1)) {
3287 /* invalidate code */
3288 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3289 /* set dirty bit */
3290 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3291 (0xff & ~CODE_DIRTY_FLAG);
3293 /* qemu doesn't execute guest code directly, but kvm does;
3294 therefore flush the instruction caches */
3295 if (kvm_enabled())
3296 flush_icache_range((unsigned long)ptr,
3297 ((unsigned long)ptr)+l);
3299 } else {
3300 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3301 !(pd & IO_MEM_ROMD)) {
3302 target_phys_addr_t addr1 = addr;
3303 /* I/O case */
3304 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3305 if (p)
3306 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3307 if (l >= 4 && ((addr1 & 3) == 0)) {
3308 /* 32 bit read access */
3309 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3310 stl_p(buf, val);
3311 l = 4;
3312 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3313 /* 16 bit read access */
3314 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3315 stw_p(buf, val);
3316 l = 2;
3317 } else {
3318 /* 8 bit read access */
3319 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3320 stb_p(buf, val);
3321 l = 1;
3323 } else {
3324 /* RAM case */
3325 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3326 (addr & ~TARGET_PAGE_MASK);
3327 memcpy(buf, ptr, l);
3330 len -= l;
3331 buf += l;
3332 addr += l;
3336 /* used for ROM loading : can write in RAM and ROM */
3337 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3338 const uint8_t *buf, int len)
3340 int l;
3341 uint8_t *ptr;
3342 target_phys_addr_t page;
3343 unsigned long pd;
3344 PhysPageDesc *p;
3346 while (len > 0) {
3347 page = addr & TARGET_PAGE_MASK;
3348 l = (page + TARGET_PAGE_SIZE) - addr;
3349 if (l > len)
3350 l = len;
3351 p = phys_page_find(page >> TARGET_PAGE_BITS);
3352 if (!p) {
3353 pd = IO_MEM_UNASSIGNED;
3354 } else {
3355 pd = p->phys_offset;
3358 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3359 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3360 !(pd & IO_MEM_ROMD)) {
3361 /* do nothing */
3362 } else {
3363 unsigned long addr1;
3364 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3365 /* ROM/RAM case */
3366 ptr = qemu_get_ram_ptr(addr1);
3367 memcpy(ptr, buf, l);
3369 len -= l;
3370 buf += l;
3371 addr += l;
3375 typedef struct {
3376 void *buffer;
3377 target_phys_addr_t addr;
3378 target_phys_addr_t len;
3379 } BounceBuffer;
3381 static BounceBuffer bounce;
3383 typedef struct MapClient {
3384 void *opaque;
3385 void (*callback)(void *opaque);
3386 LIST_ENTRY(MapClient) link;
3387 } MapClient;
3389 static LIST_HEAD(map_client_list, MapClient) map_client_list
3390 = LIST_HEAD_INITIALIZER(map_client_list);
3392 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3394 MapClient *client = qemu_malloc(sizeof(*client));
3396 client->opaque = opaque;
3397 client->callback = callback;
3398 LIST_INSERT_HEAD(&map_client_list, client, link);
3399 return client;
3402 void cpu_unregister_map_client(void *_client)
3404 MapClient *client = (MapClient *)_client;
3406 LIST_REMOVE(client, link);
3407 qemu_free(client);
3410 static void cpu_notify_map_clients(void)
3412 MapClient *client;
3414 while (!LIST_EMPTY(&map_client_list)) {
3415 client = LIST_FIRST(&map_client_list);
3416 client->callback(client->opaque);
3417 cpu_unregister_map_client(client);
3421 /* Map a physical memory region into a host virtual address.
3422 * May map a subset of the requested range, given by and returned in *plen.
3423 * May return NULL if resources needed to perform the mapping are exhausted.
3424 * Use only for reads OR writes - not for read-modify-write operations.
3425 * Use cpu_register_map_client() to know when retrying the map operation is
3426 * likely to succeed.
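/* Illustrative sketch (not from the original source): the usual map/unmap
   pattern for a DMA-style access looks roughly like

       target_phys_addr_t len = size;
       void *buf = cpu_physical_memory_map(addr, &len, is_write);
       if (buf) {
           ... access up to len bytes at buf ...
           cpu_physical_memory_unmap(buf, len, is_write, len);
       }

   If NULL is returned, or len comes back shorter than requested, the
   caller can fall back to cpu_physical_memory_rw() or register a map
   client to retry later. */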
3428 void *cpu_physical_memory_map(target_phys_addr_t addr,
3429 target_phys_addr_t *plen,
3430 int is_write)
3432 target_phys_addr_t len = *plen;
3433 target_phys_addr_t done = 0;
3434 int l;
3435 uint8_t *ret = NULL;
3436 uint8_t *ptr;
3437 target_phys_addr_t page;
3438 unsigned long pd;
3439 PhysPageDesc *p;
3440 unsigned long addr1;
3442 while (len > 0) {
3443 page = addr & TARGET_PAGE_MASK;
3444 l = (page + TARGET_PAGE_SIZE) - addr;
3445 if (l > len)
3446 l = len;
3447 p = phys_page_find(page >> TARGET_PAGE_BITS);
3448 if (!p) {
3449 pd = IO_MEM_UNASSIGNED;
3450 } else {
3451 pd = p->phys_offset;
3454 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3455 if (done || bounce.buffer) {
3456 break;
3458 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3459 bounce.addr = addr;
3460 bounce.len = l;
3461 if (!is_write) {
3462 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3464 ptr = bounce.buffer;
3465 } else {
3466 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3467 ptr = qemu_get_ram_ptr(addr1);
3469 if (!done) {
3470 ret = ptr;
3471 } else if (ret + done != ptr) {
3472 break;
3475 len -= l;
3476 addr += l;
3477 done += l;
3479 *plen = done;
3480 return ret;
3483 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3484 * Will also mark the memory as dirty if is_write == 1. access_len gives
3485 * the amount of memory that was actually read or written by the caller.
3487 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3488 int is_write, target_phys_addr_t access_len)
3490 unsigned long flush_len = (unsigned long)access_len;
3492 if (buffer != bounce.buffer) {
3493 if (is_write) {
3494 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3495 while (access_len) {
3496 unsigned l;
3497 l = TARGET_PAGE_SIZE;
3498 if (l > access_len)
3499 l = access_len;
3500 if (!cpu_physical_memory_is_dirty(addr1)) {
3501 /* invalidate code */
3502 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3503 /* set dirty bit */
3504 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3505 (0xff & ~CODE_DIRTY_FLAG);
3507 addr1 += l;
3508 access_len -= l;
3510 dma_flush_range((unsigned long)buffer,
3511 (unsigned long)buffer + flush_len);
3513 return;
3515 if (is_write) {
3516 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3518 qemu_free(bounce.buffer);
3519 bounce.buffer = NULL;
3520 cpu_notify_map_clients();
3523 /* warning: addr must be aligned */
3524 uint32_t ldl_phys(target_phys_addr_t addr)
3526 int io_index;
3527 uint8_t *ptr;
3528 uint32_t val;
3529 unsigned long pd;
3530 PhysPageDesc *p;
3532 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3533 if (!p) {
3534 pd = IO_MEM_UNASSIGNED;
3535 } else {
3536 pd = p->phys_offset;
3539 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3540 !(pd & IO_MEM_ROMD)) {
3541 /* I/O case */
3542 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3543 if (p)
3544 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3545 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3546 } else {
3547 /* RAM case */
3548 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3549 (addr & ~TARGET_PAGE_MASK);
3550 val = ldl_p(ptr);
3552 return val;
3555 /* warning: addr must be aligned */
3556 uint64_t ldq_phys(target_phys_addr_t addr)
3558 int io_index;
3559 uint8_t *ptr;
3560 uint64_t val;
3561 unsigned long pd;
3562 PhysPageDesc *p;
3564 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3565 if (!p) {
3566 pd = IO_MEM_UNASSIGNED;
3567 } else {
3568 pd = p->phys_offset;
3571 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3572 !(pd & IO_MEM_ROMD)) {
3573 /* I/O case */
3574 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3575 if (p)
3576 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3577 #ifdef TARGET_WORDS_BIGENDIAN
3578 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3579 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3580 #else
3581 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3582 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3583 #endif
3584 } else {
3585 /* RAM case */
3586 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3587 (addr & ~TARGET_PAGE_MASK);
3588 val = ldq_p(ptr);
3590 return val;
3593 /* XXX: optimize */
3594 uint32_t ldub_phys(target_phys_addr_t addr)
3596 uint8_t val;
3597 cpu_physical_memory_read(addr, &val, 1);
3598 return val;
3601 /* XXX: optimize */
3602 uint32_t lduw_phys(target_phys_addr_t addr)
3604 uint16_t val;
3605 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3606 return tswap16(val);
3609 /* warning: addr must be aligned. The ram page is not marked as dirty
3610 and the code inside is not invalidated. It is useful if the dirty
3611 bits are used to track modified PTEs */
3612 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3614 int io_index;
3615 uint8_t *ptr;
3616 unsigned long pd;
3617 PhysPageDesc *p;
3619 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3620 if (!p) {
3621 pd = IO_MEM_UNASSIGNED;
3622 } else {
3623 pd = p->phys_offset;
3626 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3627 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3628 if (p)
3629 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3630 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3631 } else {
3632 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3633 ptr = qemu_get_ram_ptr(addr1);
3634 stl_p(ptr, val);
3636 if (unlikely(in_migration)) {
3637 if (!cpu_physical_memory_is_dirty(addr1)) {
3638 /* invalidate code */
3639 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3640 /* set dirty bit */
3641 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3642 (0xff & ~CODE_DIRTY_FLAG);
3648 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3650 int io_index;
3651 uint8_t *ptr;
3652 unsigned long pd;
3653 PhysPageDesc *p;
3655 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3656 if (!p) {
3657 pd = IO_MEM_UNASSIGNED;
3658 } else {
3659 pd = p->phys_offset;
3662 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3663 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3664 if (p)
3665 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3666 #ifdef TARGET_WORDS_BIGENDIAN
3667 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3668 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3669 #else
3670 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3671 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3672 #endif
3673 } else {
3674 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3675 (addr & ~TARGET_PAGE_MASK);
3676 stq_p(ptr, val);
3680 /* warning: addr must be aligned */
3681 void stl_phys(target_phys_addr_t addr, uint32_t val)
3683 int io_index;
3684 uint8_t *ptr;
3685 unsigned long pd;
3686 PhysPageDesc *p;
3688 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3689 if (!p) {
3690 pd = IO_MEM_UNASSIGNED;
3691 } else {
3692 pd = p->phys_offset;
3695 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3696 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3697 if (p)
3698 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3699 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3700 } else {
3701 unsigned long addr1;
3702 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3703 /* RAM case */
3704 ptr = qemu_get_ram_ptr(addr1);
3705 stl_p(ptr, val);
3706 if (!cpu_physical_memory_is_dirty(addr1)) {
3707 /* invalidate code */
3708 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3709 /* set dirty bit */
3710 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3711 (0xff & ~CODE_DIRTY_FLAG);
3716 /* XXX: optimize */
3717 void stb_phys(target_phys_addr_t addr, uint32_t val)
3719 uint8_t v = val;
3720 cpu_physical_memory_write(addr, &v, 1);
3723 /* XXX: optimize */
3724 void stw_phys(target_phys_addr_t addr, uint32_t val)
3726 uint16_t v = tswap16(val);
3727 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3730 /* XXX: optimize */
3731 void stq_phys(target_phys_addr_t addr, uint64_t val)
3733 val = tswap64(val);
3734 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3737 #endif
3739 /* virtual memory access for debug (includes writing to ROM) */
3740 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3741 uint8_t *buf, int len, int is_write)
3743 int l;
3744 target_phys_addr_t phys_addr;
3745 target_ulong page;
3747 while (len > 0) {
3748 page = addr & TARGET_PAGE_MASK;
3749 phys_addr = cpu_get_phys_page_debug(env, page);
3750 /* if no physical page mapped, return an error */
3751 if (phys_addr == -1)
3752 return -1;
3753 l = (page + TARGET_PAGE_SIZE) - addr;
3754 if (l > len)
3755 l = len;
3756 phys_addr += (addr & ~TARGET_PAGE_MASK);
3757 #if !defined(CONFIG_USER_ONLY)
3758 if (is_write)
3759 cpu_physical_memory_write_rom(phys_addr, buf, l);
3760 else
3761 #endif
3762 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3763 len -= l;
3764 buf += l;
3765 addr += l;
3767 return 0;
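/* Illustrative sketch (not from the original source): this is the helper
   the gdb stub uses to read and write guest virtual memory, roughly

       if (cpu_memory_rw_debug(env, gdb_addr, mem_buf, len, 0) != 0)
           ... report a memory access error to gdb ...

   where gdb_addr, mem_buf and len come from the debugger request and are
   shown here only as hypothetical names. */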
3770 /* in deterministic execution mode, instructions doing device I/Os
3771 must be at the end of the TB */
3772 void cpu_io_recompile(CPUState *env, void *retaddr)
3774 TranslationBlock *tb;
3775 uint32_t n, cflags;
3776 target_ulong pc, cs_base;
3777 uint64_t flags;
3779 tb = tb_find_pc((unsigned long)retaddr);
3780 if (!tb) {
3781 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3782 retaddr);
3784 n = env->icount_decr.u16.low + tb->icount;
3785 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3786 /* Calculate how many instructions had been executed before the fault
3787 occurred. */
3788 n = n - env->icount_decr.u16.low;
3789 /* Generate a new TB ending on the I/O insn. */
3790 n++;
3791 /* On MIPS and SH, delay slot instructions can only be restarted if
3792 they were already the first instruction in the TB. If this is not
3793 the first instruction in a TB then re-execute the preceding
3794 branch. */
3795 #if defined(TARGET_MIPS)
3796 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3797 env->active_tc.PC -= 4;
3798 env->icount_decr.u16.low++;
3799 env->hflags &= ~MIPS_HFLAG_BMASK;
3801 #elif defined(TARGET_SH4)
3802 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3803 && n > 1) {
3804 env->pc -= 2;
3805 env->icount_decr.u16.low++;
3806 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3808 #endif
3809 /* This should never happen. */
3810 if (n > CF_COUNT_MASK)
3811 cpu_abort(env, "TB too big during recompile");
3813 cflags = n | CF_LAST_IO;
3814 pc = tb->pc;
3815 cs_base = tb->cs_base;
3816 flags = tb->flags;
3817 tb_phys_invalidate(tb, -1);
3818 /* FIXME: In theory this could raise an exception. In practice
3819 we have already translated the block once so it's probably ok. */
3820 tb_gen_code(env, pc, cs_base, flags, cflags);
3821 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3822 the first in the TB) then we end up generating a whole new TB and
3823 repeating the fault, which is horribly inefficient.
3824 Better would be to execute just this insn uncached, or generate a
3825 second new TB. */
3826 cpu_resume_from_signal(env, NULL);
3829 void dump_exec_info(FILE *f,
3830 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3832 int i, target_code_size, max_target_code_size;
3833 int direct_jmp_count, direct_jmp2_count, cross_page;
3834 TranslationBlock *tb;
3836 target_code_size = 0;
3837 max_target_code_size = 0;
3838 cross_page = 0;
3839 direct_jmp_count = 0;
3840 direct_jmp2_count = 0;
3841 for(i = 0; i < nb_tbs; i++) {
3842 tb = &tbs[i];
3843 target_code_size += tb->size;
3844 if (tb->size > max_target_code_size)
3845 max_target_code_size = tb->size;
3846 if (tb->page_addr[1] != -1)
3847 cross_page++;
3848 if (tb->tb_next_offset[0] != 0xffff) {
3849 direct_jmp_count++;
3850 if (tb->tb_next_offset[1] != 0xffff) {
3851 direct_jmp2_count++;
3855 /* XXX: avoid using doubles? */
3856 cpu_fprintf(f, "Translation buffer state:\n");
3857 cpu_fprintf(f, "gen code size %ld/%ld\n",
3858 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3859 cpu_fprintf(f, "TB count %d/%d\n",
3860 nb_tbs, code_gen_max_blocks);
3861 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3862 nb_tbs ? target_code_size / nb_tbs : 0,
3863 max_target_code_size);
3864 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3865 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3866 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3867 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3868 cross_page,
3869 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3870 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3871 direct_jmp_count,
3872 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3873 direct_jmp2_count,
3874 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3875 cpu_fprintf(f, "\nStatistics:\n");
3876 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3877 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3878 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3879 tcg_dump_info(f, cpu_fprintf);
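/* Note (added, not in the original source): dump_exec_info() is what the
   monitor's "info jit" command ends up calling, passing the monitor's
   output stream and its fprintf-like callback. */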
3882 #if !defined(CONFIG_USER_ONLY)
3884 #define MMUSUFFIX _cmmu
3885 #define GETPC() NULL
3886 #define env cpu_single_env
3887 #define SOFTMMU_CODE_ACCESS
3889 #define SHIFT 0
3890 #include "softmmu_template.h"
3892 #define SHIFT 1
3893 #include "softmmu_template.h"
3895 #define SHIFT 2
3896 #include "softmmu_template.h"
3898 #define SHIFT 3
3899 #include "softmmu_template.h"
3901 #undef env
3903 #endif