Remove uses of ram.last_offset (aka last_ram_offset)
[qemu.git] / exec.c
blob a6b3f21a153c311e80e22ecc3030d2bae2a3b4e1
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "tcg.h"
38 #include "hw/hw.h"
39 #include "osdep.h"
40 #include "kvm.h"
41 #include "qemu-timer.h"
42 #if defined(CONFIG_USER_ONLY)
43 #include <qemu.h>
44 #include <signal.h>
45 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46 #include <sys/param.h>
47 #if __FreeBSD_version >= 700104
48 #define HAVE_KINFO_GETVMMAP
49 #define sigqueue sigqueue_freebsd /* avoid redefinition */
50 #include <sys/time.h>
51 #include <sys/proc.h>
52 #include <machine/profile.h>
53 #define _KERNEL
54 #include <sys/user.h>
55 #undef _KERNEL
56 #undef sigqueue
57 #include <libutil.h>
58 #endif
59 #endif
60 #endif
62 //#define DEBUG_TB_INVALIDATE
63 //#define DEBUG_FLUSH
64 //#define DEBUG_TLB
65 //#define DEBUG_UNASSIGNED
67 /* make various TB consistency checks */
68 //#define DEBUG_TB_CHECK
69 //#define DEBUG_TLB_CHECK
71 //#define DEBUG_IOPORT
72 //#define DEBUG_SUBPAGE
74 #if !defined(CONFIG_USER_ONLY)
75 /* TB consistency checks only implemented for usermode emulation. */
76 #undef DEBUG_TB_CHECK
77 #endif
79 #define SMC_BITMAP_USE_THRESHOLD 10
81 static TranslationBlock *tbs;
82 int code_gen_max_blocks;
83 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84 static int nb_tbs;
85 /* any access to the tbs or the page table must use this lock */
86 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88 #if defined(__arm__) || defined(__sparc_v9__)
89 /* The prologue must be reachable with a direct jump. ARM and Sparc64
90 have limited branch ranges (possibly also PPC) so place it in a
91 section close to code segment. */
92 #define code_gen_section \
93 __attribute__((__section__(".gen_code"))) \
94 __attribute__((aligned (32)))
95 #elif defined(_WIN32)
96 /* Maximum alignment for Win32 is 16. */
97 #define code_gen_section \
98 __attribute__((aligned (16)))
99 #else
100 #define code_gen_section \
101 __attribute__((aligned (32)))
102 #endif
104 uint8_t code_gen_prologue[1024] code_gen_section;
105 static uint8_t *code_gen_buffer;
106 static unsigned long code_gen_buffer_size;
107 /* threshold to flush the translated code buffer */
108 static unsigned long code_gen_buffer_max_size;
109 uint8_t *code_gen_ptr;
111 #if !defined(CONFIG_USER_ONLY)
112 int phys_ram_fd;
113 static int in_migration;
115 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
116 #endif
118 CPUState *first_cpu;
119 /* current CPU in the current thread. It is only valid inside
120 cpu_exec() */
121 CPUState *cpu_single_env;
122 /* 0 = Do not count executed instructions.
123 1 = Precise instruction counting.
124 2 = Adaptive rate instruction counting. */
125 int use_icount = 0;
126 /* Current instruction counter. While executing translated code this may
127 include some instructions that have not yet been executed. */
128 int64_t qemu_icount;
130 typedef struct PageDesc {
131 /* list of TBs intersecting this ram page */
132 TranslationBlock *first_tb;
133 /* in order to optimize self modifying code, we count the number
134 of lookups we do to a given page to use a bitmap */
135 unsigned int code_write_count;
136 uint8_t *code_bitmap;
137 #if defined(CONFIG_USER_ONLY)
138 unsigned long flags;
139 #endif
140 } PageDesc;
142 /* In system mode we want L1_MAP to be based on ram offsets,
143 while in user mode we want it to be based on virtual addresses. */
144 #if !defined(CONFIG_USER_ONLY)
145 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
146 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
147 #else
148 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
149 #endif
150 #else
151 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
152 #endif
154 /* Size of the L2 (and L3, etc) page tables. */
155 #define L2_BITS 10
156 #define L2_SIZE (1 << L2_BITS)
158 /* The bits remaining after N lower levels of page tables. */
159 #define P_L1_BITS_REM \
160 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
161 #define V_L1_BITS_REM \
162 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
164 /* Size of the L1 page table. Avoid silly small sizes. */
165 #if P_L1_BITS_REM < 4
166 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
167 #else
168 #define P_L1_BITS P_L1_BITS_REM
169 #endif
171 #if V_L1_BITS_REM < 4
172 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
173 #else
174 #define V_L1_BITS V_L1_BITS_REM
175 #endif
177 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
178 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
180 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
181 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
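/* Worked example (illustrative values only; the real ones depend on the
   target): with 12-bit target pages and a 36-bit physical address space,
   36 - 12 = 24 index bits remain and 24 % 10 = 4, so P_L1_BITS = 4 and
   P_L1_SHIFT = 36 - 12 - 4 = 20.  The physical map is then a 16-entry L1
   table above two 1024-entry levels (4 + 10 + 10 index bits), with the
   remaining 12 bits being the in-page offset. */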
183 unsigned long qemu_real_host_page_size;
184 unsigned long qemu_host_page_bits;
185 unsigned long qemu_host_page_size;
186 unsigned long qemu_host_page_mask;
188 /* This is a multi-level map on the virtual address space.
189 The bottom level has pointers to PageDesc. */
190 static void *l1_map[V_L1_SIZE];
192 #if !defined(CONFIG_USER_ONLY)
193 typedef struct PhysPageDesc {
194 /* offset in host memory of the page + io_index in the low bits */
195 ram_addr_t phys_offset;
196 ram_addr_t region_offset;
197 } PhysPageDesc;
199 /* This is a multi-level map on the physical address space.
200 The bottom level has pointers to PhysPageDesc. */
201 static void *l1_phys_map[P_L1_SIZE];
203 static void io_mem_init(void);
205 /* io memory support */
206 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
207 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
208 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
209 static char io_mem_used[IO_MEM_NB_ENTRIES];
210 static int io_mem_watch;
211 #endif
213 /* log support */
214 #ifdef WIN32
215 static const char *logfilename = "qemu.log";
216 #else
217 static const char *logfilename = "/tmp/qemu.log";
218 #endif
219 FILE *logfile;
220 int loglevel;
221 static int log_append = 0;
223 /* statistics */
224 #if !defined(CONFIG_USER_ONLY)
225 static int tlb_flush_count;
226 #endif
227 static int tb_flush_count;
228 static int tb_phys_invalidate_count;
230 #ifdef _WIN32
231 static void map_exec(void *addr, long size)
233 DWORD old_protect;
234 VirtualProtect(addr, size,
235 PAGE_EXECUTE_READWRITE, &old_protect);
238 #else
239 static void map_exec(void *addr, long size)
241 unsigned long start, end, page_size;
243 page_size = getpagesize();
244 start = (unsigned long)addr;
245 start &= ~(page_size - 1);
247 end = (unsigned long)addr + size;
248 end += page_size - 1;
249 end &= ~(page_size - 1);
251 mprotect((void *)start, end - start,
252 PROT_READ | PROT_WRITE | PROT_EXEC);
254 #endif
256 static void page_init(void)
258 /* NOTE: we can always suppose that qemu_host_page_size >=
259 TARGET_PAGE_SIZE */
260 #ifdef _WIN32
262 SYSTEM_INFO system_info;
264 GetSystemInfo(&system_info);
265 qemu_real_host_page_size = system_info.dwPageSize;
267 #else
268 qemu_real_host_page_size = getpagesize();
269 #endif
270 if (qemu_host_page_size == 0)
271 qemu_host_page_size = qemu_real_host_page_size;
272 if (qemu_host_page_size < TARGET_PAGE_SIZE)
273 qemu_host_page_size = TARGET_PAGE_SIZE;
274 qemu_host_page_bits = 0;
275 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
276 qemu_host_page_bits++;
277 qemu_host_page_mask = ~(qemu_host_page_size - 1);
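/* For instance, a 4096-byte host page yields qemu_host_page_bits = 12 and
   qemu_host_page_mask = ~0xfff, so "addr & qemu_host_page_mask" rounds an
   address down to its host page, as tb_alloc_page() does below. */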
279 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
281 #ifdef HAVE_KINFO_GETVMMAP
282 struct kinfo_vmentry *freep;
283 int i, cnt;
285 freep = kinfo_getvmmap(getpid(), &cnt);
286 if (freep) {
287 mmap_lock();
288 for (i = 0; i < cnt; i++) {
289 unsigned long startaddr, endaddr;
291 startaddr = freep[i].kve_start;
292 endaddr = freep[i].kve_end;
293 if (h2g_valid(startaddr)) {
294 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
296 if (h2g_valid(endaddr)) {
297 endaddr = h2g(endaddr);
298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
299 } else {
300 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
301 endaddr = ~0ul;
302 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
303 #endif
307 free(freep);
308 mmap_unlock();
310 #else
311 FILE *f;
313 last_brk = (unsigned long)sbrk(0);
315 f = fopen("/compat/linux/proc/self/maps", "r");
316 if (f) {
317 mmap_lock();
319 do {
320 unsigned long startaddr, endaddr;
321 int n;
323 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
325 if (n == 2 && h2g_valid(startaddr)) {
326 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
328 if (h2g_valid(endaddr)) {
329 endaddr = h2g(endaddr);
330 } else {
331 endaddr = ~0ul;
333 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
335 } while (!feof(f));
337 fclose(f);
338 mmap_unlock();
340 #endif
342 #endif
345 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
347 PageDesc *pd;
348 void **lp;
349 int i;
351 #if defined(CONFIG_USER_ONLY)
352 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
353 # define ALLOC(P, SIZE) \
354 do { \
355 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
356 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
357 } while (0)
358 #else
359 # define ALLOC(P, SIZE) \
360 do { P = qemu_mallocz(SIZE); } while (0)
361 #endif
363 /* Level 1. Always allocated. */
364 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
366 /* Level 2..N-1. */
367 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
368 void **p = *lp;
370 if (p == NULL) {
371 if (!alloc) {
372 return NULL;
374 ALLOC(p, sizeof(void *) * L2_SIZE);
375 *lp = p;
378 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
381 pd = *lp;
382 if (pd == NULL) {
383 if (!alloc) {
384 return NULL;
386 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
387 *lp = pd;
390 #undef ALLOC
392 return pd + (index & (L2_SIZE - 1));
395 static inline PageDesc *page_find(tb_page_addr_t index)
397 return page_find_alloc(index, 0);
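/* The walk above consumes the page index from the top down: the L1 table
   is indexed with the uppermost V_L1_BITS bits and every lower level with
   the next L2_BITS, so callers always pass a page number, e.g.
   page_find(addr >> TARGET_PAGE_BITS) as the call sites below do. */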
400 #if !defined(CONFIG_USER_ONLY)
401 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
403 PhysPageDesc *pd;
404 void **lp;
405 int i;
407 /* Level 1. Always allocated. */
408 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
410 /* Level 2..N-1. */
411 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
412 void **p = *lp;
413 if (p == NULL) {
414 if (!alloc) {
415 return NULL;
417 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
419 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
422 pd = *lp;
423 if (pd == NULL) {
424 int i;
426 if (!alloc) {
427 return NULL;
430 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
432 for (i = 0; i < L2_SIZE; i++) {
433 pd[i].phys_offset = IO_MEM_UNASSIGNED;
434 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
438 return pd + (index & (L2_SIZE - 1));
441 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
443 return phys_page_find_alloc(index, 0);
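/* Note how phys_offset is consumed later (e.g. in tlb_set_page):
   "pd & TARGET_PAGE_MASK" yields the ram offset while
   "pd & ~TARGET_PAGE_MASK" yields the io_index / IO_MEM_* type bits,
   IO_MEM_UNASSIGNED being the default set just above for pages that
   were never registered. */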
446 static void tlb_protect_code(ram_addr_t ram_addr);
447 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
448 target_ulong vaddr);
449 #define mmap_lock() do { } while(0)
450 #define mmap_unlock() do { } while(0)
451 #endif
453 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
455 #if defined(CONFIG_USER_ONLY)
456 /* Currently it is not recommended to allocate big chunks of data in
457 user mode. This will change when a dedicated libc is used */
458 #define USE_STATIC_CODE_GEN_BUFFER
459 #endif
461 #ifdef USE_STATIC_CODE_GEN_BUFFER
462 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
463 __attribute__((aligned (CODE_GEN_ALIGN)));
464 #endif
466 static void code_gen_alloc(unsigned long tb_size)
468 #ifdef USE_STATIC_CODE_GEN_BUFFER
469 code_gen_buffer = static_code_gen_buffer;
470 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
471 map_exec(code_gen_buffer, code_gen_buffer_size);
472 #else
473 code_gen_buffer_size = tb_size;
474 if (code_gen_buffer_size == 0) {
475 #if defined(CONFIG_USER_ONLY)
476 /* in user mode, phys_ram_size is not meaningful */
477 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
478 #else
479 /* XXX: needs adjustments */
480 code_gen_buffer_size = (unsigned long)(ram_size / 4);
481 #endif
483 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
484 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
485 /* The code gen buffer location may have constraints depending on
486 the host cpu and OS */
487 #if defined(__linux__)
489 int flags;
490 void *start = NULL;
492 flags = MAP_PRIVATE | MAP_ANONYMOUS;
493 #if defined(__x86_64__)
494 flags |= MAP_32BIT;
495 /* Cannot map more than that */
496 if (code_gen_buffer_size > (800 * 1024 * 1024))
497 code_gen_buffer_size = (800 * 1024 * 1024);
498 #elif defined(__sparc_v9__)
499 // Map the buffer below 2G, so we can use direct calls and branches
500 flags |= MAP_FIXED;
501 start = (void *) 0x60000000UL;
502 if (code_gen_buffer_size > (512 * 1024 * 1024))
503 code_gen_buffer_size = (512 * 1024 * 1024);
504 #elif defined(__arm__)
505 /* Map the buffer below 32M, so we can use direct calls and branches */
506 flags |= MAP_FIXED;
507 start = (void *) 0x01000000UL;
508 if (code_gen_buffer_size > 16 * 1024 * 1024)
509 code_gen_buffer_size = 16 * 1024 * 1024;
510 #elif defined(__s390x__)
511 /* Map the buffer so that we can use direct calls and branches. */
512 /* We have a +- 4GB range on the branches; leave some slop. */
513 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
514 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
516 start = (void *)0x90000000UL;
517 #endif
518 code_gen_buffer = mmap(start, code_gen_buffer_size,
519 PROT_WRITE | PROT_READ | PROT_EXEC,
520 flags, -1, 0);
521 if (code_gen_buffer == MAP_FAILED) {
522 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
523 exit(1);
526 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
528 int flags;
529 void *addr = NULL;
530 flags = MAP_PRIVATE | MAP_ANONYMOUS;
531 #if defined(__x86_64__)
532 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
533 * 0x40000000 is free */
534 flags |= MAP_FIXED;
535 addr = (void *)0x40000000;
536 /* Cannot map more than that */
537 if (code_gen_buffer_size > (800 * 1024 * 1024))
538 code_gen_buffer_size = (800 * 1024 * 1024);
539 #endif
540 code_gen_buffer = mmap(addr, code_gen_buffer_size,
541 PROT_WRITE | PROT_READ | PROT_EXEC,
542 flags, -1, 0);
543 if (code_gen_buffer == MAP_FAILED) {
544 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
545 exit(1);
548 #else
549 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
550 map_exec(code_gen_buffer, code_gen_buffer_size);
551 #endif
552 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
553 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
554 code_gen_buffer_max_size = code_gen_buffer_size -
555 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
556 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
557 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
560 /* Must be called before using the QEMU cpus. 'tb_size' is the size
561 (in bytes) allocated to the translation buffer. Zero means default
562 size. */
563 void cpu_exec_init_all(unsigned long tb_size)
565 cpu_gen_init();
566 code_gen_alloc(tb_size);
567 code_gen_ptr = code_gen_buffer;
568 page_init();
569 #if !defined(CONFIG_USER_ONLY)
570 io_mem_init();
571 #endif
572 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
573 /* There's no guest base to take into account, so go ahead and
574 initialize the prologue now. */
575 tcg_prologue_init(&tcg_ctx);
576 #endif
579 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
581 static int cpu_common_post_load(void *opaque, int version_id)
583 CPUState *env = opaque;
585 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
586 version_id is increased. */
587 env->interrupt_request &= ~0x01;
588 tlb_flush(env, 1);
590 return 0;
593 static const VMStateDescription vmstate_cpu_common = {
594 .name = "cpu_common",
595 .version_id = 1,
596 .minimum_version_id = 1,
597 .minimum_version_id_old = 1,
598 .post_load = cpu_common_post_load,
599 .fields = (VMStateField []) {
600 VMSTATE_UINT32(halted, CPUState),
601 VMSTATE_UINT32(interrupt_request, CPUState),
602 VMSTATE_END_OF_LIST()
605 #endif
607 CPUState *qemu_get_cpu(int cpu)
609 CPUState *env = first_cpu;
611 while (env) {
612 if (env->cpu_index == cpu)
613 break;
614 env = env->next_cpu;
617 return env;
620 void cpu_exec_init(CPUState *env)
622 CPUState **penv;
623 int cpu_index;
625 #if defined(CONFIG_USER_ONLY)
626 cpu_list_lock();
627 #endif
628 env->next_cpu = NULL;
629 penv = &first_cpu;
630 cpu_index = 0;
631 while (*penv != NULL) {
632 penv = &(*penv)->next_cpu;
633 cpu_index++;
635 env->cpu_index = cpu_index;
636 env->numa_node = 0;
637 QTAILQ_INIT(&env->breakpoints);
638 QTAILQ_INIT(&env->watchpoints);
639 *penv = env;
640 #if defined(CONFIG_USER_ONLY)
641 cpu_list_unlock();
642 #endif
643 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
644 vmstate_register(cpu_index, &vmstate_cpu_common, env);
645 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
646 cpu_save, cpu_load, env);
647 #endif
650 static inline void invalidate_page_bitmap(PageDesc *p)
652 if (p->code_bitmap) {
653 qemu_free(p->code_bitmap);
654 p->code_bitmap = NULL;
656 p->code_write_count = 0;
659 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
661 static void page_flush_tb_1 (int level, void **lp)
663 int i;
665 if (*lp == NULL) {
666 return;
668 if (level == 0) {
669 PageDesc *pd = *lp;
670 for (i = 0; i < L2_SIZE; ++i) {
671 pd[i].first_tb = NULL;
672 invalidate_page_bitmap(pd + i);
674 } else {
675 void **pp = *lp;
676 for (i = 0; i < L2_SIZE; ++i) {
677 page_flush_tb_1 (level - 1, pp + i);
682 static void page_flush_tb(void)
684 int i;
685 for (i = 0; i < V_L1_SIZE; i++) {
686 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
690 /* flush all the translation blocks */
691 /* XXX: tb_flush is currently not thread safe */
692 void tb_flush(CPUState *env1)
694 CPUState *env;
695 #if defined(DEBUG_FLUSH)
696 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
697 (unsigned long)(code_gen_ptr - code_gen_buffer),
698 nb_tbs, nb_tbs > 0 ?
699 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
700 #endif
701 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
702 cpu_abort(env1, "Internal error: code buffer overflow\n");
704 nb_tbs = 0;
706 for(env = first_cpu; env != NULL; env = env->next_cpu) {
707 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
710 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
711 page_flush_tb();
713 code_gen_ptr = code_gen_buffer;
714 /* XXX: flush processor icache at this point if cache flush is
715 expensive */
716 tb_flush_count++;
719 #ifdef DEBUG_TB_CHECK
721 static void tb_invalidate_check(target_ulong address)
723 TranslationBlock *tb;
724 int i;
725 address &= TARGET_PAGE_MASK;
726 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
727 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
728 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
729 address >= tb->pc + tb->size)) {
730 printf("ERROR invalidate: address=" TARGET_FMT_lx
731 " PC=%08lx size=%04x\n",
732 address, (long)tb->pc, tb->size);
738 /* verify that all the pages have correct rights for code */
739 static void tb_page_check(void)
741 TranslationBlock *tb;
742 int i, flags1, flags2;
744 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
745 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
746 flags1 = page_get_flags(tb->pc);
747 flags2 = page_get_flags(tb->pc + tb->size - 1);
748 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
749 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
750 (long)tb->pc, tb->size, flags1, flags2);
756 #endif
758 /* invalidate one TB */
759 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
760 int next_offset)
762 TranslationBlock *tb1;
763 for(;;) {
764 tb1 = *ptb;
765 if (tb1 == tb) {
766 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
767 break;
769 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
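/* The per-page TB lists below use tagged pointers: the low two bits of
   each stored TranslationBlock pointer hold n, the index (0 or 1) of the
   physical page the link belongs to, so "(long)tb & 3" recovers n and
   "(long)tb & ~3" the real pointer. */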
773 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
775 TranslationBlock *tb1;
776 unsigned int n1;
778 for(;;) {
779 tb1 = *ptb;
780 n1 = (long)tb1 & 3;
781 tb1 = (TranslationBlock *)((long)tb1 & ~3);
782 if (tb1 == tb) {
783 *ptb = tb1->page_next[n1];
784 break;
786 ptb = &tb1->page_next[n1];
790 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
792 TranslationBlock *tb1, **ptb;
793 unsigned int n1;
795 ptb = &tb->jmp_next[n];
796 tb1 = *ptb;
797 if (tb1) {
798 /* find tb(n) in circular list */
799 for(;;) {
800 tb1 = *ptb;
801 n1 = (long)tb1 & 3;
802 tb1 = (TranslationBlock *)((long)tb1 & ~3);
803 if (n1 == n && tb1 == tb)
804 break;
805 if (n1 == 2) {
806 ptb = &tb1->jmp_first;
807 } else {
808 ptb = &tb1->jmp_next[n1];
811 /* now we can suppress tb(n) from the list */
812 *ptb = tb->jmp_next[n];
814 tb->jmp_next[n] = NULL;
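/* The jmp_first/jmp_next pointers use the same low-bit tagging, with the
   tag value 2 marking the head of the circular list (the owning TB itself,
   stored as "(long)tb | 2" in tb_link_page below); that is why the loop
   above terminates when n1 == 2. */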
818 /* reset the jump entry 'n' of a TB so that it is not chained to
819 another TB */
820 static inline void tb_reset_jump(TranslationBlock *tb, int n)
822 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
825 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
827 CPUState *env;
828 PageDesc *p;
829 unsigned int h, n1;
830 tb_page_addr_t phys_pc;
831 TranslationBlock *tb1, *tb2;
833 /* remove the TB from the hash list */
834 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
835 h = tb_phys_hash_func(phys_pc);
836 tb_remove(&tb_phys_hash[h], tb,
837 offsetof(TranslationBlock, phys_hash_next));
839 /* remove the TB from the page list */
840 if (tb->page_addr[0] != page_addr) {
841 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
842 tb_page_remove(&p->first_tb, tb);
843 invalidate_page_bitmap(p);
845 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
846 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
847 tb_page_remove(&p->first_tb, tb);
848 invalidate_page_bitmap(p);
851 tb_invalidated_flag = 1;
853 /* remove the TB from the hash list */
854 h = tb_jmp_cache_hash_func(tb->pc);
855 for(env = first_cpu; env != NULL; env = env->next_cpu) {
856 if (env->tb_jmp_cache[h] == tb)
857 env->tb_jmp_cache[h] = NULL;
860 /* suppress this TB from the two jump lists */
861 tb_jmp_remove(tb, 0);
862 tb_jmp_remove(tb, 1);
864 /* suppress any remaining jumps to this TB */
865 tb1 = tb->jmp_first;
866 for(;;) {
867 n1 = (long)tb1 & 3;
868 if (n1 == 2)
869 break;
870 tb1 = (TranslationBlock *)((long)tb1 & ~3);
871 tb2 = tb1->jmp_next[n1];
872 tb_reset_jump(tb1, n1);
873 tb1->jmp_next[n1] = NULL;
874 tb1 = tb2;
876 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
878 tb_phys_invalidate_count++;
881 static inline void set_bits(uint8_t *tab, int start, int len)
883 int end, mask, end1;
885 end = start + len;
886 tab += start >> 3;
887 mask = 0xff << (start & 7);
888 if ((start & ~7) == (end & ~7)) {
889 if (start < end) {
890 mask &= ~(0xff << (end & 7));
891 *tab |= mask;
893 } else {
894 *tab++ |= mask;
895 start = (start + 8) & ~7;
896 end1 = end & ~7;
897 while (start < end1) {
898 *tab++ = 0xff;
899 start += 8;
901 if (start < end) {
902 mask = ~(0xff << (end & 7));
903 *tab |= mask;
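/* Example: set_bits(tab, 5, 7) marks bits 5..11: the first byte is OR-ed
   with 0xe0 (bits 5-7) and the following byte with 0x0f (bits 8-11). */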
908 static void build_page_bitmap(PageDesc *p)
910 int n, tb_start, tb_end;
911 TranslationBlock *tb;
913 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
915 tb = p->first_tb;
916 while (tb != NULL) {
917 n = (long)tb & 3;
918 tb = (TranslationBlock *)((long)tb & ~3);
919 /* NOTE: this is subtle as a TB may span two physical pages */
920 if (n == 0) {
921 /* NOTE: tb_end may be after the end of the page, but
922 it is not a problem */
923 tb_start = tb->pc & ~TARGET_PAGE_MASK;
924 tb_end = tb_start + tb->size;
925 if (tb_end > TARGET_PAGE_SIZE)
926 tb_end = TARGET_PAGE_SIZE;
927 } else {
928 tb_start = 0;
929 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
931 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
932 tb = tb->page_next[n];
936 TranslationBlock *tb_gen_code(CPUState *env,
937 target_ulong pc, target_ulong cs_base,
938 int flags, int cflags)
940 TranslationBlock *tb;
941 uint8_t *tc_ptr;
942 tb_page_addr_t phys_pc, phys_page2;
943 target_ulong virt_page2;
944 int code_gen_size;
946 phys_pc = get_page_addr_code(env, pc);
947 tb = tb_alloc(pc);
948 if (!tb) {
949 /* flush must be done */
950 tb_flush(env);
951 /* cannot fail at this point */
952 tb = tb_alloc(pc);
953 /* Don't forget to invalidate previous TB info. */
954 tb_invalidated_flag = 1;
956 tc_ptr = code_gen_ptr;
957 tb->tc_ptr = tc_ptr;
958 tb->cs_base = cs_base;
959 tb->flags = flags;
960 tb->cflags = cflags;
961 cpu_gen_code(env, tb, &code_gen_size);
962 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
964 /* check next page if needed */
965 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
966 phys_page2 = -1;
967 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
968 phys_page2 = get_page_addr_code(env, virt_page2);
970 tb_link_page(tb, phys_pc, phys_page2);
971 return tb;
974 /* invalidate all TBs which intersect with the target physical page
975 starting in range [start;end[. NOTE: start and end must refer to
976 the same physical page. 'is_cpu_write_access' should be true if called
977 from a real cpu write access: the virtual CPU will exit the current
978 TB if code is modified inside this TB. */
979 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
980 int is_cpu_write_access)
982 TranslationBlock *tb, *tb_next, *saved_tb;
983 CPUState *env = cpu_single_env;
984 tb_page_addr_t tb_start, tb_end;
985 PageDesc *p;
986 int n;
987 #ifdef TARGET_HAS_PRECISE_SMC
988 int current_tb_not_found = is_cpu_write_access;
989 TranslationBlock *current_tb = NULL;
990 int current_tb_modified = 0;
991 target_ulong current_pc = 0;
992 target_ulong current_cs_base = 0;
993 int current_flags = 0;
994 #endif /* TARGET_HAS_PRECISE_SMC */
996 p = page_find(start >> TARGET_PAGE_BITS);
997 if (!p)
998 return;
999 if (!p->code_bitmap &&
1000 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1001 is_cpu_write_access) {
1002 /* build code bitmap */
1003 build_page_bitmap(p);
1006 /* we remove all the TBs in the range [start, end[ */
1007 /* XXX: see if in some cases it could be faster to invalidate all the code */
1008 tb = p->first_tb;
1009 while (tb != NULL) {
1010 n = (long)tb & 3;
1011 tb = (TranslationBlock *)((long)tb & ~3);
1012 tb_next = tb->page_next[n];
1013 /* NOTE: this is subtle as a TB may span two physical pages */
1014 if (n == 0) {
1015 /* NOTE: tb_end may be after the end of the page, but
1016 it is not a problem */
1017 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1018 tb_end = tb_start + tb->size;
1019 } else {
1020 tb_start = tb->page_addr[1];
1021 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1023 if (!(tb_end <= start || tb_start >= end)) {
1024 #ifdef TARGET_HAS_PRECISE_SMC
1025 if (current_tb_not_found) {
1026 current_tb_not_found = 0;
1027 current_tb = NULL;
1028 if (env->mem_io_pc) {
1029 /* now we have a real cpu fault */
1030 current_tb = tb_find_pc(env->mem_io_pc);
1033 if (current_tb == tb &&
1034 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1035 /* If we are modifying the current TB, we must stop
1036 its execution. We could be more precise by checking
1037 that the modification is after the current PC, but it
1038 would require a specialized function to partially
1039 restore the CPU state */
1041 current_tb_modified = 1;
1042 cpu_restore_state(current_tb, env,
1043 env->mem_io_pc, NULL);
1044 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1045 &current_flags);
1047 #endif /* TARGET_HAS_PRECISE_SMC */
1048 /* we need to do that to handle the case where a signal
1049 occurs while doing tb_phys_invalidate() */
1050 saved_tb = NULL;
1051 if (env) {
1052 saved_tb = env->current_tb;
1053 env->current_tb = NULL;
1055 tb_phys_invalidate(tb, -1);
1056 if (env) {
1057 env->current_tb = saved_tb;
1058 if (env->interrupt_request && env->current_tb)
1059 cpu_interrupt(env, env->interrupt_request);
1062 tb = tb_next;
1064 #if !defined(CONFIG_USER_ONLY)
1065 /* if no code remaining, no need to continue to use slow writes */
1066 if (!p->first_tb) {
1067 invalidate_page_bitmap(p);
1068 if (is_cpu_write_access) {
1069 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1072 #endif
1073 #ifdef TARGET_HAS_PRECISE_SMC
1074 if (current_tb_modified) {
1075 /* we generate a block containing just the instruction
1076 modifying the memory. It will ensure that it cannot modify
1077 itself */
1078 env->current_tb = NULL;
1079 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1080 cpu_resume_from_signal(env, NULL);
1082 #endif
1085 /* len must be <= 8 and start must be a multiple of len */
1086 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1088 PageDesc *p;
1089 int offset, b;
1090 #if 0
1091 if (1) {
1092 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1093 cpu_single_env->mem_io_vaddr, len,
1094 cpu_single_env->eip,
1095 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1097 #endif
1098 p = page_find(start >> TARGET_PAGE_BITS);
1099 if (!p)
1100 return;
1101 if (p->code_bitmap) {
1102 offset = start & ~TARGET_PAGE_MASK;
1103 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1104 if (b & ((1 << len) - 1))
1105 goto do_invalidate;
1106 } else {
1107 do_invalidate:
1108 tb_invalidate_phys_page_range(start, start + len, 1);
1112 #if !defined(CONFIG_SOFTMMU)
1113 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1114 unsigned long pc, void *puc)
1116 TranslationBlock *tb;
1117 PageDesc *p;
1118 int n;
1119 #ifdef TARGET_HAS_PRECISE_SMC
1120 TranslationBlock *current_tb = NULL;
1121 CPUState *env = cpu_single_env;
1122 int current_tb_modified = 0;
1123 target_ulong current_pc = 0;
1124 target_ulong current_cs_base = 0;
1125 int current_flags = 0;
1126 #endif
1128 addr &= TARGET_PAGE_MASK;
1129 p = page_find(addr >> TARGET_PAGE_BITS);
1130 if (!p)
1131 return;
1132 tb = p->first_tb;
1133 #ifdef TARGET_HAS_PRECISE_SMC
1134 if (tb && pc != 0) {
1135 current_tb = tb_find_pc(pc);
1137 #endif
1138 while (tb != NULL) {
1139 n = (long)tb & 3;
1140 tb = (TranslationBlock *)((long)tb & ~3);
1141 #ifdef TARGET_HAS_PRECISE_SMC
1142 if (current_tb == tb &&
1143 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1144 /* If we are modifying the current TB, we must stop
1145 its execution. We could be more precise by checking
1146 that the modification is after the current PC, but it
1147 would require a specialized function to partially
1148 restore the CPU state */
1150 current_tb_modified = 1;
1151 cpu_restore_state(current_tb, env, pc, puc);
1152 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1153 &current_flags);
1155 #endif /* TARGET_HAS_PRECISE_SMC */
1156 tb_phys_invalidate(tb, addr);
1157 tb = tb->page_next[n];
1159 p->first_tb = NULL;
1160 #ifdef TARGET_HAS_PRECISE_SMC
1161 if (current_tb_modified) {
1162 /* we generate a block containing just the instruction
1163 modifying the memory. It will ensure that it cannot modify
1164 itself */
1165 env->current_tb = NULL;
1166 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1167 cpu_resume_from_signal(env, puc);
1169 #endif
1171 #endif
1173 /* add the tb in the target page and protect it if necessary */
1174 static inline void tb_alloc_page(TranslationBlock *tb,
1175 unsigned int n, tb_page_addr_t page_addr)
1177 PageDesc *p;
1178 TranslationBlock *last_first_tb;
1180 tb->page_addr[n] = page_addr;
1181 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1182 tb->page_next[n] = p->first_tb;
1183 last_first_tb = p->first_tb;
1184 p->first_tb = (TranslationBlock *)((long)tb | n);
1185 invalidate_page_bitmap(p);
1187 #if defined(TARGET_HAS_SMC) || 1
1189 #if defined(CONFIG_USER_ONLY)
1190 if (p->flags & PAGE_WRITE) {
1191 target_ulong addr;
1192 PageDesc *p2;
1193 int prot;
1195 /* force the host page as non writable (writes will have a
1196 page fault + mprotect overhead) */
1197 page_addr &= qemu_host_page_mask;
1198 prot = 0;
1199 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1200 addr += TARGET_PAGE_SIZE) {
1202 p2 = page_find (addr >> TARGET_PAGE_BITS);
1203 if (!p2)
1204 continue;
1205 prot |= p2->flags;
1206 p2->flags &= ~PAGE_WRITE;
1208 mprotect(g2h(page_addr), qemu_host_page_size,
1209 (prot & PAGE_BITS) & ~PAGE_WRITE);
1210 #ifdef DEBUG_TB_INVALIDATE
1211 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1212 page_addr);
1213 #endif
1215 #else
1216 /* if some code is already present, then the pages are already
1217 protected. So we handle the case where only the first TB is
1218 allocated in a physical page */
1219 if (!last_first_tb) {
1220 tlb_protect_code(page_addr);
1222 #endif
1224 #endif /* TARGET_HAS_SMC */
1227 /* Allocate a new translation block. Flush the translation buffer if
1228 too many translation blocks or too much generated code. */
1229 TranslationBlock *tb_alloc(target_ulong pc)
1231 TranslationBlock *tb;
1233 if (nb_tbs >= code_gen_max_blocks ||
1234 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1235 return NULL;
1236 tb = &tbs[nb_tbs++];
1237 tb->pc = pc;
1238 tb->cflags = 0;
1239 return tb;
1242 void tb_free(TranslationBlock *tb)
1244 /* In practice this is mostly used for single use temporary TB
1245 Ignore the hard cases and just back up if this TB happens to
1246 be the last one generated. */
1247 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1248 code_gen_ptr = tb->tc_ptr;
1249 nb_tbs--;
1253 /* add a new TB and link it to the physical page tables. phys_page2 is
1254 (-1) to indicate that only one page contains the TB. */
1255 void tb_link_page(TranslationBlock *tb,
1256 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1258 unsigned int h;
1259 TranslationBlock **ptb;
1261 /* Grab the mmap lock to stop another thread invalidating this TB
1262 before we are done. */
1263 mmap_lock();
1264 /* add in the physical hash table */
1265 h = tb_phys_hash_func(phys_pc);
1266 ptb = &tb_phys_hash[h];
1267 tb->phys_hash_next = *ptb;
1268 *ptb = tb;
1270 /* add in the page list */
1271 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1272 if (phys_page2 != -1)
1273 tb_alloc_page(tb, 1, phys_page2);
1274 else
1275 tb->page_addr[1] = -1;
1277 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1278 tb->jmp_next[0] = NULL;
1279 tb->jmp_next[1] = NULL;
1281 /* init original jump addresses */
1282 if (tb->tb_next_offset[0] != 0xffff)
1283 tb_reset_jump(tb, 0);
1284 if (tb->tb_next_offset[1] != 0xffff)
1285 tb_reset_jump(tb, 1);
1287 #ifdef DEBUG_TB_CHECK
1288 tb_page_check();
1289 #endif
1290 mmap_unlock();
1293 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1294 tb[1].tc_ptr. Return NULL if not found */
1295 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1297 int m_min, m_max, m;
1298 unsigned long v;
1299 TranslationBlock *tb;
1301 if (nb_tbs <= 0)
1302 return NULL;
1303 if (tc_ptr < (unsigned long)code_gen_buffer ||
1304 tc_ptr >= (unsigned long)code_gen_ptr)
1305 return NULL;
1306 /* binary search (cf Knuth) */
1307 m_min = 0;
1308 m_max = nb_tbs - 1;
1309 while (m_min <= m_max) {
1310 m = (m_min + m_max) >> 1;
1311 tb = &tbs[m];
1312 v = (unsigned long)tb->tc_ptr;
1313 if (v == tc_ptr)
1314 return tb;
1315 else if (tc_ptr < v) {
1316 m_max = m - 1;
1317 } else {
1318 m_min = m + 1;
1321 return &tbs[m_max];
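/* The binary search is valid because tb_alloc() hands out tbs[] entries in
   the same order their code is emitted into code_gen_buffer, so tc_ptr
   increases monotonically with the array index. */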
1324 static void tb_reset_jump_recursive(TranslationBlock *tb);
1326 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1328 TranslationBlock *tb1, *tb_next, **ptb;
1329 unsigned int n1;
1331 tb1 = tb->jmp_next[n];
1332 if (tb1 != NULL) {
1333 /* find head of list */
1334 for(;;) {
1335 n1 = (long)tb1 & 3;
1336 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1337 if (n1 == 2)
1338 break;
1339 tb1 = tb1->jmp_next[n1];
1341 /* we are now sure that tb jumps to tb1 */
1342 tb_next = tb1;
1344 /* remove tb from the jmp_first list */
1345 ptb = &tb_next->jmp_first;
1346 for(;;) {
1347 tb1 = *ptb;
1348 n1 = (long)tb1 & 3;
1349 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1350 if (n1 == n && tb1 == tb)
1351 break;
1352 ptb = &tb1->jmp_next[n1];
1354 *ptb = tb->jmp_next[n];
1355 tb->jmp_next[n] = NULL;
1357 /* suppress the jump to next tb in generated code */
1358 tb_reset_jump(tb, n);
1360 /* suppress jumps in the tb on which we could have jumped */
1361 tb_reset_jump_recursive(tb_next);
1365 static void tb_reset_jump_recursive(TranslationBlock *tb)
1367 tb_reset_jump_recursive2(tb, 0);
1368 tb_reset_jump_recursive2(tb, 1);
1371 #if defined(TARGET_HAS_ICE)
1372 #if defined(CONFIG_USER_ONLY)
1373 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1375 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1377 #else
1378 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1380 target_phys_addr_t addr;
1381 target_ulong pd;
1382 ram_addr_t ram_addr;
1383 PhysPageDesc *p;
1385 addr = cpu_get_phys_page_debug(env, pc);
1386 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1387 if (!p) {
1388 pd = IO_MEM_UNASSIGNED;
1389 } else {
1390 pd = p->phys_offset;
1392 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1393 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1395 #endif
1396 #endif /* TARGET_HAS_ICE */
1398 #if defined(CONFIG_USER_ONLY)
1399 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1404 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1405 int flags, CPUWatchpoint **watchpoint)
1407 return -ENOSYS;
1409 #else
1410 /* Add a watchpoint. */
1411 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1412 int flags, CPUWatchpoint **watchpoint)
1414 target_ulong len_mask = ~(len - 1);
1415 CPUWatchpoint *wp;
1417 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1418 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1419 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1420 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1421 return -EINVAL;
1423 wp = qemu_malloc(sizeof(*wp));
1425 wp->vaddr = addr;
1426 wp->len_mask = len_mask;
1427 wp->flags = flags;
1429 /* keep all GDB-injected watchpoints in front */
1430 if (flags & BP_GDB)
1431 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1432 else
1433 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1435 tlb_flush_page(env, addr);
1437 if (watchpoint)
1438 *watchpoint = wp;
1439 return 0;
1442 /* Remove a specific watchpoint. */
1443 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1444 int flags)
1446 target_ulong len_mask = ~(len - 1);
1447 CPUWatchpoint *wp;
1449 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1450 if (addr == wp->vaddr && len_mask == wp->len_mask
1451 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1452 cpu_watchpoint_remove_by_ref(env, wp);
1453 return 0;
1456 return -ENOENT;
1459 /* Remove a specific watchpoint by reference. */
1460 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1462 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1464 tlb_flush_page(env, watchpoint->vaddr);
1466 qemu_free(watchpoint);
1469 /* Remove all matching watchpoints. */
1470 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1472 CPUWatchpoint *wp, *next;
1474 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1475 if (wp->flags & mask)
1476 cpu_watchpoint_remove_by_ref(env, wp);
1479 #endif
1481 /* Add a breakpoint. */
1482 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1483 CPUBreakpoint **breakpoint)
1485 #if defined(TARGET_HAS_ICE)
1486 CPUBreakpoint *bp;
1488 bp = qemu_malloc(sizeof(*bp));
1490 bp->pc = pc;
1491 bp->flags = flags;
1493 /* keep all GDB-injected breakpoints in front */
1494 if (flags & BP_GDB)
1495 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1496 else
1497 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1499 breakpoint_invalidate(env, pc);
1501 if (breakpoint)
1502 *breakpoint = bp;
1503 return 0;
1504 #else
1505 return -ENOSYS;
1506 #endif
1509 /* Remove a specific breakpoint. */
1510 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1512 #if defined(TARGET_HAS_ICE)
1513 CPUBreakpoint *bp;
1515 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1516 if (bp->pc == pc && bp->flags == flags) {
1517 cpu_breakpoint_remove_by_ref(env, bp);
1518 return 0;
1521 return -ENOENT;
1522 #else
1523 return -ENOSYS;
1524 #endif
1527 /* Remove a specific breakpoint by reference. */
1528 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1530 #if defined(TARGET_HAS_ICE)
1531 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1533 breakpoint_invalidate(env, breakpoint->pc);
1535 qemu_free(breakpoint);
1536 #endif
1539 /* Remove all matching breakpoints. */
1540 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1542 #if defined(TARGET_HAS_ICE)
1543 CPUBreakpoint *bp, *next;
1545 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1546 if (bp->flags & mask)
1547 cpu_breakpoint_remove_by_ref(env, bp);
1549 #endif
1552 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1553 CPU loop after each instruction */
1554 void cpu_single_step(CPUState *env, int enabled)
1556 #if defined(TARGET_HAS_ICE)
1557 if (env->singlestep_enabled != enabled) {
1558 env->singlestep_enabled = enabled;
1559 if (kvm_enabled())
1560 kvm_update_guest_debug(env, 0);
1561 else {
1562 /* must flush all the translated code to avoid inconsistencies */
1563 /* XXX: only flush what is necessary */
1564 tb_flush(env);
1567 #endif
1570 /* enable or disable low levels log */
1571 void cpu_set_log(int log_flags)
1573 loglevel = log_flags;
1574 if (loglevel && !logfile) {
1575 logfile = fopen(logfilename, log_append ? "a" : "w");
1576 if (!logfile) {
1577 perror(logfilename);
1578 _exit(1);
1580 #if !defined(CONFIG_SOFTMMU)
1581 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1583 static char logfile_buf[4096];
1584 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1586 #elif !defined(_WIN32)
1587 /* Win32 doesn't support line-buffering and requires size >= 2 */
1588 setvbuf(logfile, NULL, _IOLBF, 0);
1589 #endif
1590 log_append = 1;
1592 if (!loglevel && logfile) {
1593 fclose(logfile);
1594 logfile = NULL;
1598 void cpu_set_log_filename(const char *filename)
1600 logfilename = strdup(filename);
1601 if (logfile) {
1602 fclose(logfile);
1603 logfile = NULL;
1605 cpu_set_log(loglevel);
1608 static void cpu_unlink_tb(CPUState *env)
1610 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1611 problem and hope the cpu will stop of its own accord. For userspace
1612 emulation this often isn't actually as bad as it sounds. Often
1613 signals are used primarily to interrupt blocking syscalls. */
1614 TranslationBlock *tb;
1615 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1617 spin_lock(&interrupt_lock);
1618 tb = env->current_tb;
1619 /* if the cpu is currently executing code, we must unlink it and
1620 all the potentially executing TB */
1621 if (tb) {
1622 env->current_tb = NULL;
1623 tb_reset_jump_recursive(tb);
1625 spin_unlock(&interrupt_lock);
1628 /* mask must never be zero, except for A20 change call */
1629 void cpu_interrupt(CPUState *env, int mask)
1631 int old_mask;
1633 old_mask = env->interrupt_request;
1634 env->interrupt_request |= mask;
1636 #ifndef CONFIG_USER_ONLY
1638 * If called from iothread context, wake the target cpu in
1639 * case it's halted.
1641 if (!qemu_cpu_self(env)) {
1642 qemu_cpu_kick(env);
1643 return;
1645 #endif
1647 if (use_icount) {
1648 env->icount_decr.u16.high = 0xffff;
1649 #ifndef CONFIG_USER_ONLY
1650 if (!can_do_io(env)
1651 && (mask & ~old_mask) != 0) {
1652 cpu_abort(env, "Raised interrupt while not in I/O function");
1654 #endif
1655 } else {
1656 cpu_unlink_tb(env);
1660 void cpu_reset_interrupt(CPUState *env, int mask)
1662 env->interrupt_request &= ~mask;
1665 void cpu_exit(CPUState *env)
1667 env->exit_request = 1;
1668 cpu_unlink_tb(env);
1671 const CPULogItem cpu_log_items[] = {
1672 { CPU_LOG_TB_OUT_ASM, "out_asm",
1673 "show generated host assembly code for each compiled TB" },
1674 { CPU_LOG_TB_IN_ASM, "in_asm",
1675 "show target assembly code for each compiled TB" },
1676 { CPU_LOG_TB_OP, "op",
1677 "show micro ops for each compiled TB" },
1678 { CPU_LOG_TB_OP_OPT, "op_opt",
1679 "show micro ops "
1680 #ifdef TARGET_I386
1681 "before eflags optimization and "
1682 #endif
1683 "after liveness analysis" },
1684 { CPU_LOG_INT, "int",
1685 "show interrupts/exceptions in short format" },
1686 { CPU_LOG_EXEC, "exec",
1687 "show trace before each executed TB (lots of logs)" },
1688 { CPU_LOG_TB_CPU, "cpu",
1689 "show CPU state before block translation" },
1690 #ifdef TARGET_I386
1691 { CPU_LOG_PCALL, "pcall",
1692 "show protected mode far calls/returns/exceptions" },
1693 { CPU_LOG_RESET, "cpu_reset",
1694 "show CPU state before CPU resets" },
1695 #endif
1696 #ifdef DEBUG_IOPORT
1697 { CPU_LOG_IOPORT, "ioport",
1698 "show all i/o ports accesses" },
1699 #endif
1700 { 0, NULL, NULL },
1703 #ifndef CONFIG_USER_ONLY
1704 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1705 = QLIST_HEAD_INITIALIZER(memory_client_list);
1707 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1708 ram_addr_t size,
1709 ram_addr_t phys_offset)
1711 CPUPhysMemoryClient *client;
1712 QLIST_FOREACH(client, &memory_client_list, list) {
1713 client->set_memory(client, start_addr, size, phys_offset);
1717 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1718 target_phys_addr_t end)
1720 CPUPhysMemoryClient *client;
1721 QLIST_FOREACH(client, &memory_client_list, list) {
1722 int r = client->sync_dirty_bitmap(client, start, end);
1723 if (r < 0)
1724 return r;
1726 return 0;
1729 static int cpu_notify_migration_log(int enable)
1731 CPUPhysMemoryClient *client;
1732 QLIST_FOREACH(client, &memory_client_list, list) {
1733 int r = client->migration_log(client, enable);
1734 if (r < 0)
1735 return r;
1737 return 0;
1740 static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1741 int level, void **lp)
1743 int i;
1745 if (*lp == NULL) {
1746 return;
1748 if (level == 0) {
1749 PhysPageDesc *pd = *lp;
1750 for (i = 0; i < L2_SIZE; ++i) {
1751 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1752 client->set_memory(client, pd[i].region_offset,
1753 TARGET_PAGE_SIZE, pd[i].phys_offset);
1756 } else {
1757 void **pp = *lp;
1758 for (i = 0; i < L2_SIZE; ++i) {
1759 phys_page_for_each_1(client, level - 1, pp + i);
1764 static void phys_page_for_each(CPUPhysMemoryClient *client)
1766 int i;
1767 for (i = 0; i < P_L1_SIZE; ++i) {
1768 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1769 l1_phys_map + i);
1773 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1775 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1776 phys_page_for_each(client);
1779 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1781 QLIST_REMOVE(client, list);
1783 #endif
1785 static int cmp1(const char *s1, int n, const char *s2)
1787 if (strlen(s2) != n)
1788 return 0;
1789 return memcmp(s1, s2, n) == 0;
1792 /* takes a comma separated list of log masks. Return 0 if error. */
1793 int cpu_str_to_log_mask(const char *str)
1795 const CPULogItem *item;
1796 int mask;
1797 const char *p, *p1;
1799 p = str;
1800 mask = 0;
1801 for(;;) {
1802 p1 = strchr(p, ',');
1803 if (!p1)
1804 p1 = p + strlen(p);
1805 if(cmp1(p,p1-p,"all")) {
1806 for(item = cpu_log_items; item->mask != 0; item++) {
1807 mask |= item->mask;
1809 } else {
1810 for(item = cpu_log_items; item->mask != 0; item++) {
1811 if (cmp1(p, p1 - p, item->name))
1812 goto found;
1814 return 0;
1816 found:
1817 mask |= item->mask;
1818 if (*p1 != ',')
1819 break;
1820 p = p1 + 1;
1822 return mask;
1825 void cpu_abort(CPUState *env, const char *fmt, ...)
1827 va_list ap;
1828 va_list ap2;
1830 va_start(ap, fmt);
1831 va_copy(ap2, ap);
1832 fprintf(stderr, "qemu: fatal: ");
1833 vfprintf(stderr, fmt, ap);
1834 fprintf(stderr, "\n");
1835 #ifdef TARGET_I386
1836 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1837 #else
1838 cpu_dump_state(env, stderr, fprintf, 0);
1839 #endif
1840 if (qemu_log_enabled()) {
1841 qemu_log("qemu: fatal: ");
1842 qemu_log_vprintf(fmt, ap2);
1843 qemu_log("\n");
1844 #ifdef TARGET_I386
1845 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1846 #else
1847 log_cpu_state(env, 0);
1848 #endif
1849 qemu_log_flush();
1850 qemu_log_close();
1852 va_end(ap2);
1853 va_end(ap);
1854 #if defined(CONFIG_USER_ONLY)
1856 struct sigaction act;
1857 sigfillset(&act.sa_mask);
1858 act.sa_handler = SIG_DFL;
1859 sigaction(SIGABRT, &act, NULL);
1861 #endif
1862 abort();
1865 CPUState *cpu_copy(CPUState *env)
1867 CPUState *new_env = cpu_init(env->cpu_model_str);
1868 CPUState *next_cpu = new_env->next_cpu;
1869 int cpu_index = new_env->cpu_index;
1870 #if defined(TARGET_HAS_ICE)
1871 CPUBreakpoint *bp;
1872 CPUWatchpoint *wp;
1873 #endif
1875 memcpy(new_env, env, sizeof(CPUState));
1877 /* Preserve chaining and index. */
1878 new_env->next_cpu = next_cpu;
1879 new_env->cpu_index = cpu_index;
1881 /* Clone all break/watchpoints.
1882 Note: Once we support ptrace with hw-debug register access, make sure
1883 BP_CPU break/watchpoints are handled correctly on clone. */
1884 QTAILQ_INIT(&env->breakpoints);
1885 QTAILQ_INIT(&env->watchpoints);
1886 #if defined(TARGET_HAS_ICE)
1887 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1888 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1890 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1891 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1892 wp->flags, NULL);
1894 #endif
1896 return new_env;
1899 #if !defined(CONFIG_USER_ONLY)
1901 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1903 unsigned int i;
1905 /* Discard jump cache entries for any tb which might potentially
1906 overlap the flushed page. */
1907 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1908 memset (&env->tb_jmp_cache[i], 0,
1909 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1911 i = tb_jmp_cache_hash_page(addr);
1912 memset (&env->tb_jmp_cache[i], 0,
1913 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1916 static CPUTLBEntry s_cputlb_empty_entry = {
1917 .addr_read = -1,
1918 .addr_write = -1,
1919 .addr_code = -1,
1920 .addend = -1,
1923 /* NOTE: if flush_global is true, also flush global entries (not
1924 implemented yet) */
1925 void tlb_flush(CPUState *env, int flush_global)
1927 int i;
1929 #if defined(DEBUG_TLB)
1930 printf("tlb_flush:\n");
1931 #endif
1932 /* must reset current TB so that interrupts cannot modify the
1933 links while we are modifying them */
1934 env->current_tb = NULL;
1936 for(i = 0; i < CPU_TLB_SIZE; i++) {
1937 int mmu_idx;
1938 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1939 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1943 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1945 env->tlb_flush_addr = -1;
1946 env->tlb_flush_mask = 0;
1947 tlb_flush_count++;
1950 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1952 if (addr == (tlb_entry->addr_read &
1953 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1954 addr == (tlb_entry->addr_write &
1955 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1956 addr == (tlb_entry->addr_code &
1957 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1958 *tlb_entry = s_cputlb_empty_entry;
1962 void tlb_flush_page(CPUState *env, target_ulong addr)
1964 int i;
1965 int mmu_idx;
1967 #if defined(DEBUG_TLB)
1968 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1969 #endif
1970 /* Check if we need to flush due to large pages. */
1971 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1972 #if defined(DEBUG_TLB)
1973 printf("tlb_flush_page: forced full flush ("
1974 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1975 env->tlb_flush_addr, env->tlb_flush_mask);
1976 #endif
1977 tlb_flush(env, 1);
1978 return;
1980 /* must reset current TB so that interrupts cannot modify the
1981 links while we are modifying them */
1982 env->current_tb = NULL;
1984 addr &= TARGET_PAGE_MASK;
1985 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1986 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1987 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1989 tlb_flush_jmp_cache(env, addr);
1992 /* update the TLBs so that writes to code in the virtual page 'addr'
1993 can be detected */
1994 static void tlb_protect_code(ram_addr_t ram_addr)
1996 cpu_physical_memory_reset_dirty(ram_addr,
1997 ram_addr + TARGET_PAGE_SIZE,
1998 CODE_DIRTY_FLAG);
2001 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2002 tested for self modifying code */
2003 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2004 target_ulong vaddr)
2006 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2009 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2010 unsigned long start, unsigned long length)
2012 unsigned long addr;
2013 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2014 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2015 if ((addr - start) < length) {
2016 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2021 /* Note: start and end must be within the same ram block. */
2022 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2023 int dirty_flags)
2025 CPUState *env;
2026 unsigned long length, start1;
2027 int i;
2029 start &= TARGET_PAGE_MASK;
2030 end = TARGET_PAGE_ALIGN(end);
2032 length = end - start;
2033 if (length == 0)
2034 return;
2035 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2037 /* we modify the TLB cache so that the dirty bit will be set again
2038 when accessing the range */
2039 start1 = (unsigned long)qemu_get_ram_ptr(start);
2040 /* Check that we don't span multiple blocks - this breaks the
2041 address comparisons below. */
2042 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2043 != (end - 1) - start) {
2044 abort();
2047 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2048 int mmu_idx;
2049 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2050 for(i = 0; i < CPU_TLB_SIZE; i++)
2051 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2052 start1, length);
2057 int cpu_physical_memory_set_dirty_tracking(int enable)
2059 int ret = 0;
2060 in_migration = enable;
2061 ret = cpu_notify_migration_log(!!enable);
2062 return ret;
2065 int cpu_physical_memory_get_dirty_tracking(void)
2067 return in_migration;
2070 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2071 target_phys_addr_t end_addr)
2073 int ret;
2075 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2076 return ret;
2079 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2081 ram_addr_t ram_addr;
2082 void *p;
2084 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2085 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2086 + tlb_entry->addend);
2087 ram_addr = qemu_ram_addr_from_host(p);
2088 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2089 tlb_entry->addr_write |= TLB_NOTDIRTY;
2094 /* update the TLB according to the current state of the dirty bits */
2095 void cpu_tlb_update_dirty(CPUState *env)
2097 int i;
2098 int mmu_idx;
2099 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2100 for(i = 0; i < CPU_TLB_SIZE; i++)
2101 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2105 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2107 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2108 tlb_entry->addr_write = vaddr;
2111 /* update the TLB corresponding to virtual page vaddr
2112 so that it is no longer dirty */
2113 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2115 int i;
2116 int mmu_idx;
2118 vaddr &= TARGET_PAGE_MASK;
2119 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2120 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2121 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2124 /* Our TLB does not support large pages, so remember the area covered by
2125 large pages and trigger a full TLB flush if these are invalidated. */
2126 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2127 target_ulong size)
2129 target_ulong mask = ~(size - 1);
2131 if (env->tlb_flush_addr == (target_ulong)-1) {
2132 env->tlb_flush_addr = vaddr & mask;
2133 env->tlb_flush_mask = mask;
2134 return;
2136 /* Extend the existing region to include the new page.
2137 This is a compromise between unnecessary flushes and the cost
2138 of maintaining a full variable size TLB. */
2139 mask &= env->tlb_flush_mask;
2140 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2141 mask <<= 1;
2143 env->tlb_flush_addr &= mask;
2144 env->tlb_flush_mask = mask;
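/* A minimal, self-contained sketch of the region-merging arithmetic above,
   using two hypothetical 2 MB pages. The struct, function name and addresses
   are invented for illustration; only the mask/shift logic mirrors
   tlb_add_large_page(). */
#include <stdint.h>
#include <stdio.h>

struct flush_range { uint32_t addr; uint32_t mask; };

static void sketch_add_large_page(struct flush_range *r,
                                  uint32_t vaddr, uint32_t size)
{
    uint32_t mask = ~(size - 1);

    if (r->addr == (uint32_t)-1) {          /* first large page seen */
        r->addr = vaddr & mask;
        r->mask = mask;
        return;
    }
    mask &= r->mask;                        /* start from the existing region */
    while (((r->addr ^ vaddr) & mask) != 0) {
        mask <<= 1;                         /* grow until both pages fit */
    }
    r->addr &= mask;
    r->mask = mask;
}

int main(void)
{
    struct flush_range r = { (uint32_t)-1, 0 };

    sketch_add_large_page(&r, 0x00400000, 0x00200000);  /* 2 MB page at 4 MB  */
    sketch_add_large_page(&r, 0x00A00000, 0x00200000);  /* 2 MB page at 10 MB */
    /* Prints addr=0x00000000 mask=0xff000000: the tracked range grows to a
       single 16 MB region covering both pages, the compromise described
       above. */
    printf("addr=0x%08x mask=0x%08x\n", r.addr, r.mask);
    return 0;
}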
2147 /* Add a new TLB entry. At most one entry for a given virtual address
2148 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2149 supplied size is only used by tlb_flush_page. */
2150 void tlb_set_page(CPUState *env, target_ulong vaddr,
2151 target_phys_addr_t paddr, int prot,
2152 int mmu_idx, target_ulong size)
2154 PhysPageDesc *p;
2155 unsigned long pd;
2156 unsigned int index;
2157 target_ulong address;
2158 target_ulong code_address;
2159 unsigned long addend;
2160 CPUTLBEntry *te;
2161 CPUWatchpoint *wp;
2162 target_phys_addr_t iotlb;
2164 assert(size >= TARGET_PAGE_SIZE);
2165 if (size != TARGET_PAGE_SIZE) {
2166 tlb_add_large_page(env, vaddr, size);
2168 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2169 if (!p) {
2170 pd = IO_MEM_UNASSIGNED;
2171 } else {
2172 pd = p->phys_offset;
2174 #if defined(DEBUG_TLB)
2175 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d pd=0x%08lx\n",
2176        vaddr, (int)paddr, prot, mmu_idx, pd);
2177 #endif
2179 address = vaddr;
2180 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2181 /* IO memory case (romd handled later) */
2182 address |= TLB_MMIO;
2184 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2185 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2186 /* Normal RAM. */
2187 iotlb = pd & TARGET_PAGE_MASK;
2188 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2189 iotlb |= IO_MEM_NOTDIRTY;
2190 else
2191 iotlb |= IO_MEM_ROM;
2192 } else {
2193 /* IO handlers are currently passed a physical address.
2194 It would be nice to pass an offset from the base address
2195 of that region. This would avoid having to special case RAM,
2196 and avoid full address decoding in every device.
2197 We can't use the high bits of pd for this because
2198 IO_MEM_ROMD uses these as a ram address. */
2199 iotlb = (pd & ~TARGET_PAGE_MASK);
2200 if (p) {
2201 iotlb += p->region_offset;
2202 } else {
2203 iotlb += paddr;
2207 code_address = address;
2208 /* Make accesses to pages with watchpoints go via the
2209 watchpoint trap routines. */
2210 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2211 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2212 /* Avoid trapping reads of pages with a write breakpoint. */
2213 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2214 iotlb = io_mem_watch + paddr;
2215 address |= TLB_MMIO;
2216 break;
2221 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2222 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2223 te = &env->tlb_table[mmu_idx][index];
2224 te->addend = addend - vaddr;
2225 if (prot & PAGE_READ) {
2226 te->addr_read = address;
2227 } else {
2228 te->addr_read = -1;
2231 if (prot & PAGE_EXEC) {
2232 te->addr_code = code_address;
2233 } else {
2234 te->addr_code = -1;
2236 if (prot & PAGE_WRITE) {
2237 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2238 (pd & IO_MEM_ROMD)) {
2239 /* Write access calls the I/O callback. */
2240 te->addr_write = address | TLB_MMIO;
2241 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2242 !cpu_physical_memory_is_dirty(pd)) {
2243 te->addr_write = address | TLB_NOTDIRTY;
2244 } else {
2245 te->addr_write = address;
2247 } else {
2248 te->addr_write = -1;
2252 #else
2254 void tlb_flush(CPUState *env, int flush_global)
2258 void tlb_flush_page(CPUState *env, target_ulong addr)
2263 * Walks guest process memory "regions" one by one
2264 * and calls callback function 'fn' for each region.
2267 struct walk_memory_regions_data
2269 walk_memory_regions_fn fn;
2270 void *priv;
2271 unsigned long start;
2272 int prot;
2275 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2276 abi_ulong end, int new_prot)
2278 if (data->start != -1ul) {
2279 int rc = data->fn(data->priv, data->start, end, data->prot);
2280 if (rc != 0) {
2281 return rc;
2285 data->start = (new_prot ? end : -1ul);
2286 data->prot = new_prot;
2288 return 0;
2291 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2292 abi_ulong base, int level, void **lp)
2294 abi_ulong pa;
2295 int i, rc;
2297 if (*lp == NULL) {
2298 return walk_memory_regions_end(data, base, 0);
2301 if (level == 0) {
2302 PageDesc *pd = *lp;
2303 for (i = 0; i < L2_SIZE; ++i) {
2304 int prot = pd[i].flags;
2306 pa = base | (i << TARGET_PAGE_BITS);
2307 if (prot != data->prot) {
2308 rc = walk_memory_regions_end(data, pa, prot);
2309 if (rc != 0) {
2310 return rc;
2314 } else {
2315 void **pp = *lp;
2316 for (i = 0; i < L2_SIZE; ++i) {
2317 pa = base | ((abi_ulong)i <<
2318 (TARGET_PAGE_BITS + L2_BITS * level));
2319 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2320 if (rc != 0) {
2321 return rc;
2326 return 0;
2329 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2331 struct walk_memory_regions_data data;
2332 unsigned long i;
2334 data.fn = fn;
2335 data.priv = priv;
2336 data.start = -1ul;
2337 data.prot = 0;
2339 for (i = 0; i < V_L1_SIZE; i++) {
2340 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2341 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2342 if (rc != 0) {
2343 return rc;
2347 return walk_memory_regions_end(&data, 0, 0);
2350 static int dump_region(void *priv, abi_ulong start,
2351 abi_ulong end, unsigned long prot)
2353 FILE *f = (FILE *)priv;
2355 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2356 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2357 start, end, end - start,
2358 ((prot & PAGE_READ) ? 'r' : '-'),
2359 ((prot & PAGE_WRITE) ? 'w' : '-'),
2360 ((prot & PAGE_EXEC) ? 'x' : '-'));
2362 return (0);
2365 /* dump memory mappings */
2366 void page_dump(FILE *f)
2368 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2369 "start", "end", "size", "prot");
2370 walk_memory_regions(f, dump_region);
2373 int page_get_flags(target_ulong address)
2375 PageDesc *p;
2377 p = page_find(address >> TARGET_PAGE_BITS);
2378 if (!p)
2379 return 0;
2380 return p->flags;
2383 /* Modify the flags of a page and invalidate the code if necessary.
2384 The flag PAGE_WRITE_ORG is positioned automatically depending
2385 on PAGE_WRITE. The mmap_lock should already be held. */
2386 void page_set_flags(target_ulong start, target_ulong end, int flags)
2388 target_ulong addr, len;
2390 /* This function should never be called with addresses outside the
2391 guest address space. If this assert fires, it probably indicates
2392 a missing call to h2g_valid. */
2393 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2394 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2395 #endif
2396 assert(start < end);
2398 start = start & TARGET_PAGE_MASK;
2399 end = TARGET_PAGE_ALIGN(end);
2401 if (flags & PAGE_WRITE) {
2402 flags |= PAGE_WRITE_ORG;
2405 for (addr = start, len = end - start;
2406 len != 0;
2407 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2408 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2410 /* If the write protection bit is set, then we invalidate
2411 the code inside. */
2412 if (!(p->flags & PAGE_WRITE) &&
2413 (flags & PAGE_WRITE) &&
2414 p->first_tb) {
2415 tb_invalidate_phys_page(addr, 0, NULL);
2417 p->flags = flags;
2421 int page_check_range(target_ulong start, target_ulong len, int flags)
2423 PageDesc *p;
2424 target_ulong end;
2425 target_ulong addr;
2427 /* This function should never be called with addresses outside the
2428 guest address space. If this assert fires, it probably indicates
2429 a missing call to h2g_valid. */
2430 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2431 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2432 #endif
2434 if (len == 0) {
2435 return 0;
2437 if (start + len - 1 < start) {
2438 /* We've wrapped around. */
2439 return -1;
2442 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2443 start = start & TARGET_PAGE_MASK;
2445 for (addr = start, len = end - start;
2446 len != 0;
2447 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2448 p = page_find(addr >> TARGET_PAGE_BITS);
2449 if (!p)
2450 return -1;
2451 if (!(p->flags & PAGE_VALID))
2452 return -1;
2454 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2455 return -1;
2456 if (flags & PAGE_WRITE) {
2457 if (!(p->flags & PAGE_WRITE_ORG))
2458 return -1;
2459 /* unprotect the page if it was put read-only because it
2460 contains translated code */
2461 if (!(p->flags & PAGE_WRITE)) {
2462 if (!page_unprotect(addr, 0, NULL))
2463 return -1;
2465 return 0;
2468 return 0;
2471 /* called from signal handler: invalidate the code and unprotect the
2472 page. Return TRUE if the fault was successfully handled. */
2473 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2475 unsigned int prot;
2476 PageDesc *p;
2477 target_ulong host_start, host_end, addr;
2479 /* Technically this isn't safe inside a signal handler. However we
2480 know this only ever happens in a synchronous SEGV handler, so in
2481 practice it seems to be ok. */
2482 mmap_lock();
2484 p = page_find(address >> TARGET_PAGE_BITS);
2485 if (!p) {
2486 mmap_unlock();
2487 return 0;
2490 /* if the page was really writable, then we change its
2491 protection back to writable */
2492 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2493 host_start = address & qemu_host_page_mask;
2494 host_end = host_start + qemu_host_page_size;
2496 prot = 0;
2497 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2498 p = page_find(addr >> TARGET_PAGE_BITS);
2499 p->flags |= PAGE_WRITE;
2500 prot |= p->flags;
2502 /* and since the content will be modified, we must invalidate
2503 the corresponding translated code. */
2504 tb_invalidate_phys_page(addr, pc, puc);
2505 #ifdef DEBUG_TB_CHECK
2506 tb_invalidate_check(addr);
2507 #endif
2509 mprotect((void *)g2h(host_start), qemu_host_page_size,
2510 prot & PAGE_BITS);
2512 mmap_unlock();
2513 return 1;
2515 mmap_unlock();
2516 return 0;
2519 static inline void tlb_set_dirty(CPUState *env,
2520 unsigned long addr, target_ulong vaddr)
2523 #endif /* defined(CONFIG_USER_ONLY) */
2525 #if !defined(CONFIG_USER_ONLY)
2527 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2528 typedef struct subpage_t {
2529 target_phys_addr_t base;
2530 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2531 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2532 } subpage_t;
2534 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2535 ram_addr_t memory, ram_addr_t region_offset);
2536 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2537 ram_addr_t orig_memory,
2538 ram_addr_t region_offset);
2539 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2540 need_subpage) \
2541 do { \
2542 if (addr > start_addr) \
2543 start_addr2 = 0; \
2544 else { \
2545 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2546 if (start_addr2 > 0) \
2547 need_subpage = 1; \
2550 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2551 end_addr2 = TARGET_PAGE_SIZE - 1; \
2552 else { \
2553 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2554 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2555 need_subpage = 1; \
2557 } while (0)
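/* Illustrative worked example with hypothetical values, assuming 4 KB target
   pages: registering start_addr = 0x1100 with orig_size = 0x200 makes the
   loop in cpu_register_physical_memory_offset() below visit addr = 0x1100.
   addr is not greater than start_addr, so start_addr2 = 0x1100 & 0xfff =
   0x100 (non-zero, need_subpage = 1); (start_addr + orig_size) - addr =
   0x200 < TARGET_PAGE_SIZE, so end_addr2 = 0x12ff & 0xfff = 0x2ff (less than
   0xfff, need_subpage = 1). Only bytes 0x100..0x2ff of the page at 0x1000
   are redirected, via subpage_register(). */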
2559 /* register physical memory.
2560 For RAM, 'size' must be a multiple of the target page size.
2561 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2562 io memory page. The address used when calling the IO function is
2563 the offset from the start of the region, plus region_offset. Both
2564 start_addr and region_offset are rounded down to a page boundary
2565 before calculating this offset. This should not be a problem unless
2566 the low bits of start_addr and region_offset differ. */
2567 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2568 ram_addr_t size,
2569 ram_addr_t phys_offset,
2570 ram_addr_t region_offset)
2572 target_phys_addr_t addr, end_addr;
2573 PhysPageDesc *p;
2574 CPUState *env;
2575 ram_addr_t orig_size = size;
2576 subpage_t *subpage;
2578 cpu_notify_set_memory(start_addr, size, phys_offset);
2580 if (phys_offset == IO_MEM_UNASSIGNED) {
2581 region_offset = start_addr;
2583 region_offset &= TARGET_PAGE_MASK;
2584 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2585 end_addr = start_addr + (target_phys_addr_t)size;
2586 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2587 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2588 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2589 ram_addr_t orig_memory = p->phys_offset;
2590 target_phys_addr_t start_addr2, end_addr2;
2591 int need_subpage = 0;
2593 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2594 need_subpage);
2595 if (need_subpage) {
2596 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2597 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2598 &p->phys_offset, orig_memory,
2599 p->region_offset);
2600 } else {
2601 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2602 >> IO_MEM_SHIFT];
2604 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2605 region_offset);
2606 p->region_offset = 0;
2607 } else {
2608 p->phys_offset = phys_offset;
2609 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2610 (phys_offset & IO_MEM_ROMD))
2611 phys_offset += TARGET_PAGE_SIZE;
2613 } else {
2614 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2615 p->phys_offset = phys_offset;
2616 p->region_offset = region_offset;
2617 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2618 (phys_offset & IO_MEM_ROMD)) {
2619 phys_offset += TARGET_PAGE_SIZE;
2620 } else {
2621 target_phys_addr_t start_addr2, end_addr2;
2622 int need_subpage = 0;
2624 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2625 end_addr2, need_subpage);
2627 if (need_subpage) {
2628 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2629 &p->phys_offset, IO_MEM_UNASSIGNED,
2630 addr & TARGET_PAGE_MASK);
2631 subpage_register(subpage, start_addr2, end_addr2,
2632 phys_offset, region_offset);
2633 p->region_offset = 0;
2637 region_offset += TARGET_PAGE_SIZE;
2640 /* since each CPU stores ram addresses in its TLB cache, we must
2641 reset the modified entries */
2642 /* XXX: slow ! */
2643 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2644 tlb_flush(env, 1);
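/* An illustrative fragment with hypothetical names: mapping one page of a
   previously registered io region at guest address 'base', where
   'my_io_index' would come from cpu_register_io_memory() below. Passing
   region_offset = 0x1000 makes the io handlers see offsets starting at
   0x1000 instead of 0 for this page, as documented above. */
cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE,
                                    my_io_index, 0x1000);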
2648 /* XXX: temporary until new memory mapping API */
2649 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2651 PhysPageDesc *p;
2653 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2654 if (!p)
2655 return IO_MEM_UNASSIGNED;
2656 return p->phys_offset;
2659 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2661 if (kvm_enabled())
2662 kvm_coalesce_mmio_region(addr, size);
2665 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2667 if (kvm_enabled())
2668 kvm_uncoalesce_mmio_region(addr, size);
2671 void qemu_flush_coalesced_mmio_buffer(void)
2673 if (kvm_enabled())
2674 kvm_flush_coalesced_mmio_buffer();
2677 #if defined(__linux__) && !defined(TARGET_S390X)
2679 #include <sys/vfs.h>
2681 #define HUGETLBFS_MAGIC 0x958458f6
2683 static long gethugepagesize(const char *path)
2685 struct statfs fs;
2686 int ret;
2688 do {
2689 ret = statfs(path, &fs);
2690 } while (ret != 0 && errno == EINTR);
2692 if (ret != 0) {
2693 perror(path);
2694 return 0;
2697 if (fs.f_type != HUGETLBFS_MAGIC)
2698 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2700 return fs.f_bsize;
2703 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2705 char *filename;
2706 void *area;
2707 int fd;
2708 #ifdef MAP_POPULATE
2709 int flags;
2710 #endif
2711 unsigned long hpagesize;
2713 hpagesize = gethugepagesize(path);
2714 if (!hpagesize) {
2715 return NULL;
2718 if (memory < hpagesize) {
2719 return NULL;
2722 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2723 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2724 return NULL;
2727 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2728 return NULL;
2731 fd = mkstemp(filename);
2732 if (fd < 0) {
2733 perror("unable to create backing store for hugepages");
2734 free(filename);
2735 return NULL;
2737 unlink(filename);
2738 free(filename);
2740 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2743 * ftruncate is not supported by hugetlbfs in older
2744 * hosts, so don't bother bailing out on errors.
2745 * If anything goes wrong with it under other filesystems,
2746 * mmap will fail.
2748 if (ftruncate(fd, memory))
2749 perror("ftruncate");
2751 #ifdef MAP_POPULATE
2752 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2753 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2754 * to sidestep this quirk.
2756 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2757 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2758 #else
2759 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2760 #endif
2761 if (area == MAP_FAILED) {
2762 perror("file_ram_alloc: can't mmap RAM pages");
2763 close(fd);
2764 return (NULL);
2766 return area;
2768 #endif
2770 static ram_addr_t find_ram_offset(ram_addr_t size)
2772 RAMBlock *block;
2773 ram_addr_t last = 0;
2775 QLIST_FOREACH(block, &ram_list.blocks, next)
2776 last = MAX(last, block->offset + block->length);
2778 return last;
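/* Illustrative note with hypothetical values: with existing blocks at
   [0, 0x8000000) and [0x8000000, 0xA000000), the loop above yields
   last = 0xA000000, so a newly allocated block is placed after the highest
   existing one. */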
2781 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2783 RAMBlock *new_block;
2785 size = TARGET_PAGE_ALIGN(size);
2786 new_block = qemu_malloc(sizeof(*new_block));
2788 if (mem_path) {
2789 #if defined (__linux__) && !defined(TARGET_S390X)
2790 new_block->host = file_ram_alloc(size, mem_path);
2791 if (!new_block->host) {
2792 new_block->host = qemu_vmalloc(size);
2793 #ifdef MADV_MERGEABLE
2794 madvise(new_block->host, size, MADV_MERGEABLE);
2795 #endif
2797 #else
2798 fprintf(stderr, "-mem-path option unsupported\n");
2799 exit(1);
2800 #endif
2801 } else {
2802 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2803 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2804 new_block->host = mmap((void*)0x1000000, size,
2805 PROT_EXEC|PROT_READ|PROT_WRITE,
2806 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2807 #else
2808 new_block->host = qemu_vmalloc(size);
2809 #endif
2810 #ifdef MADV_MERGEABLE
2811 madvise(new_block->host, size, MADV_MERGEABLE);
2812 #endif
2814 new_block->offset = find_ram_offset(size);
2815 new_block->length = size;
2817 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2819 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2820 (new_block->offset + size) >> TARGET_PAGE_BITS);
2821 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2822 0xff, size >> TARGET_PAGE_BITS);
2824 if (kvm_enabled())
2825 kvm_setup_guest_memory(new_block->host, size);
2827 return new_block->offset;
2830 void qemu_ram_free(ram_addr_t addr)
2832 /* TODO: implement this. */
2835 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2836 With the exception of the softmmu code in this file, this should
2837 only be used for local memory (e.g. video ram) that the device owns,
2838 and knows it isn't going to access beyond the end of the block.
2840 It should not be used for general purpose DMA.
2841 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2843 void *qemu_get_ram_ptr(ram_addr_t addr)
2845 RAMBlock *block;
2847 QLIST_FOREACH(block, &ram_list.blocks, next) {
2848 if (addr - block->offset < block->length) {
2849 QLIST_REMOVE(block, next);
2850 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2851 return block->host + (addr - block->offset);
2855 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2856 abort();
2858 return NULL;
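/* A minimal illustrative sketch of the local-use pattern described above,
   for a hypothetical display device that owns its own RAM; the function
   name, 'vga_base' and the 8 MB size are invented. OR'ing in IO_MEM_RAM
   (plain RAM) follows the convention used by the RAM checks elsewhere in
   this file. */
static void vga_vram_init_sketch(target_phys_addr_t vga_base)
{
    ram_addr_t vram_offset = qemu_ram_alloc(0x800000);

    cpu_register_physical_memory_offset(vga_base, 0x800000,
                                        vram_offset | IO_MEM_RAM, 0);
    /* Host pointer for device-local access only -- not for guest DMA. */
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);
    memset(vram, 0, 0x800000);
}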
2861 /* Some of the softmmu routines need to translate from a host pointer
2862 (typically a TLB entry) back to a ram offset. */
2863 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2865 RAMBlock *block;
2866 uint8_t *host = ptr;
2868 QLIST_FOREACH(block, &ram_list.blocks, next) {
2869 if (host - block->host < block->length) {
2870 return block->offset + (host - block->host);
2874 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2875 abort();
2877 return 0;
2880 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2882 #ifdef DEBUG_UNASSIGNED
2883 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2884 #endif
2885 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2886 do_unassigned_access(addr, 0, 0, 0, 1);
2887 #endif
2888 return 0;
2891 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2893 #ifdef DEBUG_UNASSIGNED
2894 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2895 #endif
2896 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2897 do_unassigned_access(addr, 0, 0, 0, 2);
2898 #endif
2899 return 0;
2902 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2904 #ifdef DEBUG_UNASSIGNED
2905 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2906 #endif
2907 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2908 do_unassigned_access(addr, 0, 0, 0, 4);
2909 #endif
2910 return 0;
2913 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2915 #ifdef DEBUG_UNASSIGNED
2916 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2917 #endif
2918 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2919 do_unassigned_access(addr, 1, 0, 0, 1);
2920 #endif
2923 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2925 #ifdef DEBUG_UNASSIGNED
2926 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2927 #endif
2928 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2929 do_unassigned_access(addr, 1, 0, 0, 2);
2930 #endif
2933 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2935 #ifdef DEBUG_UNASSIGNED
2936 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2937 #endif
2938 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2939 do_unassigned_access(addr, 1, 0, 0, 4);
2940 #endif
2943 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2944 unassigned_mem_readb,
2945 unassigned_mem_readw,
2946 unassigned_mem_readl,
2949 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2950 unassigned_mem_writeb,
2951 unassigned_mem_writew,
2952 unassigned_mem_writel,
2955 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2956 uint32_t val)
2958 int dirty_flags;
2959 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2960 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2961 #if !defined(CONFIG_USER_ONLY)
2962 tb_invalidate_phys_page_fast(ram_addr, 1);
2963 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2964 #endif
2966 stb_p(qemu_get_ram_ptr(ram_addr), val);
2967 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2968 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2969 /* we remove the notdirty callback only if the code has been
2970 flushed */
2971 if (dirty_flags == 0xff)
2972 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2975 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2976 uint32_t val)
2978 int dirty_flags;
2979 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2980 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2981 #if !defined(CONFIG_USER_ONLY)
2982 tb_invalidate_phys_page_fast(ram_addr, 2);
2983 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2984 #endif
2986 stw_p(qemu_get_ram_ptr(ram_addr), val);
2987 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2988 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2989 /* we remove the notdirty callback only if the code has been
2990 flushed */
2991 if (dirty_flags == 0xff)
2992 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2995 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2996 uint32_t val)
2998 int dirty_flags;
2999 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3000 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3001 #if !defined(CONFIG_USER_ONLY)
3002 tb_invalidate_phys_page_fast(ram_addr, 4);
3003 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3004 #endif
3006 stl_p(qemu_get_ram_ptr(ram_addr), val);
3007 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3008 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3009 /* we remove the notdirty callback only if the code has been
3010 flushed */
3011 if (dirty_flags == 0xff)
3012 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3015 static CPUReadMemoryFunc * const error_mem_read[3] = {
3016 NULL, /* never used */
3017 NULL, /* never used */
3018 NULL, /* never used */
3021 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3022 notdirty_mem_writeb,
3023 notdirty_mem_writew,
3024 notdirty_mem_writel,
3027 /* Generate a debug exception if a watchpoint has been hit. */
3028 static void check_watchpoint(int offset, int len_mask, int flags)
3030 CPUState *env = cpu_single_env;
3031 target_ulong pc, cs_base;
3032 TranslationBlock *tb;
3033 target_ulong vaddr;
3034 CPUWatchpoint *wp;
3035 int cpu_flags;
3037 if (env->watchpoint_hit) {
3038 /* We re-entered the check after replacing the TB. Now raise
3039 * the debug interrupt so that it will trigger after the
3040 * current instruction. */
3041 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3042 return;
3044 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3045 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3046 if ((vaddr == (wp->vaddr & len_mask) ||
3047 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3048 wp->flags |= BP_WATCHPOINT_HIT;
3049 if (!env->watchpoint_hit) {
3050 env->watchpoint_hit = wp;
3051 tb = tb_find_pc(env->mem_io_pc);
3052 if (!tb) {
3053 cpu_abort(env, "check_watchpoint: could not find TB for "
3054 "pc=%p", (void *)env->mem_io_pc);
3056 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3057 tb_phys_invalidate(tb, -1);
3058 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3059 env->exception_index = EXCP_DEBUG;
3060 } else {
3061 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3062 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3064 cpu_resume_from_signal(env, NULL);
3066 } else {
3067 wp->flags &= ~BP_WATCHPOINT_HIT;
3072 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3073 so these check for a hit then pass through to the normal out-of-line
3074 phys routines. */
3075 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3077 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3078 return ldub_phys(addr);
3081 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3083 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3084 return lduw_phys(addr);
3087 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3089 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3090 return ldl_phys(addr);
3093 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3094 uint32_t val)
3096 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3097 stb_phys(addr, val);
3100 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3101 uint32_t val)
3103 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3104 stw_phys(addr, val);
3107 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3108 uint32_t val)
3110 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3111 stl_phys(addr, val);
3114 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3115 watch_mem_readb,
3116 watch_mem_readw,
3117 watch_mem_readl,
3120 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3121 watch_mem_writeb,
3122 watch_mem_writew,
3123 watch_mem_writel,
3126 static inline uint32_t subpage_readlen (subpage_t *mmio,
3127 target_phys_addr_t addr,
3128 unsigned int len)
3130 unsigned int idx = SUBPAGE_IDX(addr);
3131 #if defined(DEBUG_SUBPAGE)
3132 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3133 mmio, len, addr, idx);
3134 #endif
3136 addr += mmio->region_offset[idx];
3137 idx = mmio->sub_io_index[idx];
3138 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3141 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3142 uint32_t value, unsigned int len)
3144 unsigned int idx = SUBPAGE_IDX(addr);
3145 #if defined(DEBUG_SUBPAGE)
3146 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3147 __func__, mmio, len, addr, idx, value);
3148 #endif
3150 addr += mmio->region_offset[idx];
3151 idx = mmio->sub_io_index[idx];
3152 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3155 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3157 return subpage_readlen(opaque, addr, 0);
3160 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3161 uint32_t value)
3163 subpage_writelen(opaque, addr, value, 0);
3166 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3168 return subpage_readlen(opaque, addr, 1);
3171 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3172 uint32_t value)
3174 subpage_writelen(opaque, addr, value, 1);
3177 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3179 return subpage_readlen(opaque, addr, 2);
3182 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3183 uint32_t value)
3185 subpage_writelen(opaque, addr, value, 2);
3188 static CPUReadMemoryFunc * const subpage_read[] = {
3189 &subpage_readb,
3190 &subpage_readw,
3191 &subpage_readl,
3194 static CPUWriteMemoryFunc * const subpage_write[] = {
3195 &subpage_writeb,
3196 &subpage_writew,
3197 &subpage_writel,
3200 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3201 ram_addr_t memory, ram_addr_t region_offset)
3203 int idx, eidx;
3205 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3206 return -1;
3207 idx = SUBPAGE_IDX(start);
3208 eidx = SUBPAGE_IDX(end);
3209 #if defined(DEBUG_SUBPAGE)
3210 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3211 mmio, start, end, idx, eidx, memory);
3212 #endif
3213 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3214 for (; idx <= eidx; idx++) {
3215 mmio->sub_io_index[idx] = memory;
3216 mmio->region_offset[idx] = region_offset;
3219 return 0;
3222 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3223 ram_addr_t orig_memory,
3224 ram_addr_t region_offset)
3226 subpage_t *mmio;
3227 int subpage_memory;
3229 mmio = qemu_mallocz(sizeof(subpage_t));
3231 mmio->base = base;
3232 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3233 #if defined(DEBUG_SUBPAGE)
3234 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3235 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3236 #endif
3237 *phys = subpage_memory | IO_MEM_SUBPAGE;
3238 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3240 return mmio;
3243 static int get_free_io_mem_idx(void)
3245 int i;
3247 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3248 if (!io_mem_used[i]) {
3249 io_mem_used[i] = 1;
3250 return i;
3252 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3253 return -1;
3256 /* mem_read and mem_write are arrays of functions containing the
3257 function to access byte (index 0), word (index 1) and dword (index
3258 2). Functions can be omitted with a NULL function pointer.
3259 If io_index is non-zero, the corresponding io zone is
3260 modified. If it is zero, a new io zone is allocated. The return
3261 value can be used with cpu_register_physical_memory(). (-1) is
3262 returned on error. */
3263 static int cpu_register_io_memory_fixed(int io_index,
3264 CPUReadMemoryFunc * const *mem_read,
3265 CPUWriteMemoryFunc * const *mem_write,
3266 void *opaque)
3268 int i;
3270 if (io_index <= 0) {
3271 io_index = get_free_io_mem_idx();
3272 if (io_index == -1)
3273 return io_index;
3274 } else {
3275 io_index >>= IO_MEM_SHIFT;
3276 if (io_index >= IO_MEM_NB_ENTRIES)
3277 return -1;
3280 for (i = 0; i < 3; ++i) {
3281 io_mem_read[io_index][i]
3282 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3284 for (i = 0; i < 3; ++i) {
3285 io_mem_write[io_index][i]
3286 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3288 io_mem_opaque[io_index] = opaque;
3290 return (io_index << IO_MEM_SHIFT);
3293 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3294 CPUWriteMemoryFunc * const *mem_write,
3295 void *opaque)
3297 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
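/* A minimal illustrative sketch of the registration API above, not taken
   from any real device: MyDevState, the mydev_* names and the 0xfe000000
   base address are all invented. Handlers are passed in byte/word/dword
   order (indexes 0/1/2); omitted entries fall back to the unassigned
   handlers, as documented above. Each handler receives the offset from the
   start of the registered region (plus region_offset). */
typedef struct MyDevState { uint32_t regs[4]; } MyDevState;

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->regs[(addr >> 2) & 3];
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[(addr >> 2) & 3] = val;
}

static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL, NULL, mydev_readl,            /* byte/word reads left unassigned */
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_init_sketch(void)
{
    MyDevState *s = qemu_mallocz(sizeof(*s));
    int io_index = cpu_register_io_memory(mydev_read, mydev_write, s);

    cpu_register_physical_memory_offset(0xfe000000, TARGET_PAGE_SIZE,
                                        io_index, 0);
}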
3300 void cpu_unregister_io_memory(int io_table_address)
3302 int i;
3303 int io_index = io_table_address >> IO_MEM_SHIFT;
3305 for (i = 0; i < 3; i++) {
3306 io_mem_read[io_index][i] = unassigned_mem_read[i];
3307 io_mem_write[io_index][i] = unassigned_mem_write[i];
3309 io_mem_opaque[io_index] = NULL;
3310 io_mem_used[io_index] = 0;
3313 static void io_mem_init(void)
3315 int i;
3317 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3318 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3319 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3320 for (i = 0; i < 5; i++)
3321 io_mem_used[i] = 1;
3323 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3324 watch_mem_write, NULL);
3327 #endif /* !defined(CONFIG_USER_ONLY) */
3329 /* physical memory access (slow version, mainly for debug) */
3330 #if defined(CONFIG_USER_ONLY)
3331 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3332 uint8_t *buf, int len, int is_write)
3334 int l, flags;
3335 target_ulong page;
3336 void * p;
3338 while (len > 0) {
3339 page = addr & TARGET_PAGE_MASK;
3340 l = (page + TARGET_PAGE_SIZE) - addr;
3341 if (l > len)
3342 l = len;
3343 flags = page_get_flags(page);
3344 if (!(flags & PAGE_VALID))
3345 return -1;
3346 if (is_write) {
3347 if (!(flags & PAGE_WRITE))
3348 return -1;
3349 /* XXX: this code should not depend on lock_user */
3350 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3351 return -1;
3352 memcpy(p, buf, l);
3353 unlock_user(p, addr, l);
3354 } else {
3355 if (!(flags & PAGE_READ))
3356 return -1;
3357 /* XXX: this code should not depend on lock_user */
3358 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3359 return -1;
3360 memcpy(buf, p, l);
3361 unlock_user(p, addr, 0);
3363 len -= l;
3364 buf += l;
3365 addr += l;
3367 return 0;
3370 #else
3371 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3372 int len, int is_write)
3374 int l, io_index;
3375 uint8_t *ptr;
3376 uint32_t val;
3377 target_phys_addr_t page;
3378 unsigned long pd;
3379 PhysPageDesc *p;
3381 while (len > 0) {
3382 page = addr & TARGET_PAGE_MASK;
3383 l = (page + TARGET_PAGE_SIZE) - addr;
3384 if (l > len)
3385 l = len;
3386 p = phys_page_find(page >> TARGET_PAGE_BITS);
3387 if (!p) {
3388 pd = IO_MEM_UNASSIGNED;
3389 } else {
3390 pd = p->phys_offset;
3393 if (is_write) {
3394 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3395 target_phys_addr_t addr1 = addr;
3396 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3397 if (p)
3398 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3399 /* XXX: could force cpu_single_env to NULL to avoid
3400 potential bugs */
3401 if (l >= 4 && ((addr1 & 3) == 0)) {
3402 /* 32 bit write access */
3403 val = ldl_p(buf);
3404 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3405 l = 4;
3406 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3407 /* 16 bit write access */
3408 val = lduw_p(buf);
3409 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3410 l = 2;
3411 } else {
3412 /* 8 bit write access */
3413 val = ldub_p(buf);
3414 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3415 l = 1;
3417 } else {
3418 unsigned long addr1;
3419 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3420 /* RAM case */
3421 ptr = qemu_get_ram_ptr(addr1);
3422 memcpy(ptr, buf, l);
3423 if (!cpu_physical_memory_is_dirty(addr1)) {
3424 /* invalidate code */
3425 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3426 /* set dirty bit */
3427 cpu_physical_memory_set_dirty_flags(
3428 addr1, (0xff & ~CODE_DIRTY_FLAG));
3431 } else {
3432 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3433 !(pd & IO_MEM_ROMD)) {
3434 target_phys_addr_t addr1 = addr;
3435 /* I/O case */
3436 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3437 if (p)
3438 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3439 if (l >= 4 && ((addr1 & 3) == 0)) {
3440 /* 32 bit read access */
3441 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3442 stl_p(buf, val);
3443 l = 4;
3444 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3445 /* 16 bit read access */
3446 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3447 stw_p(buf, val);
3448 l = 2;
3449 } else {
3450 /* 8 bit read access */
3451 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3452 stb_p(buf, val);
3453 l = 1;
3455 } else {
3456 /* RAM case */
3457 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3458 (addr & ~TARGET_PAGE_MASK);
3459 memcpy(buf, ptr, l);
3462 len -= l;
3463 buf += l;
3464 addr += l;
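/* A minimal illustrative sketch of the usual DMA-style callers of the
   routine above: the cpu_physical_memory_read()/write() wrappers already
   used elsewhere in this file. The function name, 'desc_pa' and the 16-byte
   descriptor layout are invented. */
static void mydev_dma_sketch(target_phys_addr_t desc_pa)
{
    uint8_t desc[16];
    uint8_t status = 1;

    cpu_physical_memory_read(desc_pa, desc, sizeof(desc));    /* guest -> host */
    /* ... interpret the descriptor, perform the transfer ... */
    cpu_physical_memory_write(desc_pa + 12, &status, 1);      /* host -> guest */
}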
3468 /* used for ROM loading : can write in RAM and ROM */
3469 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3470 const uint8_t *buf, int len)
3472 int l;
3473 uint8_t *ptr;
3474 target_phys_addr_t page;
3475 unsigned long pd;
3476 PhysPageDesc *p;
3478 while (len > 0) {
3479 page = addr & TARGET_PAGE_MASK;
3480 l = (page + TARGET_PAGE_SIZE) - addr;
3481 if (l > len)
3482 l = len;
3483 p = phys_page_find(page >> TARGET_PAGE_BITS);
3484 if (!p) {
3485 pd = IO_MEM_UNASSIGNED;
3486 } else {
3487 pd = p->phys_offset;
3490 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3491 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3492 !(pd & IO_MEM_ROMD)) {
3493 /* do nothing */
3494 } else {
3495 unsigned long addr1;
3496 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3497 /* ROM/RAM case */
3498 ptr = qemu_get_ram_ptr(addr1);
3499 memcpy(ptr, buf, l);
3501 len -= l;
3502 buf += l;
3503 addr += l;
3507 typedef struct {
3508 void *buffer;
3509 target_phys_addr_t addr;
3510 target_phys_addr_t len;
3511 } BounceBuffer;
3513 static BounceBuffer bounce;
3515 typedef struct MapClient {
3516 void *opaque;
3517 void (*callback)(void *opaque);
3518 QLIST_ENTRY(MapClient) link;
3519 } MapClient;
3521 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3522 = QLIST_HEAD_INITIALIZER(map_client_list);
3524 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3526 MapClient *client = qemu_malloc(sizeof(*client));
3528 client->opaque = opaque;
3529 client->callback = callback;
3530 QLIST_INSERT_HEAD(&map_client_list, client, link);
3531 return client;
3534 void cpu_unregister_map_client(void *_client)
3536 MapClient *client = (MapClient *)_client;
3538 QLIST_REMOVE(client, link);
3539 qemu_free(client);
3542 static void cpu_notify_map_clients(void)
3544 MapClient *client;
3546 while (!QLIST_EMPTY(&map_client_list)) {
3547 client = QLIST_FIRST(&map_client_list);
3548 client->callback(client->opaque);
3549 cpu_unregister_map_client(client);
3553 /* Map a physical memory region into a host virtual address.
3554 * May map a subset of the requested range, given by and returned in *plen.
3555 * May return NULL if resources needed to perform the mapping are exhausted.
3556 * Use only for reads OR writes - not for read-modify-write operations.
3557 * Use cpu_register_map_client() to know when retrying the map operation is
3558 * likely to succeed.
3560 void *cpu_physical_memory_map(target_phys_addr_t addr,
3561 target_phys_addr_t *plen,
3562 int is_write)
3564 target_phys_addr_t len = *plen;
3565 target_phys_addr_t done = 0;
3566 int l;
3567 uint8_t *ret = NULL;
3568 uint8_t *ptr;
3569 target_phys_addr_t page;
3570 unsigned long pd;
3571 PhysPageDesc *p;
3572 unsigned long addr1;
3574 while (len > 0) {
3575 page = addr & TARGET_PAGE_MASK;
3576 l = (page + TARGET_PAGE_SIZE) - addr;
3577 if (l > len)
3578 l = len;
3579 p = phys_page_find(page >> TARGET_PAGE_BITS);
3580 if (!p) {
3581 pd = IO_MEM_UNASSIGNED;
3582 } else {
3583 pd = p->phys_offset;
3586 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3587 if (done || bounce.buffer) {
3588 break;
3590 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3591 bounce.addr = addr;
3592 bounce.len = l;
3593 if (!is_write) {
3594 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3596 ptr = bounce.buffer;
3597 } else {
3598 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3599 ptr = qemu_get_ram_ptr(addr1);
3601 if (!done) {
3602 ret = ptr;
3603 } else if (ret + done != ptr) {
3604 break;
3607 len -= l;
3608 addr += l;
3609 done += l;
3611 *plen = done;
3612 return ret;
3615 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3616 * Will also mark the memory as dirty if is_write == 1. access_len gives
3617 * the amount of memory that was actually read or written by the caller.
3619 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3620 int is_write, target_phys_addr_t access_len)
3622 if (buffer != bounce.buffer) {
3623 if (is_write) {
3624 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3625 while (access_len) {
3626 unsigned l;
3627 l = TARGET_PAGE_SIZE;
3628 if (l > access_len)
3629 l = access_len;
3630 if (!cpu_physical_memory_is_dirty(addr1)) {
3631 /* invalidate code */
3632 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3633 /* set dirty bit */
3634 cpu_physical_memory_set_dirty_flags(
3635 addr1, (0xff & ~CODE_DIRTY_FLAG));
3637 addr1 += l;
3638 access_len -= l;
3641 return;
3643 if (is_write) {
3644 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3646 qemu_vfree(bounce.buffer);
3647 bounce.buffer = NULL;
3648 cpu_notify_map_clients();
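/* A minimal illustrative sketch of zero-copy use of the map/unmap pair
   above; the function name and 'pa' are invented. Per the contract, the
   mapping may come back shorter than requested or NULL. */
static void mydev_zero_copy_sketch(target_phys_addr_t pa)
{
    target_phys_addr_t got = 0x1000;
    uint8_t *host = cpu_physical_memory_map(pa, &got, 1 /* is_write */);

    if (host) {
        memset(host, 0, got);                    /* device writes guest RAM */
        cpu_physical_memory_unmap(host, got, 1, got);
    } else {
        /* Resources exhausted: fall back to cpu_physical_memory_rw(), or
           retry later via cpu_register_map_client(). */
    }
}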
3651 /* warning: addr must be aligned */
3652 uint32_t ldl_phys(target_phys_addr_t addr)
3654 int io_index;
3655 uint8_t *ptr;
3656 uint32_t val;
3657 unsigned long pd;
3658 PhysPageDesc *p;
3660 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3661 if (!p) {
3662 pd = IO_MEM_UNASSIGNED;
3663 } else {
3664 pd = p->phys_offset;
3667 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3668 !(pd & IO_MEM_ROMD)) {
3669 /* I/O case */
3670 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3671 if (p)
3672 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3673 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3674 } else {
3675 /* RAM case */
3676 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3677 (addr & ~TARGET_PAGE_MASK);
3678 val = ldl_p(ptr);
3680 return val;
3683 /* warning: addr must be aligned */
3684 uint64_t ldq_phys(target_phys_addr_t addr)
3686 int io_index;
3687 uint8_t *ptr;
3688 uint64_t val;
3689 unsigned long pd;
3690 PhysPageDesc *p;
3692 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3693 if (!p) {
3694 pd = IO_MEM_UNASSIGNED;
3695 } else {
3696 pd = p->phys_offset;
3699 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3700 !(pd & IO_MEM_ROMD)) {
3701 /* I/O case */
3702 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3703 if (p)
3704 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3705 #ifdef TARGET_WORDS_BIGENDIAN
3706 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3707 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3708 #else
3709 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3710 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3711 #endif
3712 } else {
3713 /* RAM case */
3714 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3715 (addr & ~TARGET_PAGE_MASK);
3716 val = ldq_p(ptr);
3718 return val;
3721 /* XXX: optimize */
3722 uint32_t ldub_phys(target_phys_addr_t addr)
3724 uint8_t val;
3725 cpu_physical_memory_read(addr, &val, 1);
3726 return val;
3729 /* warning: addr must be aligned */
3730 uint32_t lduw_phys(target_phys_addr_t addr)
3732 int io_index;
3733 uint8_t *ptr;
3734 uint64_t val;
3735 unsigned long pd;
3736 PhysPageDesc *p;
3738 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3739 if (!p) {
3740 pd = IO_MEM_UNASSIGNED;
3741 } else {
3742 pd = p->phys_offset;
3745 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3746 !(pd & IO_MEM_ROMD)) {
3747 /* I/O case */
3748 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3749 if (p)
3750 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3751 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3752 } else {
3753 /* RAM case */
3754 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3755 (addr & ~TARGET_PAGE_MASK);
3756 val = lduw_p(ptr);
3758 return val;
3761 /* warning: addr must be aligned. The ram page is not marked as dirty
3762 and the code inside is not invalidated. It is useful if the dirty
3763 bits are used to track modified PTEs */
3764 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3766 int io_index;
3767 uint8_t *ptr;
3768 unsigned long pd;
3769 PhysPageDesc *p;
3771 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3772 if (!p) {
3773 pd = IO_MEM_UNASSIGNED;
3774 } else {
3775 pd = p->phys_offset;
3778 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3779 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3780 if (p)
3781 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3782 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3783 } else {
3784 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3785 ptr = qemu_get_ram_ptr(addr1);
3786 stl_p(ptr, val);
3788 if (unlikely(in_migration)) {
3789 if (!cpu_physical_memory_is_dirty(addr1)) {
3790 /* invalidate code */
3791 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3792 /* set dirty bit */
3793 cpu_physical_memory_set_dirty_flags(
3794 addr1, (0xff & ~CODE_DIRTY_FLAG));
3800 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3802 int io_index;
3803 uint8_t *ptr;
3804 unsigned long pd;
3805 PhysPageDesc *p;
3807 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3808 if (!p) {
3809 pd = IO_MEM_UNASSIGNED;
3810 } else {
3811 pd = p->phys_offset;
3814 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3815 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3816 if (p)
3817 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3818 #ifdef TARGET_WORDS_BIGENDIAN
3819 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3820 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3821 #else
3822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3823 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3824 #endif
3825 } else {
3826 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3827 (addr & ~TARGET_PAGE_MASK);
3828 stq_p(ptr, val);
3832 /* warning: addr must be aligned */
3833 void stl_phys(target_phys_addr_t addr, uint32_t val)
3835 int io_index;
3836 uint8_t *ptr;
3837 unsigned long pd;
3838 PhysPageDesc *p;
3840 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3841 if (!p) {
3842 pd = IO_MEM_UNASSIGNED;
3843 } else {
3844 pd = p->phys_offset;
3847 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3848 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3849 if (p)
3850 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3851 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3852 } else {
3853 unsigned long addr1;
3854 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3855 /* RAM case */
3856 ptr = qemu_get_ram_ptr(addr1);
3857 stl_p(ptr, val);
3858 if (!cpu_physical_memory_is_dirty(addr1)) {
3859 /* invalidate code */
3860 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3861 /* set dirty bit */
3862 cpu_physical_memory_set_dirty_flags(addr1,
3863 (0xff & ~CODE_DIRTY_FLAG));
3868 /* XXX: optimize */
3869 void stb_phys(target_phys_addr_t addr, uint32_t val)
3871 uint8_t v = val;
3872 cpu_physical_memory_write(addr, &v, 1);
3875 /* warning: addr must be aligned */
3876 void stw_phys(target_phys_addr_t addr, uint32_t val)
3878 int io_index;
3879 uint8_t *ptr;
3880 unsigned long pd;
3881 PhysPageDesc *p;
3883 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3884 if (!p) {
3885 pd = IO_MEM_UNASSIGNED;
3886 } else {
3887 pd = p->phys_offset;
3890 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3891 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3892 if (p)
3893 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3894 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3895 } else {
3896 unsigned long addr1;
3897 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3898 /* RAM case */
3899 ptr = qemu_get_ram_ptr(addr1);
3900 stw_p(ptr, val);
3901 if (!cpu_physical_memory_is_dirty(addr1)) {
3902 /* invalidate code */
3903 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3904 /* set dirty bit */
3905 cpu_physical_memory_set_dirty_flags(addr1,
3906 (0xff & ~CODE_DIRTY_FLAG));
3911 /* XXX: optimize */
3912 void stq_phys(target_phys_addr_t addr, uint64_t val)
3914 val = tswap64(val);
3915 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3918 /* virtual memory access for debug (includes writing to ROM) */
3919 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3920 uint8_t *buf, int len, int is_write)
3922 int l;
3923 target_phys_addr_t phys_addr;
3924 target_ulong page;
3926 while (len > 0) {
3927 page = addr & TARGET_PAGE_MASK;
3928 phys_addr = cpu_get_phys_page_debug(env, page);
3929 /* if no physical page mapped, return an error */
3930 if (phys_addr == -1)
3931 return -1;
3932 l = (page + TARGET_PAGE_SIZE) - addr;
3933 if (l > len)
3934 l = len;
3935 phys_addr += (addr & ~TARGET_PAGE_MASK);
3936 if (is_write)
3937 cpu_physical_memory_write_rom(phys_addr, buf, l);
3938 else
3939 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3940 len -= l;
3941 buf += l;
3942 addr += l;
3944 return 0;
3946 #endif
3948 /* in deterministic execution mode, instructions doing device I/Os
3949 must be at the end of the TB */
3950 void cpu_io_recompile(CPUState *env, void *retaddr)
3952 TranslationBlock *tb;
3953 uint32_t n, cflags;
3954 target_ulong pc, cs_base;
3955 uint64_t flags;
3957 tb = tb_find_pc((unsigned long)retaddr);
3958 if (!tb) {
3959 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3960 retaddr);
3962 n = env->icount_decr.u16.low + tb->icount;
3963 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3964 /* Calculate how many instructions had been executed before the fault
3965 occurred. */
3966 n = n - env->icount_decr.u16.low;
3967 /* Generate a new TB ending on the I/O insn. */
3968 n++;
3969 /* On MIPS and SH, delay slot instructions can only be restarted if
3970 they were already the first instruction in the TB. If this is not
3971 the first instruction in a TB then re-execute the preceding
3972 branch. */
3973 #if defined(TARGET_MIPS)
3974 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3975 env->active_tc.PC -= 4;
3976 env->icount_decr.u16.low++;
3977 env->hflags &= ~MIPS_HFLAG_BMASK;
3979 #elif defined(TARGET_SH4)
3980 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3981 && n > 1) {
3982 env->pc -= 2;
3983 env->icount_decr.u16.low++;
3984 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3986 #endif
3987 /* This should never happen. */
3988 if (n > CF_COUNT_MASK)
3989 cpu_abort(env, "TB too big during recompile");
3991 cflags = n | CF_LAST_IO;
3992 pc = tb->pc;
3993 cs_base = tb->cs_base;
3994 flags = tb->flags;
3995 tb_phys_invalidate(tb, -1);
3996 /* FIXME: In theory this could raise an exception. In practice
3997 we have already translated the block once so it's probably ok. */
3998 tb_gen_code(env, pc, cs_base, flags, cflags);
3999 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4000 the first in the TB) then we end up generating a whole new TB and
4001 repeating the fault, which is horribly inefficient.
4002 Better would be to execute just this insn uncached, or generate a
4003 second new TB. */
4004 cpu_resume_from_signal(env, NULL);
4007 #if !defined(CONFIG_USER_ONLY)
4009 void dump_exec_info(FILE *f,
4010 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4012 int i, target_code_size, max_target_code_size;
4013 int direct_jmp_count, direct_jmp2_count, cross_page;
4014 TranslationBlock *tb;
4016 target_code_size = 0;
4017 max_target_code_size = 0;
4018 cross_page = 0;
4019 direct_jmp_count = 0;
4020 direct_jmp2_count = 0;
4021 for(i = 0; i < nb_tbs; i++) {
4022 tb = &tbs[i];
4023 target_code_size += tb->size;
4024 if (tb->size > max_target_code_size)
4025 max_target_code_size = tb->size;
4026 if (tb->page_addr[1] != -1)
4027 cross_page++;
4028 if (tb->tb_next_offset[0] != 0xffff) {
4029 direct_jmp_count++;
4030 if (tb->tb_next_offset[1] != 0xffff) {
4031 direct_jmp2_count++;
4035 /* XXX: avoid using doubles ? */
4036 cpu_fprintf(f, "Translation buffer state:\n");
4037 cpu_fprintf(f, "gen code size %ld/%ld\n",
4038 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4039 cpu_fprintf(f, "TB count %d/%d\n",
4040 nb_tbs, code_gen_max_blocks);
4041 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4042 nb_tbs ? target_code_size / nb_tbs : 0,
4043 max_target_code_size);
4044 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4045 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4046 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4047 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4048 cross_page,
4049 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4050 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4051 direct_jmp_count,
4052 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4053 direct_jmp2_count,
4054 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4055 cpu_fprintf(f, "\nStatistics:\n");
4056 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4057 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4058 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4059 tcg_dump_info(f, cpu_fprintf);
4062 #define MMUSUFFIX _cmmu
4063 #define GETPC() NULL
4064 #define env cpu_single_env
4065 #define SOFTMMU_CODE_ACCESS
4067 #define SHIFT 0
4068 #include "softmmu_template.h"
4070 #define SHIFT 1
4071 #include "softmmu_template.h"
4073 #define SHIFT 2
4074 #include "softmmu_template.h"
4076 #define SHIFT 3
4077 #include "softmmu_template.h"
4079 #undef env
4081 #endif