device-assignment: Byte-wise ROM read
[qemu-kvm/stefanha.git] / exec.c
blob b14bad93fc6e8476c9fd41402694b6ebc5b6ab46
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "cache-utils.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "hw/qdev.h"
46 #include "osdep.h"
47 #include "kvm.h"
48 #include "qemu-timer.h"
49 #if defined(CONFIG_USER_ONLY)
50 #include <qemu.h>
51 #include <signal.h>
52 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
53 #include <sys/param.h>
54 #if __FreeBSD_version >= 700104
55 #define HAVE_KINFO_GETVMMAP
56 #define sigqueue sigqueue_freebsd /* avoid redefinition */
57 #include <sys/time.h>
58 #include <sys/proc.h>
59 #include <machine/profile.h>
60 #define _KERNEL
61 #include <sys/user.h>
62 #undef _KERNEL
63 #undef sigqueue
64 #include <libutil.h>
65 #endif
66 #endif
67 #endif
69 //#define DEBUG_TB_INVALIDATE
70 //#define DEBUG_FLUSH
71 //#define DEBUG_TLB
72 //#define DEBUG_UNASSIGNED
74 /* make various TB consistency checks */
75 //#define DEBUG_TB_CHECK
76 //#define DEBUG_TLB_CHECK
78 //#define DEBUG_IOPORT
79 //#define DEBUG_SUBPAGE
81 #if !defined(CONFIG_USER_ONLY)
82 /* TB consistency checks only implemented for usermode emulation. */
83 #undef DEBUG_TB_CHECK
84 #endif
86 #define SMC_BITMAP_USE_THRESHOLD 10
88 static TranslationBlock *tbs;
89 static int code_gen_max_blocks;
90 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
91 static int nb_tbs;
92 /* any access to the tbs or the page table must use this lock */
93 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
95 #if defined(__arm__) || defined(__sparc_v9__)
96 /* The prologue must be reachable with a direct jump. ARM and Sparc64
97 have limited branch ranges (possibly also PPC) so place it in a
98 section close to code segment. */
99 #define code_gen_section \
100 __attribute__((__section__(".gen_code"))) \
101 __attribute__((aligned (32)))
102 #elif defined(_WIN32)
103 /* Maximum alignment for Win32 is 16. */
104 #define code_gen_section \
105 __attribute__((aligned (16)))
106 #else
107 #define code_gen_section \
108 __attribute__((aligned (32)))
109 #endif
111 uint8_t code_gen_prologue[1024] code_gen_section;
112 static uint8_t *code_gen_buffer;
113 static unsigned long code_gen_buffer_size;
114 /* threshold to flush the translated code buffer */
115 static unsigned long code_gen_buffer_max_size;
116 static uint8_t *code_gen_ptr;
118 #if !defined(CONFIG_USER_ONLY)
119 int phys_ram_fd;
120 static int in_migration;
122 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
123 #endif
125 CPUState *first_cpu;
126 /* current CPU in the current thread. It is only valid inside
127 cpu_exec() */
128 CPUState *cpu_single_env;
129 /* 0 = Do not count executed instructions.
130 1 = Precise instruction counting.
131 2 = Adaptive rate instruction counting. */
132 int use_icount = 0;
133 /* Current instruction counter. While executing translated code this may
134 include some instructions that have not yet been executed. */
135 int64_t qemu_icount;
137 typedef struct PageDesc {
138 /* list of TBs intersecting this ram page */
139 TranslationBlock *first_tb;
140 /* in order to optimize self modifying code, we count the number
141 of lookups we do to a given page to use a bitmap */
142 unsigned int code_write_count;
143 uint8_t *code_bitmap;
144 #if defined(CONFIG_USER_ONLY)
145 unsigned long flags;
146 #endif
147 } PageDesc;
149 /* In system mode we want L1_MAP to be based on ram offsets,
150 while in user mode we want it to be based on virtual addresses. */
151 #if !defined(CONFIG_USER_ONLY)
152 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
153 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
154 #else
155 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
156 #endif
157 #else
158 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
159 #endif
161 /* Size of the L2 (and L3, etc) page tables. */
162 #define L2_BITS 10
163 #define L2_SIZE (1 << L2_BITS)
165 /* The bits remaining after N lower levels of page tables. */
166 #define P_L1_BITS_REM \
167 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168 #define V_L1_BITS_REM \
169 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
171 /* Size of the L1 page table. Avoid silly small sizes. */
172 #if P_L1_BITS_REM < 4
173 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
174 #else
175 #define P_L1_BITS P_L1_BITS_REM
176 #endif
178 #if V_L1_BITS_REM < 4
179 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
180 #else
181 #define V_L1_BITS V_L1_BITS_REM
182 #endif
184 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
185 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
187 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
188 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
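/* Worked example (illustrative only; the real values depend on the target):
   with 12-bit pages, a 32-bit address space and L2_BITS = 10 there are
   32 - 12 = 20 index bits.  20 % 10 = 0 falls below the 4-bit minimum, so
   the top level absorbs a whole L2 level: V_L1_BITS = 10, V_L1_SIZE = 1024
   and V_L1_SHIFT = 10, i.e. one top-level table over one leaf level. */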
190 unsigned long qemu_real_host_page_size;
191 unsigned long qemu_host_page_bits;
192 unsigned long qemu_host_page_size;
193 unsigned long qemu_host_page_mask;
195 /* This is a multi-level map on the virtual address space.
196 The bottom level has pointers to PageDesc. */
197 static void *l1_map[V_L1_SIZE];
199 #if !defined(CONFIG_USER_ONLY)
200 typedef struct PhysPageDesc {
201 /* offset in host memory of the page + io_index in the low bits */
202 ram_addr_t phys_offset;
203 ram_addr_t region_offset;
204 } PhysPageDesc;
206 /* This is a multi-level map on the physical address space.
207 The bottom level has pointers to PhysPageDesc. */
208 static void *l1_phys_map[P_L1_SIZE];
210 static void io_mem_init(void);
212 /* io memory support */
213 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
214 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
215 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
216 static char io_mem_used[IO_MEM_NB_ENTRIES];
217 static int io_mem_watch;
218 #endif
220 /* log support */
221 #ifdef WIN32
222 static const char *logfilename = "qemu.log";
223 #else
224 static const char *logfilename = "/tmp/qemu.log";
225 #endif
226 FILE *logfile;
227 int loglevel;
228 static int log_append = 0;
230 /* statistics */
231 #if !defined(CONFIG_USER_ONLY)
232 static int tlb_flush_count;
233 #endif
234 static int tb_flush_count;
235 static int tb_phys_invalidate_count;
237 #ifdef _WIN32
238 static void map_exec(void *addr, long size)
240 DWORD old_protect;
241 VirtualProtect(addr, size,
242 PAGE_EXECUTE_READWRITE, &old_protect);
245 #else
246 static void map_exec(void *addr, long size)
248 unsigned long start, end, page_size;
250 page_size = getpagesize();
251 start = (unsigned long)addr;
252 start &= ~(page_size - 1);
254 end = (unsigned long)addr + size;
255 end += page_size - 1;
256 end &= ~(page_size - 1);
258 mprotect((void *)start, end - start,
259 PROT_READ | PROT_WRITE | PROT_EXEC);
261 #endif
263 static void page_init(void)
265 /* NOTE: we can always suppose that qemu_host_page_size >=
266 TARGET_PAGE_SIZE */
267 #ifdef _WIN32
269 SYSTEM_INFO system_info;
271 GetSystemInfo(&system_info);
272 qemu_real_host_page_size = system_info.dwPageSize;
274 #else
275 qemu_real_host_page_size = getpagesize();
276 #endif
277 if (qemu_host_page_size == 0)
278 qemu_host_page_size = qemu_real_host_page_size;
279 if (qemu_host_page_size < TARGET_PAGE_SIZE)
280 qemu_host_page_size = TARGET_PAGE_SIZE;
281 qemu_host_page_bits = 0;
282 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
283 qemu_host_page_bits++;
284 qemu_host_page_mask = ~(qemu_host_page_size - 1);
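/* e.g. with a 4096-byte host page this yields qemu_host_page_bits = 12 and
   qemu_host_page_mask = ~0xfff, rounding addresses down to 4 KiB. */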
286 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
288 #ifdef HAVE_KINFO_GETVMMAP
289 struct kinfo_vmentry *freep;
290 int i, cnt;
292 freep = kinfo_getvmmap(getpid(), &cnt);
293 if (freep) {
294 mmap_lock();
295 for (i = 0; i < cnt; i++) {
296 unsigned long startaddr, endaddr;
298 startaddr = freep[i].kve_start;
299 endaddr = freep[i].kve_end;
300 if (h2g_valid(startaddr)) {
301 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
303 if (h2g_valid(endaddr)) {
304 endaddr = h2g(endaddr);
305 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
306 } else {
307 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
308 endaddr = ~0ul;
309 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
310 #endif
314 free(freep);
315 mmap_unlock();
317 #else
318 FILE *f;
320 last_brk = (unsigned long)sbrk(0);
322 f = fopen("/compat/linux/proc/self/maps", "r");
323 if (f) {
324 mmap_lock();
326 do {
327 unsigned long startaddr, endaddr;
328 int n;
330 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
332 if (n == 2 && h2g_valid(startaddr)) {
333 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
335 if (h2g_valid(endaddr)) {
336 endaddr = h2g(endaddr);
337 } else {
338 endaddr = ~0ul;
340 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
342 } while (!feof(f));
344 fclose(f);
345 mmap_unlock();
347 #endif
349 #endif
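/* Walk (and optionally populate) the l1_map radix tree: the top V_L1_BITS of
   the page index select an l1_map slot, each intermediate level consumes
   L2_BITS, and the leaf level is an array of L2_SIZE PageDesc entries. */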
352 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
354 PageDesc *pd;
355 void **lp;
356 int i;
358 #if defined(CONFIG_USER_ONLY)
359 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
360 # define ALLOC(P, SIZE) \
361 do { \
362 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
363 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
364 } while (0)
365 #else
366 # define ALLOC(P, SIZE) \
367 do { P = qemu_mallocz(SIZE); } while (0)
368 #endif
370 /* Level 1. Always allocated. */
371 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
373 /* Level 2..N-1. */
374 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
375 void **p = *lp;
377 if (p == NULL) {
378 if (!alloc) {
379 return NULL;
381 ALLOC(p, sizeof(void *) * L2_SIZE);
382 *lp = p;
385 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
388 pd = *lp;
389 if (pd == NULL) {
390 if (!alloc) {
391 return NULL;
393 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
394 *lp = pd;
397 #undef ALLOC
399 return pd + (index & (L2_SIZE - 1));
402 static inline PageDesc *page_find(tb_page_addr_t index)
404 return page_find_alloc(index, 0);
407 #if !defined(CONFIG_USER_ONLY)
408 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
410 PhysPageDesc *pd;
411 void **lp;
412 int i;
414 /* Level 1. Always allocated. */
415 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
417 /* Level 2..N-1. */
418 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
419 void **p = *lp;
420 if (p == NULL) {
421 if (!alloc) {
422 return NULL;
424 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
426 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
429 pd = *lp;
430 if (pd == NULL) {
431 int i;
433 if (!alloc) {
434 return NULL;
437 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
439 for (i = 0; i < L2_SIZE; i++) {
440 pd[i].phys_offset = IO_MEM_UNASSIGNED;
441 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
445 return pd + (index & (L2_SIZE - 1));
448 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
450 return phys_page_find_alloc(index, 0);
453 static void tlb_protect_code(ram_addr_t ram_addr);
454 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
455 target_ulong vaddr);
456 #define mmap_lock() do { } while(0)
457 #define mmap_unlock() do { } while(0)
458 #endif
460 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
462 #if defined(CONFIG_USER_ONLY)
463 /* Currently it is not recommended to allocate big chunks of data in
464 user mode. This will change once a dedicated libc is used. */
465 #define USE_STATIC_CODE_GEN_BUFFER
466 #endif
468 #ifdef USE_STATIC_CODE_GEN_BUFFER
469 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
470 __attribute__((aligned (CODE_GEN_ALIGN)));
471 #endif
473 static void code_gen_alloc(unsigned long tb_size)
475 if (kvm_enabled())
476 return;
478 #ifdef USE_STATIC_CODE_GEN_BUFFER
479 code_gen_buffer = static_code_gen_buffer;
480 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482 #else
483 code_gen_buffer_size = tb_size;
484 if (code_gen_buffer_size == 0) {
485 #if defined(CONFIG_USER_ONLY)
486 /* in user mode, phys_ram_size is not meaningful */
487 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
488 #else
489 /* XXX: needs adjustments */
490 code_gen_buffer_size = (unsigned long)(ram_size / 4);
491 #endif
493 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
494 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
495 /* The code gen buffer location may have constraints depending on
496 the host cpu and OS */
497 #if defined(__linux__)
499 int flags;
500 void *start = NULL;
502 flags = MAP_PRIVATE | MAP_ANONYMOUS;
503 #if defined(__x86_64__)
504 flags |= MAP_32BIT;
505 /* Cannot map more than that */
506 if (code_gen_buffer_size > (800 * 1024 * 1024))
507 code_gen_buffer_size = (800 * 1024 * 1024);
508 #elif defined(__sparc_v9__)
509 // Map the buffer below 2G, so we can use direct calls and branches
510 flags |= MAP_FIXED;
511 start = (void *) 0x60000000UL;
512 if (code_gen_buffer_size > (512 * 1024 * 1024))
513 code_gen_buffer_size = (512 * 1024 * 1024);
514 #elif defined(__arm__)
515 /* Map the buffer below 32M, so we can use direct calls and branches */
516 flags |= MAP_FIXED;
517 start = (void *) 0x01000000UL;
518 if (code_gen_buffer_size > 16 * 1024 * 1024)
519 code_gen_buffer_size = 16 * 1024 * 1024;
520 #elif defined(__s390x__)
521 /* Map the buffer so that we can use direct calls and branches. */
522 /* We have a +- 4GB range on the branches; leave some slop. */
523 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
524 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
526 start = (void *)0x90000000UL;
527 #endif
528 code_gen_buffer = mmap(start, code_gen_buffer_size,
529 PROT_WRITE | PROT_READ | PROT_EXEC,
530 flags, -1, 0);
531 if (code_gen_buffer == MAP_FAILED) {
532 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
533 exit(1);
536 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
538 int flags;
539 void *addr = NULL;
540 flags = MAP_PRIVATE | MAP_ANONYMOUS;
541 #if defined(__x86_64__)
542 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
543 * 0x40000000 is free */
544 flags |= MAP_FIXED;
545 addr = (void *)0x40000000;
546 /* Cannot map more than that */
547 if (code_gen_buffer_size > (800 * 1024 * 1024))
548 code_gen_buffer_size = (800 * 1024 * 1024);
549 #endif
550 code_gen_buffer = mmap(addr, code_gen_buffer_size,
551 PROT_WRITE | PROT_READ | PROT_EXEC,
552 flags, -1, 0);
553 if (code_gen_buffer == MAP_FAILED) {
554 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
555 exit(1);
558 #else
559 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
560 map_exec(code_gen_buffer, code_gen_buffer_size);
561 #endif
562 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
563 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
564 code_gen_buffer_max_size = code_gen_buffer_size -
565 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
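/* Keep one worst-case translation block (TCG_MAX_OP_SIZE * OPC_MAX_SIZE
   bytes) of slack: tb_alloc() stops handing out TBs once code_gen_ptr
   crosses code_gen_buffer_max_size, so generation that starts just below
   the threshold still fits in the buffer. */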
566 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
567 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
570 /* Must be called before using the QEMU cpus. 'tb_size' is the size
571 (in bytes) allocated to the translation buffer. Zero means default
572 size. */
573 void cpu_exec_init_all(unsigned long tb_size)
575 cpu_gen_init();
576 code_gen_alloc(tb_size);
577 code_gen_ptr = code_gen_buffer;
578 page_init();
579 #if !defined(CONFIG_USER_ONLY)
580 io_mem_init();
581 #endif
582 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
583 /* There's no guest base to take into account, so go ahead and
584 initialize the prologue now. */
585 tcg_prologue_init(&tcg_ctx);
586 #endif
589 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
591 static int cpu_common_post_load(void *opaque, int version_id)
593 CPUState *env = opaque;
595 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
596 version_id is increased. */
597 env->interrupt_request &= ~0x01;
598 tlb_flush(env, 1);
600 return 0;
603 static const VMStateDescription vmstate_cpu_common = {
604 .name = "cpu_common",
605 .version_id = 1,
606 .minimum_version_id = 1,
607 .minimum_version_id_old = 1,
608 .post_load = cpu_common_post_load,
609 .fields = (VMStateField []) {
610 VMSTATE_UINT32(halted, CPUState),
611 VMSTATE_UINT32(interrupt_request, CPUState),
612 VMSTATE_END_OF_LIST()
615 #endif
617 CPUState *qemu_get_cpu(int cpu)
619 CPUState *env = first_cpu;
621 while (env) {
622 if (env->cpu_index == cpu)
623 break;
624 env = env->next_cpu;
627 return env;
630 void cpu_exec_init(CPUState *env)
632 CPUState **penv;
633 int cpu_index;
635 #if defined(CONFIG_USER_ONLY)
636 cpu_list_lock();
637 #endif
638 env->next_cpu = NULL;
639 penv = &first_cpu;
640 cpu_index = 0;
641 while (*penv != NULL) {
642 penv = &(*penv)->next_cpu;
643 cpu_index++;
645 env->cpu_index = cpu_index;
646 env->numa_node = 0;
647 QTAILQ_INIT(&env->breakpoints);
648 QTAILQ_INIT(&env->watchpoints);
649 #ifdef __WIN32
650 env->thread_id = GetCurrentProcessId();
651 #else
652 env->thread_id = getpid();
653 #endif
654 *penv = env;
655 #if defined(CONFIG_USER_ONLY)
656 cpu_list_unlock();
657 #endif
658 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
659 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
660 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
661 cpu_save, cpu_load, env);
662 #endif
665 static inline void invalidate_page_bitmap(PageDesc *p)
667 if (p->code_bitmap) {
668 qemu_free(p->code_bitmap);
669 p->code_bitmap = NULL;
671 p->code_write_count = 0;
674 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
676 static void page_flush_tb_1 (int level, void **lp)
678 int i;
680 if (*lp == NULL) {
681 return;
683 if (level == 0) {
684 PageDesc *pd = *lp;
685 for (i = 0; i < L2_SIZE; ++i) {
686 pd[i].first_tb = NULL;
687 invalidate_page_bitmap(pd + i);
689 } else {
690 void **pp = *lp;
691 for (i = 0; i < L2_SIZE; ++i) {
692 page_flush_tb_1 (level - 1, pp + i);
697 static void page_flush_tb(void)
699 int i;
700 for (i = 0; i < V_L1_SIZE; i++) {
701 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
705 /* flush all the translation blocks */
706 /* XXX: tb_flush is currently not thread safe */
707 void tb_flush(CPUState *env1)
709 CPUState *env;
710 #if defined(DEBUG_FLUSH)
711 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
712 (unsigned long)(code_gen_ptr - code_gen_buffer),
713 nb_tbs, nb_tbs > 0 ?
714 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
715 #endif
716 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
717 cpu_abort(env1, "Internal error: code buffer overflow\n");
719 nb_tbs = 0;
721 for(env = first_cpu; env != NULL; env = env->next_cpu) {
722 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
725 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
726 page_flush_tb();
728 code_gen_ptr = code_gen_buffer;
729 /* XXX: flush processor icache at this point if cache flush is
730 expensive */
731 tb_flush_count++;
734 #ifdef DEBUG_TB_CHECK
736 static void tb_invalidate_check(target_ulong address)
738 TranslationBlock *tb;
739 int i;
740 address &= TARGET_PAGE_MASK;
741 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
742 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
743 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
744 address >= tb->pc + tb->size)) {
745 printf("ERROR invalidate: address=" TARGET_FMT_lx
746 " PC=%08lx size=%04x\n",
747 address, (long)tb->pc, tb->size);
753 /* verify that all the pages have correct rights for code */
754 static void tb_page_check(void)
756 TranslationBlock *tb;
757 int i, flags1, flags2;
759 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
760 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
761 flags1 = page_get_flags(tb->pc);
762 flags2 = page_get_flags(tb->pc + tb->size - 1);
763 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
764 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
765 (long)tb->pc, tb->size, flags1, flags2);
771 #endif
773 /* invalidate one TB */
774 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
775 int next_offset)
777 TranslationBlock *tb1;
778 for(;;) {
779 tb1 = *ptb;
780 if (tb1 == tb) {
781 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
782 break;
784 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
788 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
790 TranslationBlock *tb1;
791 unsigned int n1;
793 for(;;) {
794 tb1 = *ptb;
795 n1 = (long)tb1 & 3;
796 tb1 = (TranslationBlock *)((long)tb1 & ~3);
797 if (tb1 == tb) {
798 *ptb = tb1->page_next[n1];
799 break;
801 ptb = &tb1->page_next[n1];
805 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
807 TranslationBlock *tb1, **ptb;
808 unsigned int n1;
810 ptb = &tb->jmp_next[n];
811 tb1 = *ptb;
812 if (tb1) {
813 /* find tb(n) in circular list */
814 for(;;) {
815 tb1 = *ptb;
816 n1 = (long)tb1 & 3;
817 tb1 = (TranslationBlock *)((long)tb1 & ~3);
818 if (n1 == n && tb1 == tb)
819 break;
820 if (n1 == 2) {
821 ptb = &tb1->jmp_first;
822 } else {
823 ptb = &tb1->jmp_next[n1];
826 /* now we can suppress tb(n) from the list */
827 *ptb = tb->jmp_next[n];
829 tb->jmp_next[n] = NULL;
833 /* reset the jump entry 'n' of a TB so that it is not chained to
834 another TB */
835 static inline void tb_reset_jump(TranslationBlock *tb, int n)
837 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
840 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
842 CPUState *env;
843 PageDesc *p;
844 unsigned int h, n1;
845 tb_page_addr_t phys_pc;
846 TranslationBlock *tb1, *tb2;
848 /* remove the TB from the hash list */
849 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
850 h = tb_phys_hash_func(phys_pc);
851 tb_remove(&tb_phys_hash[h], tb,
852 offsetof(TranslationBlock, phys_hash_next));
854 /* remove the TB from the page list */
855 if (tb->page_addr[0] != page_addr) {
856 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
857 tb_page_remove(&p->first_tb, tb);
858 invalidate_page_bitmap(p);
860 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
861 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
862 tb_page_remove(&p->first_tb, tb);
863 invalidate_page_bitmap(p);
866 tb_invalidated_flag = 1;
868 /* remove the TB from the hash list */
869 h = tb_jmp_cache_hash_func(tb->pc);
870 for(env = first_cpu; env != NULL; env = env->next_cpu) {
871 if (env->tb_jmp_cache[h] == tb)
872 env->tb_jmp_cache[h] = NULL;
875 /* suppress this TB from the two jump lists */
876 tb_jmp_remove(tb, 0);
877 tb_jmp_remove(tb, 1);
879 /* suppress any remaining jumps to this TB */
880 tb1 = tb->jmp_first;
881 for(;;) {
882 n1 = (long)tb1 & 3;
883 if (n1 == 2)
884 break;
885 tb1 = (TranslationBlock *)((long)tb1 & ~3);
886 tb2 = tb1->jmp_next[n1];
887 tb_reset_jump(tb1, n1);
888 tb1->jmp_next[n1] = NULL;
889 tb1 = tb2;
891 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
893 tb_phys_invalidate_count++;
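/* set_bits() marks 'len' bits starting at bit 'start' of a byte-wise bitmap.
   Illustrative example: set_bits(tab, 3, 7) covers bits 3..9, OR-ing tab[0]
   with 0xf8 (bits 3-7) and tab[1] with 0x03 (bits 8-9). */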
896 static inline void set_bits(uint8_t *tab, int start, int len)
898 int end, mask, end1;
900 end = start + len;
901 tab += start >> 3;
902 mask = 0xff << (start & 7);
903 if ((start & ~7) == (end & ~7)) {
904 if (start < end) {
905 mask &= ~(0xff << (end & 7));
906 *tab |= mask;
908 } else {
909 *tab++ |= mask;
910 start = (start + 8) & ~7;
911 end1 = end & ~7;
912 while (start < end1) {
913 *tab++ = 0xff;
914 start += 8;
916 if (start < end) {
917 mask = ~(0xff << (end & 7));
918 *tab |= mask;
923 static void build_page_bitmap(PageDesc *p)
925 int n, tb_start, tb_end;
926 TranslationBlock *tb;
928 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
930 tb = p->first_tb;
931 while (tb != NULL) {
932 n = (long)tb & 3;
933 tb = (TranslationBlock *)((long)tb & ~3);
934 /* NOTE: this is subtle as a TB may span two physical pages */
935 if (n == 0) {
936 /* NOTE: tb_end may be after the end of the page, but
937 it is not a problem */
938 tb_start = tb->pc & ~TARGET_PAGE_MASK;
939 tb_end = tb_start + tb->size;
940 if (tb_end > TARGET_PAGE_SIZE)
941 tb_end = TARGET_PAGE_SIZE;
942 } else {
943 tb_start = 0;
944 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
946 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
947 tb = tb->page_next[n];
951 TranslationBlock *tb_gen_code(CPUState *env,
952 target_ulong pc, target_ulong cs_base,
953 int flags, int cflags)
955 TranslationBlock *tb;
956 uint8_t *tc_ptr;
957 tb_page_addr_t phys_pc, phys_page2;
958 target_ulong virt_page2;
959 int code_gen_size;
961 phys_pc = get_page_addr_code(env, pc);
962 tb = tb_alloc(pc);
963 if (!tb) {
964 /* flush must be done */
965 tb_flush(env);
966 /* cannot fail at this point */
967 tb = tb_alloc(pc);
968 /* Don't forget to invalidate previous TB info. */
969 tb_invalidated_flag = 1;
971 tc_ptr = code_gen_ptr;
972 tb->tc_ptr = tc_ptr;
973 tb->cs_base = cs_base;
974 tb->flags = flags;
975 tb->cflags = cflags;
976 cpu_gen_code(env, tb, &code_gen_size);
977 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
979 /* check next page if needed */
980 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
981 phys_page2 = -1;
982 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
983 phys_page2 = get_page_addr_code(env, virt_page2);
985 tb_link_page(tb, phys_pc, phys_page2);
986 return tb;
989 /* invalidate all TBs which intersect with the target physical page
990 starting in range [start;end[. NOTE: start and end must refer to
991 the same physical page. 'is_cpu_write_access' should be true if called
992 from a real cpu write access: the virtual CPU will exit the current
993 TB if code is modified inside this TB. */
994 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
995 int is_cpu_write_access)
997 TranslationBlock *tb, *tb_next, *saved_tb;
998 CPUState *env = cpu_single_env;
999 tb_page_addr_t tb_start, tb_end;
1000 PageDesc *p;
1001 int n;
1002 #ifdef TARGET_HAS_PRECISE_SMC
1003 int current_tb_not_found = is_cpu_write_access;
1004 TranslationBlock *current_tb = NULL;
1005 int current_tb_modified = 0;
1006 target_ulong current_pc = 0;
1007 target_ulong current_cs_base = 0;
1008 int current_flags = 0;
1009 #endif /* TARGET_HAS_PRECISE_SMC */
1011 p = page_find(start >> TARGET_PAGE_BITS);
1012 if (!p)
1013 return;
1014 if (!p->code_bitmap &&
1015 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1016 is_cpu_write_access) {
1017 /* build code bitmap */
1018 build_page_bitmap(p);
1021 /* we remove all the TBs in the range [start, end[ */
1022 /* XXX: see if in some cases it could be faster to invalidate all the code */
1023 tb = p->first_tb;
1024 while (tb != NULL) {
1025 n = (long)tb & 3;
1026 tb = (TranslationBlock *)((long)tb & ~3);
1027 tb_next = tb->page_next[n];
1028 /* NOTE: this is subtle as a TB may span two physical pages */
1029 if (n == 0) {
1030 /* NOTE: tb_end may be after the end of the page, but
1031 it is not a problem */
1032 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1033 tb_end = tb_start + tb->size;
1034 } else {
1035 tb_start = tb->page_addr[1];
1036 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1038 if (!(tb_end <= start || tb_start >= end)) {
1039 #ifdef TARGET_HAS_PRECISE_SMC
1040 if (current_tb_not_found) {
1041 current_tb_not_found = 0;
1042 current_tb = NULL;
1043 if (env->mem_io_pc) {
1044 /* now we have a real cpu fault */
1045 current_tb = tb_find_pc(env->mem_io_pc);
1048 if (current_tb == tb &&
1049 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1050 /* If we are modifying the current TB, we must stop
1051 its execution. We could be more precise by checking
1052 that the modification is after the current PC, but it
1053 would require a specialized function to partially
1054 restore the CPU state */
1056 current_tb_modified = 1;
1057 cpu_restore_state(current_tb, env,
1058 env->mem_io_pc, NULL);
1059 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1060 &current_flags);
1062 #endif /* TARGET_HAS_PRECISE_SMC */
1063 /* we need to do that to handle the case where a signal
1064 occurs while doing tb_phys_invalidate() */
1065 saved_tb = NULL;
1066 if (env) {
1067 saved_tb = env->current_tb;
1068 env->current_tb = NULL;
1070 tb_phys_invalidate(tb, -1);
1071 if (env) {
1072 env->current_tb = saved_tb;
1073 if (env->interrupt_request && env->current_tb)
1074 cpu_interrupt(env, env->interrupt_request);
1077 tb = tb_next;
1079 #if !defined(CONFIG_USER_ONLY)
1080 /* if no code remaining, no need to continue to use slow writes */
1081 if (!p->first_tb) {
1082 invalidate_page_bitmap(p);
1083 if (is_cpu_write_access) {
1084 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1087 #endif
1088 #ifdef TARGET_HAS_PRECISE_SMC
1089 if (current_tb_modified) {
1090 /* we generate a block containing just the instruction
1091 modifying the memory. It will ensure that it cannot modify
1092 itself */
1093 env->current_tb = NULL;
1094 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1095 cpu_resume_from_signal(env, NULL);
1097 #endif
1100 /* len must be <= 8 and start must be a multiple of len */
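/* When the page already has a code bitmap, the write only forces an
   invalidate if one of the 'len' bits starting at the page offset is set;
   otherwise the fast path falls through without touching any TB. */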
1101 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1103 PageDesc *p;
1104 int offset, b;
1105 #if 0
1106 if (1) {
1107 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1108 cpu_single_env->mem_io_vaddr, len,
1109 cpu_single_env->eip,
1110 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1112 #endif
1113 p = page_find(start >> TARGET_PAGE_BITS);
1114 if (!p)
1115 return;
1116 if (p->code_bitmap) {
1117 offset = start & ~TARGET_PAGE_MASK;
1118 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1119 if (b & ((1 << len) - 1))
1120 goto do_invalidate;
1121 } else {
1122 do_invalidate:
1123 tb_invalidate_phys_page_range(start, start + len, 1);
1127 #if !defined(CONFIG_SOFTMMU)
1128 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1129 unsigned long pc, void *puc)
1131 TranslationBlock *tb;
1132 PageDesc *p;
1133 int n;
1134 #ifdef TARGET_HAS_PRECISE_SMC
1135 TranslationBlock *current_tb = NULL;
1136 CPUState *env = cpu_single_env;
1137 int current_tb_modified = 0;
1138 target_ulong current_pc = 0;
1139 target_ulong current_cs_base = 0;
1140 int current_flags = 0;
1141 #endif
1143 addr &= TARGET_PAGE_MASK;
1144 p = page_find(addr >> TARGET_PAGE_BITS);
1145 if (!p)
1146 return;
1147 tb = p->first_tb;
1148 #ifdef TARGET_HAS_PRECISE_SMC
1149 if (tb && pc != 0) {
1150 current_tb = tb_find_pc(pc);
1152 #endif
1153 while (tb != NULL) {
1154 n = (long)tb & 3;
1155 tb = (TranslationBlock *)((long)tb & ~3);
1156 #ifdef TARGET_HAS_PRECISE_SMC
1157 if (current_tb == tb &&
1158 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1159 /* If we are modifying the current TB, we must stop
1160 its execution. We could be more precise by checking
1161 that the modification is after the current PC, but it
1162 would require a specialized function to partially
1163 restore the CPU state */
1165 current_tb_modified = 1;
1166 cpu_restore_state(current_tb, env, pc, puc);
1167 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1168 &current_flags);
1170 #endif /* TARGET_HAS_PRECISE_SMC */
1171 tb_phys_invalidate(tb, addr);
1172 tb = tb->page_next[n];
1174 p->first_tb = NULL;
1175 #ifdef TARGET_HAS_PRECISE_SMC
1176 if (current_tb_modified) {
1177 /* we generate a block containing just the instruction
1178 modifying the memory. It will ensure that it cannot modify
1179 itself */
1180 env->current_tb = NULL;
1181 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1182 cpu_resume_from_signal(env, puc);
1184 #endif
1186 #endif
1188 /* add the tb in the target page and protect it if necessary */
1189 static inline void tb_alloc_page(TranslationBlock *tb,
1190 unsigned int n, tb_page_addr_t page_addr)
1192 PageDesc *p;
1193 TranslationBlock *last_first_tb;
1195 tb->page_addr[n] = page_addr;
1196 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1197 tb->page_next[n] = p->first_tb;
1198 last_first_tb = p->first_tb;
1199 p->first_tb = (TranslationBlock *)((long)tb | n);
1200 invalidate_page_bitmap(p);
1202 #if defined(TARGET_HAS_SMC) || 1
1204 #if defined(CONFIG_USER_ONLY)
1205 if (p->flags & PAGE_WRITE) {
1206 target_ulong addr;
1207 PageDesc *p2;
1208 int prot;
1210 /* force the host page as non writable (writes will have a
1211 page fault + mprotect overhead) */
1212 page_addr &= qemu_host_page_mask;
1213 prot = 0;
1214 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1215 addr += TARGET_PAGE_SIZE) {
1217 p2 = page_find (addr >> TARGET_PAGE_BITS);
1218 if (!p2)
1219 continue;
1220 prot |= p2->flags;
1221 p2->flags &= ~PAGE_WRITE;
1223 mprotect(g2h(page_addr), qemu_host_page_size,
1224 (prot & PAGE_BITS) & ~PAGE_WRITE);
1225 #ifdef DEBUG_TB_INVALIDATE
1226 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1227 page_addr);
1228 #endif
1230 #else
1231 /* if some code is already present, then the pages are already
1232 protected. So we handle the case where only the first TB is
1233 allocated in a physical page */
1234 if (!last_first_tb) {
1235 tlb_protect_code(page_addr);
1237 #endif
1239 #endif /* TARGET_HAS_SMC */
1242 /* Allocate a new translation block. Flush the translation buffer if
1243 too many translation blocks or too much generated code. */
1244 TranslationBlock *tb_alloc(target_ulong pc)
1246 TranslationBlock *tb;
1248 if (nb_tbs >= code_gen_max_blocks ||
1249 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1250 return NULL;
1251 tb = &tbs[nb_tbs++];
1252 tb->pc = pc;
1253 tb->cflags = 0;
1254 return tb;
1257 void tb_free(TranslationBlock *tb)
1259 /* In practice this is mostly used for single use temporary TBs.
1260 Ignore the hard cases and just back up if this TB happens to
1261 be the last one generated. */
1262 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1263 code_gen_ptr = tb->tc_ptr;
1264 nb_tbs--;
1268 /* add a new TB and link it to the physical page tables. phys_page2 is
1269 (-1) to indicate that only one page contains the TB. */
1270 void tb_link_page(TranslationBlock *tb,
1271 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1273 unsigned int h;
1274 TranslationBlock **ptb;
1276 /* Grab the mmap lock to stop another thread invalidating this TB
1277 before we are done. */
1278 mmap_lock();
1279 /* add in the physical hash table */
1280 h = tb_phys_hash_func(phys_pc);
1281 ptb = &tb_phys_hash[h];
1282 tb->phys_hash_next = *ptb;
1283 *ptb = tb;
1285 /* add in the page list */
1286 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1287 if (phys_page2 != -1)
1288 tb_alloc_page(tb, 1, phys_page2);
1289 else
1290 tb->page_addr[1] = -1;
1292 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1293 tb->jmp_next[0] = NULL;
1294 tb->jmp_next[1] = NULL;
1296 /* init original jump addresses */
1297 if (tb->tb_next_offset[0] != 0xffff)
1298 tb_reset_jump(tb, 0);
1299 if (tb->tb_next_offset[1] != 0xffff)
1300 tb_reset_jump(tb, 1);
1302 #ifdef DEBUG_TB_CHECK
1303 tb_page_check();
1304 #endif
1305 mmap_unlock();
1308 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1309 tb[1].tc_ptr. Return NULL if not found */
1310 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1312 int m_min, m_max, m;
1313 unsigned long v;
1314 TranslationBlock *tb;
1316 if (nb_tbs <= 0)
1317 return NULL;
1318 if (tc_ptr < (unsigned long)code_gen_buffer ||
1319 tc_ptr >= (unsigned long)code_gen_ptr)
1320 return NULL;
1321 /* binary search (cf Knuth) */
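/* tbs[] is filled in allocation order and code_gen_ptr only advances between
   flushes, so tc_ptr grows monotonically with the index; the search below
   therefore finds the TB whose generated code contains tc_ptr. */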
1322 m_min = 0;
1323 m_max = nb_tbs - 1;
1324 while (m_min <= m_max) {
1325 m = (m_min + m_max) >> 1;
1326 tb = &tbs[m];
1327 v = (unsigned long)tb->tc_ptr;
1328 if (v == tc_ptr)
1329 return tb;
1330 else if (tc_ptr < v) {
1331 m_max = m - 1;
1332 } else {
1333 m_min = m + 1;
1336 return &tbs[m_max];
1339 static void tb_reset_jump_recursive(TranslationBlock *tb);
1341 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1343 TranslationBlock *tb1, *tb_next, **ptb;
1344 unsigned int n1;
1346 tb1 = tb->jmp_next[n];
1347 if (tb1 != NULL) {
1348 /* find head of list */
1349 for(;;) {
1350 n1 = (long)tb1 & 3;
1351 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1352 if (n1 == 2)
1353 break;
1354 tb1 = tb1->jmp_next[n1];
1356 /* we are now sure that tb jumps to tb1 */
1357 tb_next = tb1;
1359 /* remove tb from the jmp_first list */
1360 ptb = &tb_next->jmp_first;
1361 for(;;) {
1362 tb1 = *ptb;
1363 n1 = (long)tb1 & 3;
1364 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1365 if (n1 == n && tb1 == tb)
1366 break;
1367 ptb = &tb1->jmp_next[n1];
1369 *ptb = tb->jmp_next[n];
1370 tb->jmp_next[n] = NULL;
1372 /* suppress the jump to next tb in generated code */
1373 tb_reset_jump(tb, n);
1375 /* suppress jumps in the tb on which we could have jumped */
1376 tb_reset_jump_recursive(tb_next);
1380 static void tb_reset_jump_recursive(TranslationBlock *tb)
1382 tb_reset_jump_recursive2(tb, 0);
1383 tb_reset_jump_recursive2(tb, 1);
1386 #if defined(TARGET_HAS_ICE)
1387 #if defined(CONFIG_USER_ONLY)
1388 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1390 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1392 #else
1393 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1395 target_phys_addr_t addr;
1396 target_ulong pd;
1397 ram_addr_t ram_addr;
1398 PhysPageDesc *p;
1400 addr = cpu_get_phys_page_debug(env, pc);
1401 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1402 if (!p) {
1403 pd = IO_MEM_UNASSIGNED;
1404 } else {
1405 pd = p->phys_offset;
1407 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1408 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1410 #endif
1411 #endif /* TARGET_HAS_ICE */
1413 #if defined(CONFIG_USER_ONLY)
1414 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1419 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1420 int flags, CPUWatchpoint **watchpoint)
1422 return -ENOSYS;
1424 #else
1425 /* Add a watchpoint. */
1426 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1427 int flags, CPUWatchpoint **watchpoint)
1429 target_ulong len_mask = ~(len - 1);
1430 CPUWatchpoint *wp;
1432 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1433 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1434 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1435 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1436 return -EINVAL;
1438 wp = qemu_malloc(sizeof(*wp));
1440 wp->vaddr = addr;
1441 wp->len_mask = len_mask;
1442 wp->flags = flags;
1444 /* keep all GDB-injected watchpoints in front */
1445 if (flags & BP_GDB)
1446 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1447 else
1448 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1450 tlb_flush_page(env, addr);
1452 if (watchpoint)
1453 *watchpoint = wp;
1454 return 0;
1457 /* Remove a specific watchpoint. */
1458 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1459 int flags)
1461 target_ulong len_mask = ~(len - 1);
1462 CPUWatchpoint *wp;
1464 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1465 if (addr == wp->vaddr && len_mask == wp->len_mask
1466 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1467 cpu_watchpoint_remove_by_ref(env, wp);
1468 return 0;
1471 return -ENOENT;
1474 /* Remove a specific watchpoint by reference. */
1475 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1477 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1479 tlb_flush_page(env, watchpoint->vaddr);
1481 qemu_free(watchpoint);
1484 /* Remove all matching watchpoints. */
1485 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1487 CPUWatchpoint *wp, *next;
1489 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1490 if (wp->flags & mask)
1491 cpu_watchpoint_remove_by_ref(env, wp);
1494 #endif
1496 /* Add a breakpoint. */
1497 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1498 CPUBreakpoint **breakpoint)
1500 #if defined(TARGET_HAS_ICE)
1501 CPUBreakpoint *bp;
1503 bp = qemu_malloc(sizeof(*bp));
1505 bp->pc = pc;
1506 bp->flags = flags;
1508 /* keep all GDB-injected breakpoints in front */
1509 if (flags & BP_GDB)
1510 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1511 else
1512 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1514 breakpoint_invalidate(env, pc);
1516 if (breakpoint)
1517 *breakpoint = bp;
1518 return 0;
1519 #else
1520 return -ENOSYS;
1521 #endif
1524 /* Remove a specific breakpoint. */
1525 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1527 #if defined(TARGET_HAS_ICE)
1528 CPUBreakpoint *bp;
1530 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1531 if (bp->pc == pc && bp->flags == flags) {
1532 cpu_breakpoint_remove_by_ref(env, bp);
1533 return 0;
1536 return -ENOENT;
1537 #else
1538 return -ENOSYS;
1539 #endif
1542 /* Remove a specific breakpoint by reference. */
1543 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1545 #if defined(TARGET_HAS_ICE)
1546 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1548 breakpoint_invalidate(env, breakpoint->pc);
1550 qemu_free(breakpoint);
1551 #endif
1554 /* Remove all matching breakpoints. */
1555 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1557 #if defined(TARGET_HAS_ICE)
1558 CPUBreakpoint *bp, *next;
1560 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1561 if (bp->flags & mask)
1562 cpu_breakpoint_remove_by_ref(env, bp);
1564 #endif
1567 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1568 CPU loop after each instruction */
1569 void cpu_single_step(CPUState *env, int enabled)
1571 #if defined(TARGET_HAS_ICE)
1572 if (env->singlestep_enabled != enabled) {
1573 env->singlestep_enabled = enabled;
1574 if (kvm_enabled())
1575 kvm_update_guest_debug(env, 0);
1576 else {
1577 /* must flush all the translated code to avoid inconsistencies */
1578 /* XXX: only flush what is necessary */
1579 tb_flush(env);
1582 #endif
1585 /* enable or disable low levels log */
1586 void cpu_set_log(int log_flags)
1588 loglevel = log_flags;
1589 if (loglevel && !logfile) {
1590 logfile = fopen(logfilename, log_append ? "a" : "w");
1591 if (!logfile) {
1592 perror(logfilename);
1593 _exit(1);
1595 #if !defined(CONFIG_SOFTMMU)
1596 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1598 static char logfile_buf[4096];
1599 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1601 #elif !defined(_WIN32)
1602 /* Win32 doesn't support line-buffering and requires size >= 2 */
1603 setvbuf(logfile, NULL, _IOLBF, 0);
1604 #endif
1605 log_append = 1;
1607 if (!loglevel && logfile) {
1608 fclose(logfile);
1609 logfile = NULL;
1613 void cpu_set_log_filename(const char *filename)
1615 logfilename = strdup(filename);
1616 if (logfile) {
1617 fclose(logfile);
1618 logfile = NULL;
1620 cpu_set_log(loglevel);
1623 static void cpu_unlink_tb(CPUState *env)
1625 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1626 problem and hope the cpu will stop of its own accord. For userspace
1627 emulation this often isn't actually as bad as it sounds. Often
1628 signals are used primarily to interrupt blocking syscalls. */
1629 TranslationBlock *tb;
1630 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1632 spin_lock(&interrupt_lock);
1633 tb = env->current_tb;
1634 /* if the cpu is currently executing code, we must unlink it and
1635 all the potentially executing TB */
1636 if (tb) {
1637 env->current_tb = NULL;
1638 tb_reset_jump_recursive(tb);
1640 spin_unlock(&interrupt_lock);
1643 /* mask must never be zero, except for A20 change call */
1644 void cpu_interrupt(CPUState *env, int mask)
1646 int old_mask;
1648 old_mask = env->interrupt_request;
1649 env->interrupt_request |= mask;
1650 if (kvm_enabled() && !kvm_irqchip_in_kernel())
1651 kvm_update_interrupt_request(env);
1653 #ifndef CONFIG_USER_ONLY
1655 * If called from iothread context, wake the target cpu in
1656 * case it's halted.
1658 if (!qemu_cpu_self(env)) {
1659 qemu_cpu_kick(env);
1660 return;
1662 #endif
1664 if (use_icount) {
1665 env->icount_decr.u16.high = 0xffff;
1666 #ifndef CONFIG_USER_ONLY
1667 if (!can_do_io(env)
1668 && (mask & ~old_mask) != 0) {
1669 cpu_abort(env, "Raised interrupt while not in I/O function");
1671 #endif
1672 } else {
1673 cpu_unlink_tb(env);
1677 void cpu_reset_interrupt(CPUState *env, int mask)
1679 env->interrupt_request &= ~mask;
1682 void cpu_exit(CPUState *env)
1684 env->exit_request = 1;
1685 cpu_unlink_tb(env);
1688 const CPULogItem cpu_log_items[] = {
1689 { CPU_LOG_TB_OUT_ASM, "out_asm",
1690 "show generated host assembly code for each compiled TB" },
1691 { CPU_LOG_TB_IN_ASM, "in_asm",
1692 "show target assembly code for each compiled TB" },
1693 { CPU_LOG_TB_OP, "op",
1694 "show micro ops for each compiled TB" },
1695 { CPU_LOG_TB_OP_OPT, "op_opt",
1696 "show micro ops "
1697 #ifdef TARGET_I386
1698 "before eflags optimization and "
1699 #endif
1700 "after liveness analysis" },
1701 { CPU_LOG_INT, "int",
1702 "show interrupts/exceptions in short format" },
1703 { CPU_LOG_EXEC, "exec",
1704 "show trace before each executed TB (lots of logs)" },
1705 { CPU_LOG_TB_CPU, "cpu",
1706 "show CPU state before block translation" },
1707 #ifdef TARGET_I386
1708 { CPU_LOG_PCALL, "pcall",
1709 "show protected mode far calls/returns/exceptions" },
1710 { CPU_LOG_RESET, "cpu_reset",
1711 "show CPU state before CPU resets" },
1712 #endif
1713 #ifdef DEBUG_IOPORT
1714 { CPU_LOG_IOPORT, "ioport",
1715 "show all i/o ports accesses" },
1716 #endif
1717 { 0, NULL, NULL },
1720 #ifndef CONFIG_USER_ONLY
1721 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1722 = QLIST_HEAD_INITIALIZER(memory_client_list);
1724 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1725 ram_addr_t size,
1726 ram_addr_t phys_offset)
1728 CPUPhysMemoryClient *client;
1729 QLIST_FOREACH(client, &memory_client_list, list) {
1730 client->set_memory(client, start_addr, size, phys_offset);
1734 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1735 target_phys_addr_t end)
1737 CPUPhysMemoryClient *client;
1738 QLIST_FOREACH(client, &memory_client_list, list) {
1739 int r = client->sync_dirty_bitmap(client, start, end);
1740 if (r < 0)
1741 return r;
1743 return 0;
1746 static int cpu_notify_migration_log(int enable)
1748 CPUPhysMemoryClient *client;
1749 QLIST_FOREACH(client, &memory_client_list, list) {
1750 int r = client->migration_log(client, enable);
1751 if (r < 0)
1752 return r;
1754 return 0;
1757 static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1758 int level, void **lp)
1760 int i;
1762 if (*lp == NULL) {
1763 return;
1765 if (level == 0) {
1766 PhysPageDesc *pd = *lp;
1767 for (i = 0; i < L2_SIZE; ++i) {
1768 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1769 client->set_memory(client, pd[i].region_offset,
1770 TARGET_PAGE_SIZE, pd[i].phys_offset);
1773 } else {
1774 void **pp = *lp;
1775 for (i = 0; i < L2_SIZE; ++i) {
1776 phys_page_for_each_1(client, level - 1, pp + i);
1781 static void phys_page_for_each(CPUPhysMemoryClient *client)
1783 int i;
1784 for (i = 0; i < P_L1_SIZE; ++i) {
1785 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1786 l1_phys_map + i);
1790 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1792 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1793 phys_page_for_each(client);
1796 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1798 QLIST_REMOVE(client, list);
1800 #endif
1802 static int cmp1(const char *s1, int n, const char *s2)
1804 if (strlen(s2) != n)
1805 return 0;
1806 return memcmp(s1, s2, n) == 0;
1809 /* takes a comma separated list of log masks. Return 0 if error. */
1810 int cpu_str_to_log_mask(const char *str)
1812 const CPULogItem *item;
1813 int mask;
1814 const char *p, *p1;
1816 p = str;
1817 mask = 0;
1818 for(;;) {
1819 p1 = strchr(p, ',');
1820 if (!p1)
1821 p1 = p + strlen(p);
1822 if(cmp1(p,p1-p,"all")) {
1823 for(item = cpu_log_items; item->mask != 0; item++) {
1824 mask |= item->mask;
1826 } else {
1827 for(item = cpu_log_items; item->mask != 0; item++) {
1828 if (cmp1(p, p1 - p, item->name))
1829 goto found;
1831 return 0;
1833 found:
1834 mask |= item->mask;
1835 if (*p1 != ',')
1836 break;
1837 p = p1 + 1;
1839 return mask;
1842 void cpu_abort(CPUState *env, const char *fmt, ...)
1844 va_list ap;
1845 va_list ap2;
1847 va_start(ap, fmt);
1848 va_copy(ap2, ap);
1849 fprintf(stderr, "qemu: fatal: ");
1850 vfprintf(stderr, fmt, ap);
1851 fprintf(stderr, "\n");
1852 #ifdef TARGET_I386
1853 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1854 #else
1855 cpu_dump_state(env, stderr, fprintf, 0);
1856 #endif
1857 if (qemu_log_enabled()) {
1858 qemu_log("qemu: fatal: ");
1859 qemu_log_vprintf(fmt, ap2);
1860 qemu_log("\n");
1861 #ifdef TARGET_I386
1862 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1863 #else
1864 log_cpu_state(env, 0);
1865 #endif
1866 qemu_log_flush();
1867 qemu_log_close();
1869 va_end(ap2);
1870 va_end(ap);
1871 #if defined(CONFIG_USER_ONLY)
1873 struct sigaction act;
1874 sigfillset(&act.sa_mask);
1875 act.sa_handler = SIG_DFL;
1876 sigaction(SIGABRT, &act, NULL);
1878 #endif
1879 abort();
1882 CPUState *cpu_copy(CPUState *env)
1884 CPUState *new_env = cpu_init(env->cpu_model_str);
1885 CPUState *next_cpu = new_env->next_cpu;
1886 int cpu_index = new_env->cpu_index;
1887 #if defined(TARGET_HAS_ICE)
1888 CPUBreakpoint *bp;
1889 CPUWatchpoint *wp;
1890 #endif
1892 memcpy(new_env, env, sizeof(CPUState));
1894 /* Preserve chaining and index. */
1895 new_env->next_cpu = next_cpu;
1896 new_env->cpu_index = cpu_index;
1898 /* Clone all break/watchpoints.
1899 Note: Once we support ptrace with hw-debug register access, make sure
1900 BP_CPU break/watchpoints are handled correctly on clone. */
1901 QTAILQ_INIT(&env->breakpoints);
1902 QTAILQ_INIT(&env->watchpoints);
1903 #if defined(TARGET_HAS_ICE)
1904 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1905 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1907 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1908 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1909 wp->flags, NULL);
1911 #endif
1913 return new_env;
1916 #if !defined(CONFIG_USER_ONLY)
1918 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1920 unsigned int i;
1922 /* Discard jump cache entries for any tb which might potentially
1923 overlap the flushed page. */
1924 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1925 memset (&env->tb_jmp_cache[i], 0,
1926 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1928 i = tb_jmp_cache_hash_page(addr);
1929 memset (&env->tb_jmp_cache[i], 0,
1930 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1933 static CPUTLBEntry s_cputlb_empty_entry = {
1934 .addr_read = -1,
1935 .addr_write = -1,
1936 .addr_code = -1,
1937 .addend = -1,
1940 /* NOTE: if flush_global is true, also flush global entries (not
1941 implemented yet) */
1942 void tlb_flush(CPUState *env, int flush_global)
1944 int i;
1946 #if defined(DEBUG_TLB)
1947 printf("tlb_flush:\n");
1948 #endif
1949 /* must reset current TB so that interrupts cannot modify the
1950 links while we are modifying them */
1951 env->current_tb = NULL;
1953 for(i = 0; i < CPU_TLB_SIZE; i++) {
1954 int mmu_idx;
1955 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1956 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1960 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1962 env->tlb_flush_addr = -1;
1963 env->tlb_flush_mask = 0;
1964 tlb_flush_count++;
1967 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1969 if (addr == (tlb_entry->addr_read &
1970 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1971 addr == (tlb_entry->addr_write &
1972 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1973 addr == (tlb_entry->addr_code &
1974 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1975 *tlb_entry = s_cputlb_empty_entry;
1979 void tlb_flush_page(CPUState *env, target_ulong addr)
1981 int i;
1982 int mmu_idx;
1984 #if defined(DEBUG_TLB)
1985 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1986 #endif
1987 /* Check if we need to flush due to large pages. */
1988 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1989 #if defined(DEBUG_TLB)
1990 printf("tlb_flush_page: forced full flush ("
1991 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1992 env->tlb_flush_addr, env->tlb_flush_mask);
1993 #endif
1994 tlb_flush(env, 1);
1995 return;
1997 /* must reset current TB so that interrupts cannot modify the
1998 links while we are modifying them */
1999 env->current_tb = NULL;
2001 addr &= TARGET_PAGE_MASK;
2002 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2003 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2004 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2006 tlb_flush_jmp_cache(env, addr);
2009 /* update the TLBs so that writes to code in the virtual page 'addr'
2010 can be detected */
2011 static void tlb_protect_code(ram_addr_t ram_addr)
2013 cpu_physical_memory_reset_dirty(ram_addr,
2014 ram_addr + TARGET_PAGE_SIZE,
2015 CODE_DIRTY_FLAG);
2018 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2019 tested for self modifying code */
2020 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2021 target_ulong vaddr)
2023 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2026 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2027 unsigned long start, unsigned long length)
2029 unsigned long addr;
2030 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2031 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2032 if ((addr - start) < length) {
2033 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2038 /* Note: start and end must be within the same ram block. */
2039 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2040 int dirty_flags)
2042 CPUState *env;
2043 unsigned long length, start1;
2044 int i;
2046 start &= TARGET_PAGE_MASK;
2047 end = TARGET_PAGE_ALIGN(end);
2049 length = end - start;
2050 if (length == 0)
2051 return;
2052 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2054 /* we modify the TLB cache so that the dirty bit will be set again
2055 when accessing the range */
2056 start1 = (unsigned long)qemu_get_ram_ptr(start);
2057 /* Check that we don't span multiple blocks - this breaks the
2058 address comparisons below. */
2059 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2060 != (end - 1) - start) {
2061 abort();
2064 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2065 int mmu_idx;
2066 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2067 for(i = 0; i < CPU_TLB_SIZE; i++)
2068 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2069 start1, length);
2074 int cpu_physical_memory_set_dirty_tracking(int enable)
2076 int ret = 0;
2077 in_migration = enable;
2078 ret = cpu_notify_migration_log(!!enable);
2079 return ret;
2082 int cpu_physical_memory_get_dirty_tracking(void)
2084 return in_migration;
2087 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2088 target_phys_addr_t end_addr)
2090 int ret;
2092 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2093 return ret;
2096 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2098 ram_addr_t ram_addr;
2099 void *p;
2101 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2102 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2103 + tlb_entry->addend);
2104 ram_addr = qemu_ram_addr_from_host(p);
2105 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2106 tlb_entry->addr_write |= TLB_NOTDIRTY;
2111 /* update the TLB according to the current state of the dirty bits */
2112 void cpu_tlb_update_dirty(CPUState *env)
2114 int i;
2115 int mmu_idx;
2116 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2117 for(i = 0; i < CPU_TLB_SIZE; i++)
2118 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2122 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2124 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2125 tlb_entry->addr_write = vaddr;
2128 /* update the TLB corresponding to virtual page vaddr
2129 so that it is no longer dirty */
2130 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2132 int i;
2133 int mmu_idx;
2135 vaddr &= TARGET_PAGE_MASK;
2136 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2137 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2138 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2141 /* Our TLB does not support large pages, so remember the area covered by
2142 large pages and trigger a full TLB flush if these are invalidated. */
2143 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2144 target_ulong size)
2146 target_ulong mask = ~(size - 1);
2148 if (env->tlb_flush_addr == (target_ulong)-1) {
2149 env->tlb_flush_addr = vaddr & mask;
2150 env->tlb_flush_mask = mask;
2151 return;
2153 /* Extend the existing region to include the new page.
2154 This is a compromise between unnecessary flushes and the cost
2155 of maintaining a full variable size TLB. */
2156 mask &= env->tlb_flush_mask;
2157 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2158 mask <<= 1;
2160 env->tlb_flush_addr &= mask;
2161 env->tlb_flush_mask = mask;
2164 /* Add a new TLB entry. At most one entry for a given virtual address
2165 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2166 supplied size is only used by tlb_flush_page. */
2167 void tlb_set_page(CPUState *env, target_ulong vaddr,
2168 target_phys_addr_t paddr, int prot,
2169 int mmu_idx, target_ulong size)
2171 PhysPageDesc *p;
2172 unsigned long pd;
2173 unsigned int index;
2174 target_ulong address;
2175 target_ulong code_address;
2176 unsigned long addend;
2177 CPUTLBEntry *te;
2178 CPUWatchpoint *wp;
2179 target_phys_addr_t iotlb;
2181 assert(size >= TARGET_PAGE_SIZE);
2182 if (size != TARGET_PAGE_SIZE) {
2183 tlb_add_large_page(env, vaddr, size);
2185 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2186 if (!p) {
2187 pd = IO_MEM_UNASSIGNED;
2188 } else {
2189 pd = p->phys_offset;
2191 #if defined(DEBUG_TLB)
2192 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2193 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2194 #endif
2196 address = vaddr;
2197 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2198 /* IO memory case (romd handled later) */
2199 address |= TLB_MMIO;
2201 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2202 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2203 /* Normal RAM. */
2204 iotlb = pd & TARGET_PAGE_MASK;
2205 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2206 iotlb |= IO_MEM_NOTDIRTY;
2207 else
2208 iotlb |= IO_MEM_ROM;
2209 } else {
2210 /* IO handlers are currently passed a physical address.
2211 It would be nice to pass an offset from the base address
2212 of that region. This would avoid having to special case RAM,
2213 and avoid full address decoding in every device.
2214 We can't use the high bits of pd for this because
2215 IO_MEM_ROMD uses these as a ram address. */
2216 iotlb = (pd & ~TARGET_PAGE_MASK);
2217 if (p) {
2218 iotlb += p->region_offset;
2219 } else {
2220 iotlb += paddr;
2224 code_address = address;
2225 /* Make accesses to pages with watchpoints go via the
2226 watchpoint trap routines. */
2227 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2228 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2229 /* Avoid trapping reads of pages with a write breakpoint. */
2230 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2231 iotlb = io_mem_watch + paddr;
2232 address |= TLB_MMIO;
2233 break;
2238 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2239 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2240 te = &env->tlb_table[mmu_idx][index];
2241 te->addend = addend - vaddr;
2242 if (prot & PAGE_READ) {
2243 te->addr_read = address;
2244 } else {
2245 te->addr_read = -1;
2248 if (prot & PAGE_EXEC) {
2249 te->addr_code = code_address;
2250 } else {
2251 te->addr_code = -1;
2253 if (prot & PAGE_WRITE) {
2254 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2255 (pd & IO_MEM_ROMD)) {
2256 /* Write access calls the I/O callback. */
2257 te->addr_write = address | TLB_MMIO;
2258 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2259 !cpu_physical_memory_is_dirty(pd)) {
2260 te->addr_write = address | TLB_NOTDIRTY;
2261 } else {
2262 te->addr_write = address;
2264 } else {
2265 te->addr_write = -1;
2269 #else
2271 void tlb_flush(CPUState *env, int flush_global)
2275 void tlb_flush_page(CPUState *env, target_ulong addr)
2280 * Walks guest process memory "regions" one by one
2281 * and calls callback function 'fn' for each region.
2284 struct walk_memory_regions_data
2286 walk_memory_regions_fn fn;
2287 void *priv;
2288 unsigned long start;
2289 int prot;
2292 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2293 abi_ulong end, int new_prot)
2295 if (data->start != -1ul) {
2296 int rc = data->fn(data->priv, data->start, end, data->prot);
2297 if (rc != 0) {
2298 return rc;
2302 data->start = (new_prot ? end : -1ul);
2303 data->prot = new_prot;
2305 return 0;
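/* Recursively walk one level of the page table radix tree, emitting a
   region boundary (via walk_memory_regions_end) whenever the protection
   flags change from the previous page. */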
2308 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2309 abi_ulong base, int level, void **lp)
2311 abi_ulong pa;
2312 int i, rc;
2314 if (*lp == NULL) {
2315 return walk_memory_regions_end(data, base, 0);
2318 if (level == 0) {
2319 PageDesc *pd = *lp;
2320 for (i = 0; i < L2_SIZE; ++i) {
2321 int prot = pd[i].flags;
2323 pa = base | (i << TARGET_PAGE_BITS);
2324 if (prot != data->prot) {
2325 rc = walk_memory_regions_end(data, pa, prot);
2326 if (rc != 0) {
2327 return rc;
2331 } else {
2332 void **pp = *lp;
2333 for (i = 0; i < L2_SIZE; ++i) {
2334 pa = base | ((abi_ulong)i <<
2335 (TARGET_PAGE_BITS + L2_BITS * level));
2336 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2337 if (rc != 0) {
2338 return rc;
2343 return 0;
2346 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2348 struct walk_memory_regions_data data;
2349 unsigned long i;
2351 data.fn = fn;
2352 data.priv = priv;
2353 data.start = -1ul;
2354 data.prot = 0;
2356 for (i = 0; i < V_L1_SIZE; i++) {
2357 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2358 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2359 if (rc != 0) {
2360 return rc;
2364 return walk_memory_regions_end(&data, 0, 0);
2367 static int dump_region(void *priv, abi_ulong start,
2368 abi_ulong end, unsigned long prot)
2370 FILE *f = (FILE *)priv;
2372 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2373 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2374 start, end, end - start,
2375 ((prot & PAGE_READ) ? 'r' : '-'),
2376 ((prot & PAGE_WRITE) ? 'w' : '-'),
2377 ((prot & PAGE_EXEC) ? 'x' : '-'));
2379 return (0);
2382 /* dump memory mappings */
2383 void page_dump(FILE *f)
2385 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2386 "start", "end", "size", "prot");
2387 walk_memory_regions(f, dump_region);
2390 int page_get_flags(target_ulong address)
2392 PageDesc *p;
2394 p = page_find(address >> TARGET_PAGE_BITS);
2395 if (!p)
2396 return 0;
2397 return p->flags;
2400 /* Modify the flags of a page and invalidate the code if necessary.
2401 The flag PAGE_WRITE_ORG is positioned automatically depending
2402 on PAGE_WRITE. The mmap_lock should already be held. */
2403 void page_set_flags(target_ulong start, target_ulong end, int flags)
2405 target_ulong addr, len;
2407 /* This function should never be called with addresses outside the
2408 guest address space. If this assert fires, it probably indicates
2409 a missing call to h2g_valid. */
2410 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2411 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2412 #endif
2413 assert(start < end);
2415 start = start & TARGET_PAGE_MASK;
2416 end = TARGET_PAGE_ALIGN(end);
2418 if (flags & PAGE_WRITE) {
2419 flags |= PAGE_WRITE_ORG;
2422 for (addr = start, len = end - start;
2423 len != 0;
2424 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2425 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2427 /* If the write protection bit is set, then we invalidate
2428 the code inside. */
2429 if (!(p->flags & PAGE_WRITE) &&
2430 (flags & PAGE_WRITE) &&
2431 p->first_tb) {
2432 tb_invalidate_phys_page(addr, 0, NULL);
2434 p->flags = flags;
2438 int page_check_range(target_ulong start, target_ulong len, int flags)
2440 PageDesc *p;
2441 target_ulong end;
2442 target_ulong addr;
2444 /* This function should never be called with addresses outside the
2445 guest address space. If this assert fires, it probably indicates
2446 a missing call to h2g_valid. */
2447 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2448 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2449 #endif
2451 if (len == 0) {
2452 return 0;
2454 if (start + len - 1 < start) {
2455 /* We've wrapped around. */
2456 return -1;
2459 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2460 start = start & TARGET_PAGE_MASK;
2462 for (addr = start, len = end - start;
2463 len != 0;
2464 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2465 p = page_find(addr >> TARGET_PAGE_BITS);
2466 if (!p)
2467 return -1;
2468 if (!(p->flags & PAGE_VALID))
2469 return -1;
2471 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2472 return -1;
2473 if (flags & PAGE_WRITE) {
2474 if (!(p->flags & PAGE_WRITE_ORG))
2475 return -1;
2476 /* unprotect the page if it was put read-only because it
2477 contains translated code */
2478 if (!(p->flags & PAGE_WRITE)) {
2479 if (!page_unprotect(addr, 0, NULL))
2480 return -1;
2482 return 0;
2485 return 0;
2488 /* called from signal handler: invalidate the code and unprotect the
2489 page. Return TRUE if the fault was successfully handled. */
2490 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2492 unsigned int prot;
2493 PageDesc *p;
2494 target_ulong host_start, host_end, addr;
2496 /* Technically this isn't safe inside a signal handler. However we
2497 know this only ever happens in a synchronous SEGV handler, so in
2498 practice it seems to be ok. */
2499 mmap_lock();
2501 p = page_find(address >> TARGET_PAGE_BITS);
2502 if (!p) {
2503 mmap_unlock();
2504 return 0;
2507 /* if the page was really writable, then we change its
2508 protection back to writable */
2509 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2510 host_start = address & qemu_host_page_mask;
2511 host_end = host_start + qemu_host_page_size;
2513 prot = 0;
2514 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2515 p = page_find(addr >> TARGET_PAGE_BITS);
2516 p->flags |= PAGE_WRITE;
2517 prot |= p->flags;
2519 /* and since the content will be modified, we must invalidate
2520 the corresponding translated code. */
2521 tb_invalidate_phys_page(addr, pc, puc);
2522 #ifdef DEBUG_TB_CHECK
2523 tb_invalidate_check(addr);
2524 #endif
2526 mprotect((void *)g2h(host_start), qemu_host_page_size,
2527 prot & PAGE_BITS);
2529 mmap_unlock();
2530 return 1;
2532 mmap_unlock();
2533 return 0;
2536 static inline void tlb_set_dirty(CPUState *env,
2537 unsigned long addr, target_ulong vaddr)
2540 #endif /* defined(CONFIG_USER_ONLY) */
2542 #if !defined(CONFIG_USER_ONLY)
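/* Subpage handling: when several memory regions share a single target page,
   a subpage_t records, for every byte offset within the page, which io_mem
   slot and region offset should handle accesses to it. */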
2544 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2545 typedef struct subpage_t {
2546 target_phys_addr_t base;
2547 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2548 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2549 } subpage_t;
2551 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2552 ram_addr_t memory, ram_addr_t region_offset);
2553 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2554 ram_addr_t orig_memory,
2555 ram_addr_t region_offset);
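/* For the page starting at 'addr', compute the byte range [start_addr2,
   end_addr2] actually covered by the registration starting at 'start_addr'
   with length 'orig_size', and set 'need_subpage' when that range does not
   span the whole page. */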
2556 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2557 need_subpage) \
2558 do { \
2559 if (addr > start_addr) \
2560 start_addr2 = 0; \
2561 else { \
2562 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2563 if (start_addr2 > 0) \
2564 need_subpage = 1; \
2567 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2568 end_addr2 = TARGET_PAGE_SIZE - 1; \
2569 else { \
2570 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2571 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2572 need_subpage = 1; \
2574 } while (0)
2576 /* register physical memory.
2577 For RAM, 'size' must be a multiple of the target page size.
2578 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2579 io memory page. The address used when calling the IO function is
2580 the offset from the start of the region, plus region_offset. Both
2581 start_addr and region_offset are rounded down to a page boundary
2582 before calculating this offset. This should not be a problem unless
2583 the low bits of start_addr and region_offset differ. */
2584 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2585 ram_addr_t size,
2586 ram_addr_t phys_offset,
2587 ram_addr_t region_offset)
2589 target_phys_addr_t addr, end_addr;
2590 PhysPageDesc *p;
2591 CPUState *env;
2592 ram_addr_t orig_size = size;
2593 subpage_t *subpage;
2595 cpu_notify_set_memory(start_addr, size, phys_offset);
2597 if (phys_offset == IO_MEM_UNASSIGNED) {
2598 region_offset = start_addr;
2600 region_offset &= TARGET_PAGE_MASK;
2601 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2602 end_addr = start_addr + (target_phys_addr_t)size;
2603 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2604 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2605 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2606 ram_addr_t orig_memory = p->phys_offset;
2607 target_phys_addr_t start_addr2, end_addr2;
2608 int need_subpage = 0;
2610 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2611 need_subpage);
2612 if (need_subpage) {
2613 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2614 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2615 &p->phys_offset, orig_memory,
2616 p->region_offset);
2617 } else {
2618 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2619 >> IO_MEM_SHIFT];
2621 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2622 region_offset);
2623 p->region_offset = 0;
2624 } else {
2625 p->phys_offset = phys_offset;
2626 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2627 (phys_offset & IO_MEM_ROMD))
2628 phys_offset += TARGET_PAGE_SIZE;
2630 } else {
2631 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2632 p->phys_offset = phys_offset;
2633 p->region_offset = region_offset;
2634 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2635 (phys_offset & IO_MEM_ROMD)) {
2636 phys_offset += TARGET_PAGE_SIZE;
2637 } else {
2638 target_phys_addr_t start_addr2, end_addr2;
2639 int need_subpage = 0;
2641 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2642 end_addr2, need_subpage);
2644 if (need_subpage) {
2645 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2646 &p->phys_offset, IO_MEM_UNASSIGNED,
2647 addr & TARGET_PAGE_MASK);
2648 subpage_register(subpage, start_addr2, end_addr2,
2649 phys_offset, region_offset);
2650 p->region_offset = 0;
2654 region_offset += TARGET_PAGE_SIZE;
2657 /* since each CPU stores ram addresses in its TLB cache, we must
2658 reset the modified entries */
2659 /* XXX: slow ! */
2660 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2661 tlb_flush(env, 1);
2665 /* XXX: temporary until new memory mapping API */
2666 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2668 PhysPageDesc *p;
2670 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2671 if (!p)
2672 return IO_MEM_UNASSIGNED;
2673 return p->phys_offset;
2676 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2678 if (kvm_enabled())
2679 kvm_coalesce_mmio_region(addr, size);
2682 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2684 if (kvm_enabled())
2685 kvm_uncoalesce_mmio_region(addr, size);
2688 void qemu_flush_coalesced_mmio_buffer(void)
2690 if (kvm_enabled())
2691 kvm_flush_coalesced_mmio_buffer();
2694 #if defined(__linux__) && !defined(TARGET_S390X)
2696 #include <sys/vfs.h>
2698 #define HUGETLBFS_MAGIC 0x958458f6
2700 static long gethugepagesize(const char *path)
2702 struct statfs fs;
2703 int ret;
2705 do {
2706 ret = statfs(path, &fs);
2707 } while (ret != 0 && errno == EINTR);
2709 if (ret != 0) {
2710 perror(path);
2711 return 0;
2714 if (fs.f_type != HUGETLBFS_MAGIC)
2715 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2717 return fs.f_bsize;
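/* Back a RAM block with a file on the hugetlbfs mount given by -mem-path:
   create an unlinked temporary file there, grow it to the hugepage-rounded
   size and mmap it into the process. */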
2720 static void *file_ram_alloc(RAMBlock *block,
2721 ram_addr_t memory,
2722 const char *path)
2724 char *filename;
2725 void *area;
2726 int fd;
2727 #ifdef MAP_POPULATE
2728 int flags;
2729 #endif
2730 unsigned long hpagesize;
2732 hpagesize = gethugepagesize(path);
2733 if (!hpagesize) {
2734 return NULL;
2737 if (memory < hpagesize) {
2738 return NULL;
2741 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2742 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2743 return NULL;
2746 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2747 return NULL;
2750 fd = mkstemp(filename);
2751 if (fd < 0) {
2752 perror("unable to create backing store for hugepages");
2753 free(filename);
2754 return NULL;
2756 unlink(filename);
2757 free(filename);
2759 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2762 * ftruncate is not supported by hugetlbfs in older
2763 * hosts, so don't bother bailing out on errors.
2764 * If anything goes wrong with it under other filesystems,
2765 * mmap will fail.
2767 if (ftruncate(fd, memory))
2768 perror("ftruncate");
2770 #ifdef MAP_POPULATE
2771 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2772 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2773 * to sidestep this quirk.
2775 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2776 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2777 #else
2778 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2779 #endif
2780 if (area == MAP_FAILED) {
2781 perror("file_ram_alloc: can't mmap RAM pages");
2782 close(fd);
2783 return (NULL);
2785 block->fd = fd;
2786 return area;
2788 #endif
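/* Best-fit search of the ram_addr_t offset space: pick the smallest gap
   between existing RAM blocks that is still large enough for 'size'. */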
2790 static ram_addr_t find_ram_offset(ram_addr_t size)
2792 RAMBlock *block, *next_block;
2793 ram_addr_t offset = 0, mingap = ULONG_MAX;
2795 if (QLIST_EMPTY(&ram_list.blocks))
2796 return 0;
2798 QLIST_FOREACH(block, &ram_list.blocks, next) {
2799 ram_addr_t end, next = ULONG_MAX;
2801 end = block->offset + block->length;
2803 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2804 if (next_block->offset >= end) {
2805 next = MIN(next, next_block->offset);
2808 if (next - end >= size && next - end < mingap) {
2809 offset = end;
2810 mingap = next - end;
2813 return offset;
2816 static ram_addr_t last_ram_offset(void)
2818 RAMBlock *block;
2819 ram_addr_t last = 0;
2821 QLIST_FOREACH(block, &ram_list.blocks, next)
2822 last = MAX(last, block->offset + block->length);
2824 return last;
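/* Register a caller-supplied host buffer as a new RAM block; the block gets
   an idstr built from the owning device's qdev path plus 'name' and a fresh
   offset from find_ram_offset(). */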
2827 ram_addr_t qemu_ram_map(DeviceState *dev, const char *name,
2828 ram_addr_t size, void *host)
2830 RAMBlock *new_block, *block;
2832 size = TARGET_PAGE_ALIGN(size);
2833 new_block = qemu_mallocz(sizeof(*new_block));
2835 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2836 char *id = dev->parent_bus->info->get_dev_path(dev);
2837 if (id) {
2838 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2839 qemu_free(id);
2842 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2844 QLIST_FOREACH(block, &ram_list.blocks, next) {
2845 if (!strcmp(block->idstr, new_block->idstr)) {
2846 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2847 new_block->idstr);
2848 abort();
2852 new_block->host = host;
2854 new_block->offset = find_ram_offset(size);
2855 new_block->length = size;
2857 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2859 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2860 last_ram_offset() >> TARGET_PAGE_BITS);
2861 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2862 0xff, size >> TARGET_PAGE_BITS);
2864 if (kvm_enabled())
2865 kvm_setup_guest_memory(new_block->host, size);
2867 return new_block->offset;
2870 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2872 RAMBlock *new_block, *block;
2874 size = TARGET_PAGE_ALIGN(size);
2875 new_block = qemu_mallocz(sizeof(*new_block));
2877 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2878 char *id = dev->parent_bus->info->get_dev_path(dev);
2879 if (id) {
2880 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2881 qemu_free(id);
2884 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2886 QLIST_FOREACH(block, &ram_list.blocks, next) {
2887 if (!strcmp(block->idstr, new_block->idstr)) {
2888 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2889 new_block->idstr);
2890 abort();
2894 if (mem_path) {
2895 #if defined (__linux__) && !defined(TARGET_S390X)
2896 new_block->host = file_ram_alloc(new_block, size, mem_path);
2897 if (!new_block->host) {
2898 new_block->host = qemu_vmalloc(size);
2899 #ifdef MADV_MERGEABLE
2900 madvise(new_block->host, size, MADV_MERGEABLE);
2901 #endif
2903 #else
2904 fprintf(stderr, "-mem-path option unsupported\n");
2905 exit(1);
2906 #endif
2907 } else {
2908 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2909 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2910 new_block->host = mmap((void*)0x1000000, size,
2911 PROT_EXEC|PROT_READ|PROT_WRITE,
2912 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2913 #else
2914 new_block->host = qemu_vmalloc(size);
2915 #endif
2916 #ifdef MADV_MERGEABLE
2917 madvise(new_block->host, size, MADV_MERGEABLE);
2918 #endif
2920 new_block->offset = find_ram_offset(size);
2921 new_block->length = size;
2923 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2925 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2926 last_ram_offset() >> TARGET_PAGE_BITS);
2927 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2928 0xff, size >> TARGET_PAGE_BITS);
2930 if (kvm_enabled())
2931 kvm_setup_guest_memory(new_block->host, size);
2933 return new_block->offset;
2936 void qemu_ram_unmap(ram_addr_t addr)
2938 RAMBlock *block;
2940 QLIST_FOREACH(block, &ram_list.blocks, next) {
2941 if (addr == block->offset) {
2942 QLIST_REMOVE(block, next);
2943 qemu_free(block);
2944 return;
2949 void qemu_ram_free(ram_addr_t addr)
2951 RAMBlock *block;
2953 QLIST_FOREACH(block, &ram_list.blocks, next) {
2954 if (addr == block->offset) {
2955 QLIST_REMOVE(block, next);
2956 if (mem_path) {
2957 #if defined (__linux__) && !defined(TARGET_S390X)
2958 if (block->fd) {
2959 munmap(block->host, block->length);
2960 close(block->fd);
2961 } else {
2962 qemu_vfree(block->host);
2964 #endif
2965 } else {
2966 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2967 munmap(block->host, block->length);
2968 #else
2969 qemu_vfree(block->host);
2970 #endif
2972 qemu_free(block);
2973 return;
2979 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2980 With the exception of the softmmu code in this file, this should
2981 only be used for local memory (e.g. video ram) that the device owns,
2982 and knows it isn't going to access beyond the end of the block.
2984 It should not be used for general purpose DMA.
2985 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2987 void *qemu_get_ram_ptr(ram_addr_t addr)
2989 RAMBlock *block;
2991 QLIST_FOREACH(block, &ram_list.blocks, next) {
2992 if (addr - block->offset < block->length) {
2993 QLIST_REMOVE(block, next);
2994 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2995 return block->host + (addr - block->offset);
2999 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3000 abort();
3002 return NULL;
3005 int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3007 RAMBlock *block;
3008 uint8_t *host = ptr;
3010 QLIST_FOREACH(block, &ram_list.blocks, next) {
3011 if (host - block->host < block->length) {
3012 *ram_addr = block->offset + (host - block->host);
3013 return 0;
3016 return -1;
3019 /* Some of the softmmu routines need to translate from a host pointer
3020 (typically a TLB entry) back to a ram offset. */
3021 ram_addr_t qemu_ram_addr_from_host(void *ptr)
3023 ram_addr_t ram_addr;
3025 if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
3026 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3027 abort();
3029 return ram_addr;
3032 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3034 #ifdef DEBUG_UNASSIGNED
3035 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3036 #endif
3037 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3038 do_unassigned_access(addr, 0, 0, 0, 1);
3039 #endif
3040 return 0;
3043 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3045 #ifdef DEBUG_UNASSIGNED
3046 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3047 #endif
3048 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3049 do_unassigned_access(addr, 0, 0, 0, 2);
3050 #endif
3051 return 0;
3054 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3056 #ifdef DEBUG_UNASSIGNED
3057 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3058 #endif
3059 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3060 do_unassigned_access(addr, 0, 0, 0, 4);
3061 #endif
3062 return 0;
3065 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3067 #ifdef DEBUG_UNASSIGNED
3068 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3069 #endif
3070 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3071 do_unassigned_access(addr, 1, 0, 0, 1);
3072 #endif
3075 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3077 #ifdef DEBUG_UNASSIGNED
3078 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3079 #endif
3080 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3081 do_unassigned_access(addr, 1, 0, 0, 2);
3082 #endif
3085 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3087 #ifdef DEBUG_UNASSIGNED
3088 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3089 #endif
3090 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3091 do_unassigned_access(addr, 1, 0, 0, 4);
3092 #endif
3095 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3096 unassigned_mem_readb,
3097 unassigned_mem_readw,
3098 unassigned_mem_readl,
3101 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3102 unassigned_mem_writeb,
3103 unassigned_mem_writew,
3104 unassigned_mem_writel,
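/* notdirty handlers: installed for RAM pages whose dirty bits are not all
   set. A write first invalidates any translated code derived from the page,
   then performs the store, sets the dirty bits and, once the page is fully
   dirty, drops back to the fast RAM path via tlb_set_dirty(). */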
3107 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3108 uint32_t val)
3110 int dirty_flags;
3111 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3112 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3113 #if !defined(CONFIG_USER_ONLY)
3114 tb_invalidate_phys_page_fast(ram_addr, 1);
3115 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3116 #endif
3118 stb_p(qemu_get_ram_ptr(ram_addr), val);
3119 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3120 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3121 /* we remove the notdirty callback only if the code has been
3122 flushed */
3123 if (dirty_flags == 0xff)
3124 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3127 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3128 uint32_t val)
3130 int dirty_flags;
3131 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3132 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3133 #if !defined(CONFIG_USER_ONLY)
3134 tb_invalidate_phys_page_fast(ram_addr, 2);
3135 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3136 #endif
3138 stw_p(qemu_get_ram_ptr(ram_addr), val);
3139 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3140 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3141 /* we remove the notdirty callback only if the code has been
3142 flushed */
3143 if (dirty_flags == 0xff)
3144 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3147 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3148 uint32_t val)
3150 int dirty_flags;
3151 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3152 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3153 #if !defined(CONFIG_USER_ONLY)
3154 tb_invalidate_phys_page_fast(ram_addr, 4);
3155 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3156 #endif
3158 stl_p(qemu_get_ram_ptr(ram_addr), val);
3159 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3160 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3161 /* we remove the notdirty callback only if the code has been
3162 flushed */
3163 if (dirty_flags == 0xff)
3164 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3167 static CPUReadMemoryFunc * const error_mem_read[3] = {
3168 NULL, /* never used */
3169 NULL, /* never used */
3170 NULL, /* never used */
3173 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3174 notdirty_mem_writeb,
3175 notdirty_mem_writew,
3176 notdirty_mem_writel,
3179 /* Generate a debug exception if a watchpoint has been hit. */
3180 static void check_watchpoint(int offset, int len_mask, int flags)
3182 CPUState *env = cpu_single_env;
3183 target_ulong pc, cs_base;
3184 TranslationBlock *tb;
3185 target_ulong vaddr;
3186 CPUWatchpoint *wp;
3187 int cpu_flags;
3189 if (env->watchpoint_hit) {
3190 /* We re-entered the check after replacing the TB. Now raise
3191 * the debug interrupt so that it will trigger after the
3192 * current instruction. */
3193 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3194 return;
3196 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3197 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3198 if ((vaddr == (wp->vaddr & len_mask) ||
3199 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3200 wp->flags |= BP_WATCHPOINT_HIT;
3201 if (!env->watchpoint_hit) {
3202 env->watchpoint_hit = wp;
3203 tb = tb_find_pc(env->mem_io_pc);
3204 if (!tb) {
3205 cpu_abort(env, "check_watchpoint: could not find TB for "
3206 "pc=%p", (void *)env->mem_io_pc);
3208 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3209 tb_phys_invalidate(tb, -1);
3210 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3211 env->exception_index = EXCP_DEBUG;
3212 } else {
3213 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3214 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3216 cpu_resume_from_signal(env, NULL);
3218 } else {
3219 wp->flags &= ~BP_WATCHPOINT_HIT;
3224 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3225 so these check for a hit then pass through to the normal out-of-line
3226 phys routines. */
3227 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3229 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3230 return ldub_phys(addr);
3233 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3235 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3236 return lduw_phys(addr);
3239 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3241 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3242 return ldl_phys(addr);
3245 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3246 uint32_t val)
3248 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3249 stb_phys(addr, val);
3252 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3253 uint32_t val)
3255 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3256 stw_phys(addr, val);
3259 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3260 uint32_t val)
3262 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3263 stl_phys(addr, val);
3266 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3267 watch_mem_readb,
3268 watch_mem_readw,
3269 watch_mem_readl,
3272 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3273 watch_mem_writeb,
3274 watch_mem_writew,
3275 watch_mem_writel,
3278 static inline uint32_t subpage_readlen (subpage_t *mmio,
3279 target_phys_addr_t addr,
3280 unsigned int len)
3282 unsigned int idx = SUBPAGE_IDX(addr);
3283 #if defined(DEBUG_SUBPAGE)
3284 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3285 mmio, len, addr, idx);
3286 #endif
3288 addr += mmio->region_offset[idx];
3289 idx = mmio->sub_io_index[idx];
3290 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3293 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3294 uint32_t value, unsigned int len)
3296 unsigned int idx = SUBPAGE_IDX(addr);
3297 #if defined(DEBUG_SUBPAGE)
3298 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3299 __func__, mmio, len, addr, idx, value);
3300 #endif
3302 addr += mmio->region_offset[idx];
3303 idx = mmio->sub_io_index[idx];
3304 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3307 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3309 return subpage_readlen(opaque, addr, 0);
3312 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3313 uint32_t value)
3315 subpage_writelen(opaque, addr, value, 0);
3318 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3320 return subpage_readlen(opaque, addr, 1);
3323 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3324 uint32_t value)
3326 subpage_writelen(opaque, addr, value, 1);
3329 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3331 return subpage_readlen(opaque, addr, 2);
3334 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3335 uint32_t value)
3337 subpage_writelen(opaque, addr, value, 2);
3340 static CPUReadMemoryFunc * const subpage_read[] = {
3341 &subpage_readb,
3342 &subpage_readw,
3343 &subpage_readl,
3346 static CPUWriteMemoryFunc * const subpage_write[] = {
3347 &subpage_writeb,
3348 &subpage_writew,
3349 &subpage_writel,
3352 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3353 ram_addr_t memory, ram_addr_t region_offset)
3355 int idx, eidx;
3357 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3358 return -1;
3359 idx = SUBPAGE_IDX(start);
3360 eidx = SUBPAGE_IDX(end);
3361 #if defined(DEBUG_SUBPAGE)
3362 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3363 mmio, start, end, idx, eidx, memory);
3364 #endif
3365 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3366 for (; idx <= eidx; idx++) {
3367 mmio->sub_io_index[idx] = memory;
3368 mmio->region_offset[idx] = region_offset;
3371 return 0;
3374 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3375 ram_addr_t orig_memory,
3376 ram_addr_t region_offset)
3378 subpage_t *mmio;
3379 int subpage_memory;
3381 mmio = qemu_mallocz(sizeof(subpage_t));
3383 mmio->base = base;
3384 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3385 #if defined(DEBUG_SUBPAGE)
3386 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3387 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3388 #endif
3389 *phys = subpage_memory | IO_MEM_SUBPAGE;
3390 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3392 return mmio;
3395 static int get_free_io_mem_idx(void)
3397 int i;
3399 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3400 if (!io_mem_used[i]) {
3401 io_mem_used[i] = 1;
3402 return i;
3404 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3405 return -1;
3408 /* mem_read and mem_write are arrays of functions containing the
3409 function to access byte (index 0), word (index 1) and dword (index
3410 2). Functions can be omitted with a NULL function pointer.
3411 If io_index is non zero, the corresponding io zone is
3412 modified. If it is zero, a new io zone is allocated. The return
3413 value can be used with cpu_register_physical_memory(). (-1) is
3414 returned if error. */
3415 static int cpu_register_io_memory_fixed(int io_index,
3416 CPUReadMemoryFunc * const *mem_read,
3417 CPUWriteMemoryFunc * const *mem_write,
3418 void *opaque)
3420 int i;
3422 if (io_index <= 0) {
3423 io_index = get_free_io_mem_idx();
3424 if (io_index == -1)
3425 return io_index;
3426 } else {
3427 io_index >>= IO_MEM_SHIFT;
3428 if (io_index >= IO_MEM_NB_ENTRIES)
3429 return -1;
3432 for (i = 0; i < 3; ++i) {
3433 io_mem_read[io_index][i]
3434 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3436 for (i = 0; i < 3; ++i) {
3437 io_mem_write[io_index][i]
3438 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3440 io_mem_opaque[io_index] = opaque;
3442 return (io_index << IO_MEM_SHIFT);
3445 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3446 CPUWriteMemoryFunc * const *mem_write,
3447 void *opaque)
3449 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3452 void cpu_unregister_io_memory(int io_table_address)
3454 int i;
3455 int io_index = io_table_address >> IO_MEM_SHIFT;
3457 for (i = 0; i < 3; i++) {
3458 io_mem_read[io_index][i] = unassigned_mem_read[i];
3459 io_mem_write[io_index][i] = unassigned_mem_write[i];
3461 io_mem_opaque[io_index] = NULL;
3462 io_mem_used[io_index] = 0;
3465 static void io_mem_init(void)
3467 int i;
3469 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3470 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3471 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3472 for (i=0; i<5; i++)
3473 io_mem_used[i] = 1;
3475 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3476 watch_mem_write, NULL);
3479 #endif /* !defined(CONFIG_USER_ONLY) */
3481 /* physical memory access (slow version, mainly for debug) */
3482 #if defined(CONFIG_USER_ONLY)
3483 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3484 uint8_t *buf, int len, int is_write)
3486 int l, flags;
3487 target_ulong page;
3488 void * p;
3490 while (len > 0) {
3491 page = addr & TARGET_PAGE_MASK;
3492 l = (page + TARGET_PAGE_SIZE) - addr;
3493 if (l > len)
3494 l = len;
3495 flags = page_get_flags(page);
3496 if (!(flags & PAGE_VALID))
3497 return -1;
3498 if (is_write) {
3499 if (!(flags & PAGE_WRITE))
3500 return -1;
3501 /* XXX: this code should not depend on lock_user */
3502 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3503 return -1;
3504 memcpy(p, buf, l);
3505 unlock_user(p, addr, l);
3506 } else {
3507 if (!(flags & PAGE_READ))
3508 return -1;
3509 /* XXX: this code should not depend on lock_user */
3510 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3511 return -1;
3512 memcpy(buf, p, l);
3513 unlock_user(p, addr, 0);
3515 len -= l;
3516 buf += l;
3517 addr += l;
3519 return 0;
3522 #else
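/* Copy 'len' bytes between guest physical memory at 'addr' and 'buf',
   splitting the transfer at page boundaries: RAM pages are accessed with
   memcpy (with code invalidation and dirty marking on writes), while I/O
   pages go through the registered handlers in 4/2/1 byte chunks. */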
3523 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3524 int len, int is_write)
3526 int l, io_index;
3527 uint8_t *ptr;
3528 uint32_t val;
3529 target_phys_addr_t page;
3530 unsigned long pd;
3531 PhysPageDesc *p;
3533 while (len > 0) {
3534 page = addr & TARGET_PAGE_MASK;
3535 l = (page + TARGET_PAGE_SIZE) - addr;
3536 if (l > len)
3537 l = len;
3538 p = phys_page_find(page >> TARGET_PAGE_BITS);
3539 if (!p) {
3540 pd = IO_MEM_UNASSIGNED;
3541 } else {
3542 pd = p->phys_offset;
3545 if (is_write) {
3546 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3547 target_phys_addr_t addr1 = addr;
3548 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3549 if (p)
3550 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3551 /* XXX: could force cpu_single_env to NULL to avoid
3552 potential bugs */
3553 if (l >= 4 && ((addr1 & 3) == 0)) {
3554 /* 32 bit write access */
3555 val = ldl_p(buf);
3556 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3557 l = 4;
3558 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3559 /* 16 bit write access */
3560 val = lduw_p(buf);
3561 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3562 l = 2;
3563 } else {
3564 /* 8 bit write access */
3565 val = ldub_p(buf);
3566 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3567 l = 1;
3569 } else {
3570 unsigned long addr1;
3571 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3572 /* RAM case */
3573 ptr = qemu_get_ram_ptr(addr1);
3574 memcpy(ptr, buf, l);
3575 if (!cpu_physical_memory_is_dirty(addr1)) {
3576 /* invalidate code */
3577 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3578 /* set dirty bit */
3579 cpu_physical_memory_set_dirty_flags(
3580 addr1, (0xff & ~CODE_DIRTY_FLAG));
3582 /* qemu doesn't execute guest code directly, but kvm does,
3583 therefore flush instruction caches */
3584 if (kvm_enabled())
3585 flush_icache_range((unsigned long)ptr,
3586 ((unsigned long)ptr)+l);
3588 } else {
3589 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3590 !(pd & IO_MEM_ROMD)) {
3591 target_phys_addr_t addr1 = addr;
3592 /* I/O case */
3593 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3594 if (p)
3595 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3596 if (l >= 4 && ((addr1 & 3) == 0)) {
3597 /* 32 bit read access */
3598 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3599 stl_p(buf, val);
3600 l = 4;
3601 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3602 /* 16 bit read access */
3603 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3604 stw_p(buf, val);
3605 l = 2;
3606 } else {
3607 /* 8 bit read access */
3608 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3609 stb_p(buf, val);
3610 l = 1;
3612 } else {
3613 /* RAM case */
3614 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3615 (addr & ~TARGET_PAGE_MASK);
3616 memcpy(buf, ptr, l);
3619 len -= l;
3620 buf += l;
3621 addr += l;
3625 /* used for ROM loading : can write in RAM and ROM */
3626 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3627 const uint8_t *buf, int len)
3629 int l;
3630 uint8_t *ptr;
3631 target_phys_addr_t page;
3632 unsigned long pd;
3633 PhysPageDesc *p;
3635 while (len > 0) {
3636 page = addr & TARGET_PAGE_MASK;
3637 l = (page + TARGET_PAGE_SIZE) - addr;
3638 if (l > len)
3639 l = len;
3640 p = phys_page_find(page >> TARGET_PAGE_BITS);
3641 if (!p) {
3642 pd = IO_MEM_UNASSIGNED;
3643 } else {
3644 pd = p->phys_offset;
3647 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3648 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3649 !(pd & IO_MEM_ROMD)) {
3650 /* do nothing */
3651 } else {
3652 unsigned long addr1;
3653 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3654 /* ROM/RAM case */
3655 ptr = qemu_get_ram_ptr(addr1);
3656 memcpy(ptr, buf, l);
3658 len -= l;
3659 buf += l;
3660 addr += l;
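/* Bounce buffer used by cpu_physical_memory_map() when the requested range
   is not directly addressable RAM (e.g. MMIO): a single page-sized temporary
   buffer, so only one such mapping can be outstanding at a time. */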
3664 typedef struct {
3665 void *buffer;
3666 target_phys_addr_t addr;
3667 target_phys_addr_t len;
3668 } BounceBuffer;
3670 static BounceBuffer bounce;
3672 typedef struct MapClient {
3673 void *opaque;
3674 void (*callback)(void *opaque);
3675 QLIST_ENTRY(MapClient) link;
3676 } MapClient;
3678 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3679 = QLIST_HEAD_INITIALIZER(map_client_list);
3681 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3683 MapClient *client = qemu_malloc(sizeof(*client));
3685 client->opaque = opaque;
3686 client->callback = callback;
3687 QLIST_INSERT_HEAD(&map_client_list, client, link);
3688 return client;
3691 void cpu_unregister_map_client(void *_client)
3693 MapClient *client = (MapClient *)_client;
3695 QLIST_REMOVE(client, link);
3696 qemu_free(client);
3699 static void cpu_notify_map_clients(void)
3701 MapClient *client;
3703 while (!QLIST_EMPTY(&map_client_list)) {
3704 client = QLIST_FIRST(&map_client_list);
3705 client->callback(client->opaque);
3706 cpu_unregister_map_client(client);
3710 /* Map a physical memory region into a host virtual address.
3711 * May map a subset of the requested range, given by and returned in *plen.
3712 * May return NULL if resources needed to perform the mapping are exhausted.
3713 * Use only for reads OR writes - not for read-modify-write operations.
3714 * Use cpu_register_map_client() to know when retrying the map operation is
3715 * likely to succeed.
3717 void *cpu_physical_memory_map(target_phys_addr_t addr,
3718 target_phys_addr_t *plen,
3719 int is_write)
3721 target_phys_addr_t len = *plen;
3722 target_phys_addr_t done = 0;
3723 int l;
3724 uint8_t *ret = NULL;
3725 uint8_t *ptr;
3726 target_phys_addr_t page;
3727 unsigned long pd;
3728 PhysPageDesc *p;
3729 unsigned long addr1;
3731 while (len > 0) {
3732 page = addr & TARGET_PAGE_MASK;
3733 l = (page + TARGET_PAGE_SIZE) - addr;
3734 if (l > len)
3735 l = len;
3736 p = phys_page_find(page >> TARGET_PAGE_BITS);
3737 if (!p) {
3738 pd = IO_MEM_UNASSIGNED;
3739 } else {
3740 pd = p->phys_offset;
3743 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3744 if (done || bounce.buffer) {
3745 break;
3747 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3748 bounce.addr = addr;
3749 bounce.len = l;
3750 if (!is_write) {
3751 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3753 ptr = bounce.buffer;
3754 } else {
3755 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3756 ptr = qemu_get_ram_ptr(addr1);
3758 if (!done) {
3759 ret = ptr;
3760 } else if (ret + done != ptr) {
3761 break;
3764 len -= l;
3765 addr += l;
3766 done += l;
3768 *plen = done;
3769 return ret;
3772 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3773 * Will also mark the memory as dirty if is_write == 1. access_len gives
3774 * the amount of memory that was actually read or written by the caller.
3776 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3777 int is_write, target_phys_addr_t access_len)
3779 unsigned long flush_len = (unsigned long)access_len;
3781 if (buffer != bounce.buffer) {
3782 if (is_write) {
3783 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3784 while (access_len) {
3785 unsigned l;
3786 l = TARGET_PAGE_SIZE;
3787 if (l > access_len)
3788 l = access_len;
3789 if (!cpu_physical_memory_is_dirty(addr1)) {
3790 /* invalidate code */
3791 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3792 /* set dirty bit */
3793 cpu_physical_memory_set_dirty_flags(
3794 addr1, (0xff & ~CODE_DIRTY_FLAG));
3796 addr1 += l;
3797 access_len -= l;
3799 dma_flush_range((unsigned long)buffer,
3800 (unsigned long)buffer + flush_len);
3802 return;
3804 if (is_write) {
3805 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3807 qemu_vfree(bounce.buffer);
3808 bounce.buffer = NULL;
3809 cpu_notify_map_clients();
3812 /* warning: addr must be aligned */
3813 uint32_t ldl_phys(target_phys_addr_t addr)
3815 int io_index;
3816 uint8_t *ptr;
3817 uint32_t val;
3818 unsigned long pd;
3819 PhysPageDesc *p;
3821 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3822 if (!p) {
3823 pd = IO_MEM_UNASSIGNED;
3824 } else {
3825 pd = p->phys_offset;
3828 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3829 !(pd & IO_MEM_ROMD)) {
3830 /* I/O case */
3831 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3832 if (p)
3833 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3834 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3835 } else {
3836 /* RAM case */
3837 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3838 (addr & ~TARGET_PAGE_MASK);
3839 val = ldl_p(ptr);
3841 return val;
3844 /* warning: addr must be aligned */
3845 uint64_t ldq_phys(target_phys_addr_t addr)
3847 int io_index;
3848 uint8_t *ptr;
3849 uint64_t val;
3850 unsigned long pd;
3851 PhysPageDesc *p;
3853 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3854 if (!p) {
3855 pd = IO_MEM_UNASSIGNED;
3856 } else {
3857 pd = p->phys_offset;
3860 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3861 !(pd & IO_MEM_ROMD)) {
3862 /* I/O case */
3863 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3864 if (p)
3865 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3866 #ifdef TARGET_WORDS_BIGENDIAN
3867 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3868 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3869 #else
3870 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3871 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3872 #endif
3873 } else {
3874 /* RAM case */
3875 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3876 (addr & ~TARGET_PAGE_MASK);
3877 val = ldq_p(ptr);
3879 return val;
3882 /* XXX: optimize */
3883 uint32_t ldub_phys(target_phys_addr_t addr)
3885 uint8_t val;
3886 cpu_physical_memory_read(addr, &val, 1);
3887 return val;
3890 /* warning: addr must be aligned */
3891 uint32_t lduw_phys(target_phys_addr_t addr)
3893 int io_index;
3894 uint8_t *ptr;
3895 uint64_t val;
3896 unsigned long pd;
3897 PhysPageDesc *p;
3899 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3900 if (!p) {
3901 pd = IO_MEM_UNASSIGNED;
3902 } else {
3903 pd = p->phys_offset;
3906 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3907 !(pd & IO_MEM_ROMD)) {
3908 /* I/O case */
3909 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3910 if (p)
3911 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3912 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3913 } else {
3914 /* RAM case */
3915 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3916 (addr & ~TARGET_PAGE_MASK);
3917 val = lduw_p(ptr);
3919 return val;
3922 /* warning: addr must be aligned. The ram page is not marked as dirty
3923 and the code inside is not invalidated. It is useful if the dirty
3924 bits are used to track modified PTEs */
3925 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3927 int io_index;
3928 uint8_t *ptr;
3929 unsigned long pd;
3930 PhysPageDesc *p;
3932 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3933 if (!p) {
3934 pd = IO_MEM_UNASSIGNED;
3935 } else {
3936 pd = p->phys_offset;
3939 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3940 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3941 if (p)
3942 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3943 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3944 } else {
3945 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3946 ptr = qemu_get_ram_ptr(addr1);
3947 stl_p(ptr, val);
3949 if (unlikely(in_migration)) {
3950 if (!cpu_physical_memory_is_dirty(addr1)) {
3951 /* invalidate code */
3952 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3953 /* set dirty bit */
3954 cpu_physical_memory_set_dirty_flags(
3955 addr1, (0xff & ~CODE_DIRTY_FLAG));
3961 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3963 int io_index;
3964 uint8_t *ptr;
3965 unsigned long pd;
3966 PhysPageDesc *p;
3968 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3969 if (!p) {
3970 pd = IO_MEM_UNASSIGNED;
3971 } else {
3972 pd = p->phys_offset;
3975 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3976 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3977 if (p)
3978 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3979 #ifdef TARGET_WORDS_BIGENDIAN
3980 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3981 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3982 #else
3983 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3984 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3985 #endif
3986 } else {
3987 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3988 (addr & ~TARGET_PAGE_MASK);
3989 stq_p(ptr, val);
3993 /* warning: addr must be aligned */
3994 void stl_phys(target_phys_addr_t addr, uint32_t val)
3996 int io_index;
3997 uint8_t *ptr;
3998 unsigned long pd;
3999 PhysPageDesc *p;
4001 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4002 if (!p) {
4003 pd = IO_MEM_UNASSIGNED;
4004 } else {
4005 pd = p->phys_offset;
4008 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4009 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4010 if (p)
4011 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4012 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4013 } else {
4014 unsigned long addr1;
4015 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4016 /* RAM case */
4017 ptr = qemu_get_ram_ptr(addr1);
4018 stl_p(ptr, val);
4019 if (!cpu_physical_memory_is_dirty(addr1)) {
4020 /* invalidate code */
4021 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4022 /* set dirty bit */
4023 cpu_physical_memory_set_dirty_flags(addr1,
4024 (0xff & ~CODE_DIRTY_FLAG));
4029 /* XXX: optimize */
4030 void stb_phys(target_phys_addr_t addr, uint32_t val)
4032 uint8_t v = val;
4033 cpu_physical_memory_write(addr, &v, 1);
4036 /* warning: addr must be aligned */
4037 void stw_phys(target_phys_addr_t addr, uint32_t val)
4039 int io_index;
4040 uint8_t *ptr;
4041 unsigned long pd;
4042 PhysPageDesc *p;
4044 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4045 if (!p) {
4046 pd = IO_MEM_UNASSIGNED;
4047 } else {
4048 pd = p->phys_offset;
4051 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4052 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4053 if (p)
4054 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4055 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4056 } else {
4057 unsigned long addr1;
4058 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4059 /* RAM case */
4060 ptr = qemu_get_ram_ptr(addr1);
4061 stw_p(ptr, val);
4062 if (!cpu_physical_memory_is_dirty(addr1)) {
4063 /* invalidate code */
4064 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4065 /* set dirty bit */
4066 cpu_physical_memory_set_dirty_flags(addr1,
4067 (0xff & ~CODE_DIRTY_FLAG));
4072 /* XXX: optimize */
4073 void stq_phys(target_phys_addr_t addr, uint64_t val)
4075 val = tswap64(val);
4076 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4079 /* virtual memory access for debug (includes writing to ROM) */
4080 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4081 uint8_t *buf, int len, int is_write)
4083 int l;
4084 target_phys_addr_t phys_addr;
4085 target_ulong page;
4087 while (len > 0) {
4088 page = addr & TARGET_PAGE_MASK;
4089 phys_addr = cpu_get_phys_page_debug(env, page);
4090 /* if no physical page mapped, return an error */
4091 if (phys_addr == -1)
4092 return -1;
4093 l = (page + TARGET_PAGE_SIZE) - addr;
4094 if (l > len)
4095 l = len;
4096 phys_addr += (addr & ~TARGET_PAGE_MASK);
4097 if (is_write)
4098 cpu_physical_memory_write_rom(phys_addr, buf, l);
4099 else
4100 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4101 len -= l;
4102 buf += l;
4103 addr += l;
4105 return 0;
4107 #endif
4109 /* in deterministic execution mode, instructions doing device I/Os
4110 must be at the end of the TB */
4111 void cpu_io_recompile(CPUState *env, void *retaddr)
4113 TranslationBlock *tb;
4114 uint32_t n, cflags;
4115 target_ulong pc, cs_base;
4116 uint64_t flags;
4118 tb = tb_find_pc((unsigned long)retaddr);
4119 if (!tb) {
4120 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4121 retaddr);
4123 n = env->icount_decr.u16.low + tb->icount;
4124 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4125 /* Calculate how many instructions had been executed before the fault
4126 occurred. */
4127 n = n - env->icount_decr.u16.low;
4128 /* Generate a new TB ending on the I/O insn. */
4129 n++;
4130 /* On MIPS and SH, delay slot instructions can only be restarted if
4131 they were already the first instruction in the TB. If this is not
4132 the first instruction in a TB then re-execute the preceding
4133 branch. */
4134 #if defined(TARGET_MIPS)
4135 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4136 env->active_tc.PC -= 4;
4137 env->icount_decr.u16.low++;
4138 env->hflags &= ~MIPS_HFLAG_BMASK;
4140 #elif defined(TARGET_SH4)
4141 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4142 && n > 1) {
4143 env->pc -= 2;
4144 env->icount_decr.u16.low++;
4145 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4147 #endif
4148 /* This should never happen. */
4149 if (n > CF_COUNT_MASK)
4150 cpu_abort(env, "TB too big during recompile");
4152 cflags = n | CF_LAST_IO;
4153 pc = tb->pc;
4154 cs_base = tb->cs_base;
4155 flags = tb->flags;
4156 tb_phys_invalidate(tb, -1);
4157 /* FIXME: In theory this could raise an exception. In practice
4158 we have already translated the block once so it's probably ok. */
4159 tb_gen_code(env, pc, cs_base, flags, cflags);
4160 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4161 the first in the TB) then we end up generating a whole new TB and
4162 repeating the fault, which is horribly inefficient.
4163 Better would be to execute just this insn uncached, or generate a
4164 second new TB. */
4165 cpu_resume_from_signal(env, NULL);
4168 #if !defined(CONFIG_USER_ONLY)
4170 void dump_exec_info(FILE *f,
4171 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4173 int i, target_code_size, max_target_code_size;
4174 int direct_jmp_count, direct_jmp2_count, cross_page;
4175 TranslationBlock *tb;
4177 target_code_size = 0;
4178 max_target_code_size = 0;
4179 cross_page = 0;
4180 direct_jmp_count = 0;
4181 direct_jmp2_count = 0;
4182 for(i = 0; i < nb_tbs; i++) {
4183 tb = &tbs[i];
4184 target_code_size += tb->size;
4185 if (tb->size > max_target_code_size)
4186 max_target_code_size = tb->size;
4187 if (tb->page_addr[1] != -1)
4188 cross_page++;
4189 if (tb->tb_next_offset[0] != 0xffff) {
4190 direct_jmp_count++;
4191 if (tb->tb_next_offset[1] != 0xffff) {
4192 direct_jmp2_count++;
4196 /* XXX: avoid using doubles ? */
4197 cpu_fprintf(f, "Translation buffer state:\n");
4198 cpu_fprintf(f, "gen code size %ld/%ld\n",
4199 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4200 cpu_fprintf(f, "TB count %d/%d\n",
4201 nb_tbs, code_gen_max_blocks);
4202 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4203 nb_tbs ? target_code_size / nb_tbs : 0,
4204 max_target_code_size);
4205 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4206 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4207 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4208 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4209 cross_page,
4210 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4211 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4212 direct_jmp_count,
4213 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4214 direct_jmp2_count,
4215 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4216 cpu_fprintf(f, "\nStatistics:\n");
4217 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4218 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4219 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4220 #ifdef CONFIG_PROFILER
4221 tcg_dump_info(f, cpu_fprintf);
4222 #endif
4225 #define MMUSUFFIX _cmmu
4226 #define GETPC() NULL
4227 #define env cpu_single_env
4228 #define SOFTMMU_CODE_ACCESS
4230 #define SHIFT 0
4231 #include "softmmu_template.h"
4233 #define SHIFT 1
4234 #include "softmmu_template.h"
4236 #define SHIFT 2
4237 #include "softmmu_template.h"
4239 #define SHIFT 3
4240 #include "softmmu_template.h"
4242 #undef env
4244 #endif