[qemu-kvm/fedora.git] / exec.c (blob 2cb71df9541a9364b890798cf4d5e8db06036e9f)
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
39 #if !defined(TARGET_IA64)
40 #include "tcg.h"
41 #endif
42 #include "qemu-kvm.h"
44 #include "hw/hw.h"
45 #include "osdep.h"
46 #include "kvm.h"
47 #if defined(CONFIG_USER_ONLY)
48 #include <qemu.h>
49 #endif
51 //#define DEBUG_TB_INVALIDATE
52 //#define DEBUG_FLUSH
53 //#define DEBUG_TLB
54 //#define DEBUG_UNASSIGNED
56 /* make various TB consistency checks */
57 //#define DEBUG_TB_CHECK
58 //#define DEBUG_TLB_CHECK
60 //#define DEBUG_IOPORT
61 //#define DEBUG_SUBPAGE
63 #if !defined(CONFIG_USER_ONLY)
64 /* TB consistency checks only implemented for usermode emulation. */
65 #undef DEBUG_TB_CHECK
66 #endif
68 #define SMC_BITMAP_USE_THRESHOLD 10
70 #if defined(TARGET_SPARC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 41
72 #elif defined(TARGET_SPARC)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 36
74 #elif defined(TARGET_ALPHA)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #define TARGET_VIRT_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_PPC64)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 #elif defined(TARGET_IA64)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
85 #else
86 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
87 #define TARGET_PHYS_ADDR_SPACE_BITS 32
88 #endif
90 static TranslationBlock *tbs;
91 int code_gen_max_blocks;
92 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
93 static int nb_tbs;
94 /* any access to the tbs or the page table must use this lock */
95 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
97 #if defined(__arm__) || defined(__sparc_v9__)
98 /* The prologue must be reachable with a direct jump. ARM and Sparc64
99 have limited branch ranges (possibly also PPC) so place it in a
100    section close to the code segment. */
101 #define code_gen_section \
102 __attribute__((__section__(".gen_code"))) \
103 __attribute__((aligned (32)))
104 #else
105 #define code_gen_section \
106 __attribute__((aligned (32)))
107 #endif
109 uint8_t code_gen_prologue[1024] code_gen_section;
110 static uint8_t *code_gen_buffer;
111 static unsigned long code_gen_buffer_size;
112 /* threshold to flush the translated code buffer */
113 static unsigned long code_gen_buffer_max_size;
114 uint8_t *code_gen_ptr;
116 #if !defined(CONFIG_USER_ONLY)
117 ram_addr_t phys_ram_size;
118 int phys_ram_fd;
119 uint8_t *phys_ram_base;
120 uint8_t *phys_ram_dirty;
121 uint8_t *bios_mem;
122 static int in_migration;
123 static ram_addr_t phys_ram_alloc_offset = 0;
124 #endif
126 CPUState *first_cpu;
127 /* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
129 CPUState *cpu_single_env;
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
133 int use_icount = 0;
134 /* Current instruction counter. While executing translated code this may
135 include some instructions that have not yet been executed. */
136 int64_t qemu_icount;
138 typedef struct PageDesc {
139 /* list of TBs intersecting this ram page */
140 TranslationBlock *first_tb;
141     /* in order to optimize self-modifying code, we count the number
142        of write accesses to a given page; past a threshold we switch to a bitmap */
143 unsigned int code_write_count;
144 uint8_t *code_bitmap;
145 #if defined(CONFIG_USER_ONLY)
146 unsigned long flags;
147 #endif
148 } PageDesc;
150 typedef struct PhysPageDesc {
151 /* offset in host memory of the page + io_index in the low bits */
152 ram_addr_t phys_offset;
153 ram_addr_t region_offset;
154 } PhysPageDesc;
156 #define L2_BITS 10
157 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
158 /* XXX: this is a temporary hack for alpha target.
159 * In the future, this is to be replaced by a multi-level table
160  * to actually be able to handle the complete 64-bit address space.
162 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
163 #else
164 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
165 #endif
167 #define L1_SIZE (1 << L1_BITS)
168 #define L2_SIZE (1 << L2_BITS)
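/* Illustration of the split (assuming L2_BITS = 10 and a 4 KB target page,
   i.e. TARGET_PAGE_BITS = 12): a 32-bit address divides into 10 L1 bits,
   10 L2 bits and 12 offset bits, so L1_SIZE = L2_SIZE = 1024 and each
   allocated L2 block of PageDesc entries covers 4 MB of address space. */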
170 unsigned long qemu_real_host_page_size;
171 unsigned long qemu_host_page_bits;
172 unsigned long qemu_host_page_size;
173 unsigned long qemu_host_page_mask;
175 /* XXX: for system emulation, it could just be an array */
176 static PageDesc *l1_map[L1_SIZE];
177 static PhysPageDesc **l1_phys_map;
179 #if !defined(CONFIG_USER_ONLY)
180 static void io_mem_init(void);
182 /* io memory support */
183 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
184 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
185 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
186 static char io_mem_used[IO_MEM_NB_ENTRIES];
187 static int io_mem_watch;
188 #endif
190 /* log support */
191 static const char *logfilename = "/tmp/qemu.log";
192 FILE *logfile;
193 int loglevel;
194 static int log_append = 0;
196 /* statistics */
197 static int tlb_flush_count;
198 static int tb_flush_count;
199 static int tb_phys_invalidate_count;
201 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
202 typedef struct subpage_t {
203 target_phys_addr_t base;
204 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
205 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
206 void *opaque[TARGET_PAGE_SIZE][2][4];
207 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
208 } subpage_t;
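/* A subpage splits one target page among several memory regions: the tables
   above are indexed by SUBPAGE_IDX(addr), i.e. the byte offset within the
   page, so each offset can have its own read/write handlers, opaque pointer
   and region offset. */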
210 #ifdef _WIN32
211 static void map_exec(void *addr, long size)
213 DWORD old_protect;
214 VirtualProtect(addr, size,
215 PAGE_EXECUTE_READWRITE, &old_protect);
218 #else
219 static void map_exec(void *addr, long size)
221 unsigned long start, end, page_size;
223 page_size = getpagesize();
224 start = (unsigned long)addr;
225 start &= ~(page_size - 1);
227 end = (unsigned long)addr + size;
228 end += page_size - 1;
229 end &= ~(page_size - 1);
231 mprotect((void *)start, end - start,
232 PROT_READ | PROT_WRITE | PROT_EXEC);
234 #endif
236 static void page_init(void)
238 /* NOTE: we can always suppose that qemu_host_page_size >=
239 TARGET_PAGE_SIZE */
240 #ifdef _WIN32
242 SYSTEM_INFO system_info;
244 GetSystemInfo(&system_info);
245 qemu_real_host_page_size = system_info.dwPageSize;
247 #else
248 qemu_real_host_page_size = getpagesize();
249 #endif
250 if (qemu_host_page_size == 0)
251 qemu_host_page_size = qemu_real_host_page_size;
252 if (qemu_host_page_size < TARGET_PAGE_SIZE)
253 qemu_host_page_size = TARGET_PAGE_SIZE;
254 qemu_host_page_bits = 0;
255 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
256 qemu_host_page_bits++;
257 qemu_host_page_mask = ~(qemu_host_page_size - 1);
258 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
259 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
261 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
263 long long startaddr, endaddr;
264 FILE *f;
265 int n;
267 mmap_lock();
268 last_brk = (unsigned long)sbrk(0);
269 f = fopen("/proc/self/maps", "r");
270 if (f) {
271 do {
272 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
273 if (n == 2) {
274 startaddr = MIN(startaddr,
275 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
276 endaddr = MIN(endaddr,
277 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278 page_set_flags(startaddr & TARGET_PAGE_MASK,
279 TARGET_PAGE_ALIGN(endaddr),
280 PAGE_RESERVED);
282 } while (!feof(f));
283 fclose(f);
285 mmap_unlock();
287 #endif
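/* The helpers below walk the two-level map: page_l1_map() returns the L1
   slot for a virtual page index, and page_find_alloc() lazily allocates the
   L2 block of PageDesc entries on first use (via mmap in user-only mode so
   that qemu_malloc is not re-entered). */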
290 static inline PageDesc **page_l1_map(target_ulong index)
292 #if TARGET_LONG_BITS > 32
293 /* Host memory outside guest VM. For 32-bit targets we have already
294 excluded high addresses. */
295 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
296 return NULL;
297 #endif
298 return &l1_map[index >> L2_BITS];
301 static inline PageDesc *page_find_alloc(target_ulong index)
303 PageDesc **lp, *p;
304 lp = page_l1_map(index);
305 if (!lp)
306 return NULL;
308 p = *lp;
309 if (!p) {
310 /* allocate if not found */
311 #if defined(CONFIG_USER_ONLY)
312 size_t len = sizeof(PageDesc) * L2_SIZE;
313 /* Don't use qemu_malloc because it may recurse. */
314 p = mmap(0, len, PROT_READ | PROT_WRITE,
315 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
316 *lp = p;
317 if (h2g_valid(p)) {
318 unsigned long addr = h2g(p);
319 page_set_flags(addr & TARGET_PAGE_MASK,
320 TARGET_PAGE_ALIGN(addr + len),
321 PAGE_RESERVED);
323 #else
324 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
325 *lp = p;
326 #endif
328 return p + (index & (L2_SIZE - 1));
331 static inline PageDesc *page_find(target_ulong index)
333 PageDesc **lp, *p;
334 lp = page_l1_map(index);
335 if (!lp)
336 return NULL;
338 p = *lp;
339 if (!p)
340 return 0;
341 return p + (index & (L2_SIZE - 1));
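/* The physical page table follows the same scheme: when
   TARGET_PHYS_ADDR_SPACE_BITS > 32 an extra indirection level is inserted,
   otherwise l1_phys_map points to PhysPageDesc blocks directly.  New entries
   start out as IO_MEM_UNASSIGNED until a region is registered. */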
344 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
346 void **lp, **p;
347 PhysPageDesc *pd;
349 p = (void **)l1_phys_map;
350 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
352 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
353 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
354 #endif
355 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
356 p = *lp;
357 if (!p) {
358 /* allocate if not found */
359 if (!alloc)
360 return NULL;
361 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
362 memset(p, 0, sizeof(void *) * L1_SIZE);
363 *lp = p;
365 #endif
366 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
367 pd = *lp;
368 if (!pd) {
369 int i;
370 /* allocate if not found */
371 if (!alloc)
372 return NULL;
373 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
374 *lp = pd;
375 for (i = 0; i < L2_SIZE; i++) {
376 pd[i].phys_offset = IO_MEM_UNASSIGNED;
377 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
380 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
385 return phys_page_find_alloc(index, 0);
388 #if !defined(CONFIG_USER_ONLY)
389 static void tlb_protect_code(ram_addr_t ram_addr);
390 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
391 target_ulong vaddr);
392 #define mmap_lock() do { } while(0)
393 #define mmap_unlock() do { } while(0)
394 #endif
396 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
398 #if defined(CONFIG_USER_ONLY)
399 /* Currently it is not recommended to allocate big chunks of data in
400    user mode. This will change when a dedicated libc is used. */
401 #define USE_STATIC_CODE_GEN_BUFFER
402 #endif
404 #ifdef USE_STATIC_CODE_GEN_BUFFER
405 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
406 #endif
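/* code_gen_alloc() returns early when KVM is enabled, since guest code is
   then run by the kernel module rather than translated by TCG and no code
   buffer is needed.  Otherwise the buffer is either the static array above
   or an executable mmap whose placement is constrained per host below. */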
408 static void code_gen_alloc(unsigned long tb_size)
410 if (kvm_enabled())
411 return;
413 #ifdef USE_STATIC_CODE_GEN_BUFFER
414 code_gen_buffer = static_code_gen_buffer;
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416 map_exec(code_gen_buffer, code_gen_buffer_size);
417 #else
418 code_gen_buffer_size = tb_size;
419 if (code_gen_buffer_size == 0) {
420 #if defined(CONFIG_USER_ONLY)
421 /* in user mode, phys_ram_size is not meaningful */
422 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
423 #else
424         /* XXX: needs adjustments */
425 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
426 #endif
428 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
429 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
430 /* The code gen buffer location may have constraints depending on
431 the host cpu and OS */
432 #if defined(__linux__)
434 int flags;
435 void *start = NULL;
437 flags = MAP_PRIVATE | MAP_ANONYMOUS;
438 #if defined(__x86_64__)
439 flags |= MAP_32BIT;
440 /* Cannot map more than that */
441 if (code_gen_buffer_size > (800 * 1024 * 1024))
442 code_gen_buffer_size = (800 * 1024 * 1024);
443 #elif defined(__sparc_v9__)
444 // Map the buffer below 2G, so we can use direct calls and branches
445 flags |= MAP_FIXED;
446 start = (void *) 0x60000000UL;
447 if (code_gen_buffer_size > (512 * 1024 * 1024))
448 code_gen_buffer_size = (512 * 1024 * 1024);
449 #elif defined(__arm__)
450 /* Map the buffer below 32M, so we can use direct calls and branches */
451 flags |= MAP_FIXED;
452 start = (void *) 0x01000000UL;
453 if (code_gen_buffer_size > 16 * 1024 * 1024)
454 code_gen_buffer_size = 16 * 1024 * 1024;
455 #endif
456 code_gen_buffer = mmap(start, code_gen_buffer_size,
457 PROT_WRITE | PROT_READ | PROT_EXEC,
458 flags, -1, 0);
459 if (code_gen_buffer == MAP_FAILED) {
460 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
461 exit(1);
464 #elif defined(__FreeBSD__) || defined(__DragonFly__)
466 int flags;
467 void *addr = NULL;
468 flags = MAP_PRIVATE | MAP_ANONYMOUS;
469 #if defined(__x86_64__)
470 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
471 * 0x40000000 is free */
472 flags |= MAP_FIXED;
473 addr = (void *)0x40000000;
474 /* Cannot map more than that */
475 if (code_gen_buffer_size > (800 * 1024 * 1024))
476 code_gen_buffer_size = (800 * 1024 * 1024);
477 #endif
478 code_gen_buffer = mmap(addr, code_gen_buffer_size,
479 PROT_WRITE | PROT_READ | PROT_EXEC,
480 flags, -1, 0);
481 if (code_gen_buffer == MAP_FAILED) {
482 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
483 exit(1);
486 #else
487 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
488 map_exec(code_gen_buffer, code_gen_buffer_size);
489 #endif
490 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
491 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
492 code_gen_buffer_max_size = code_gen_buffer_size -
493 code_gen_max_block_size();
494 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
495 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
498 /* Must be called before using the QEMU cpus. 'tb_size' is the size
499 (in bytes) allocated to the translation buffer. Zero means default
500 size. */
501 void cpu_exec_init_all(unsigned long tb_size)
503 cpu_gen_init();
504 code_gen_alloc(tb_size);
505 code_gen_ptr = code_gen_buffer;
506 page_init();
507 #if !defined(CONFIG_USER_ONLY)
508 io_mem_init();
509 #endif
512 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
514 #define CPU_COMMON_SAVE_VERSION 1
516 static void cpu_common_save(QEMUFile *f, void *opaque)
518 CPUState *env = opaque;
520 qemu_put_be32s(f, &env->halted);
521 qemu_put_be32s(f, &env->interrupt_request);
524 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
526 CPUState *env = opaque;
528 if (version_id != CPU_COMMON_SAVE_VERSION)
529 return -EINVAL;
531 qemu_get_be32s(f, &env->halted);
532 qemu_get_be32s(f, &env->interrupt_request);
533 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
534 version_id is increased. */
535 env->interrupt_request &= ~0x01;
536 tlb_flush(env, 1);
538 return 0;
540 #endif
542 void cpu_exec_init(CPUState *env)
544 CPUState **penv;
545 int cpu_index;
547 #if defined(CONFIG_USER_ONLY)
548 cpu_list_lock();
549 #endif
550 env->next_cpu = NULL;
551 penv = &first_cpu;
552 cpu_index = 0;
553 while (*penv != NULL) {
554 penv = (CPUState **)&(*penv)->next_cpu;
555 cpu_index++;
557 env->cpu_index = cpu_index;
558 TAILQ_INIT(&env->breakpoints);
559 TAILQ_INIT(&env->watchpoints);
560 #ifdef _WIN32
561 env->thread_id = GetCurrentProcessId();
562 #else
563 env->thread_id = getpid();
564 #endif
565 *penv = env;
566 #if defined(CONFIG_USER_ONLY)
567 cpu_list_unlock();
568 #endif
569 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
570 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
571 cpu_common_save, cpu_common_load, env);
572 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
573 cpu_save, cpu_load, env);
574 #endif
577 static inline void invalidate_page_bitmap(PageDesc *p)
579 if (p->code_bitmap) {
580 qemu_free(p->code_bitmap);
581 p->code_bitmap = NULL;
583 p->code_write_count = 0;
586 /* set to NULL all the 'first_tb' fields in all PageDescs */
587 static void page_flush_tb(void)
589 int i, j;
590 PageDesc *p;
592 for(i = 0; i < L1_SIZE; i++) {
593 p = l1_map[i];
594 if (p) {
595 for(j = 0; j < L2_SIZE; j++) {
596 p->first_tb = NULL;
597 invalidate_page_bitmap(p);
598 p++;
604 /* flush all the translation blocks */
605 /* XXX: tb_flush is currently not thread safe */
606 void tb_flush(CPUState *env1)
608 CPUState *env;
609 #if defined(DEBUG_FLUSH)
610 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
611 (unsigned long)(code_gen_ptr - code_gen_buffer),
612 nb_tbs, nb_tbs > 0 ?
613 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
614 #endif
615 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
616 cpu_abort(env1, "Internal error: code buffer overflow\n");
618 nb_tbs = 0;
620 for(env = first_cpu; env != NULL; env = env->next_cpu) {
621 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
624 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
625 page_flush_tb();
627 code_gen_ptr = code_gen_buffer;
628 /* XXX: flush processor icache at this point if cache flush is
629 expensive */
630 tb_flush_count++;
633 #ifdef DEBUG_TB_CHECK
635 static void tb_invalidate_check(target_ulong address)
637 TranslationBlock *tb;
638 int i;
639 address &= TARGET_PAGE_MASK;
640 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
641 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
642 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
643 address >= tb->pc + tb->size)) {
644 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
645 address, (long)tb->pc, tb->size);
651 /* verify that all the pages have correct rights for code */
652 static void tb_page_check(void)
654 TranslationBlock *tb;
655 int i, flags1, flags2;
657 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
658 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
659 flags1 = page_get_flags(tb->pc);
660 flags2 = page_get_flags(tb->pc + tb->size - 1);
661 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
662 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
663 (long)tb->pc, tb->size, flags1, flags2);
669 static void tb_jmp_check(TranslationBlock *tb)
671 TranslationBlock *tb1;
672 unsigned int n1;
674 /* suppress any remaining jumps to this TB */
675 tb1 = tb->jmp_first;
676 for(;;) {
677 n1 = (long)tb1 & 3;
678 tb1 = (TranslationBlock *)((long)tb1 & ~3);
679 if (n1 == 2)
680 break;
681 tb1 = tb1->jmp_next[n1];
683 /* check end of list */
684 if (tb1 != tb) {
685 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
689 #endif
691 /* invalidate one TB */
692 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
693 int next_offset)
695 TranslationBlock *tb1;
696 for(;;) {
697 tb1 = *ptb;
698 if (tb1 == tb) {
699 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
700 break;
702 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
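/* In the page and jump lists used below, the low two bits of a stored
   TranslationBlock pointer carry extra state: in page lists they select
   which of the TB's (at most two) pages the link belongs to, and in jump
   lists the value 2 marks the head of the circular list (jmp_first).
   Hence the recurring '& 3' / '& ~3' masking. */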
706 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
708 TranslationBlock *tb1;
709 unsigned int n1;
711 for(;;) {
712 tb1 = *ptb;
713 n1 = (long)tb1 & 3;
714 tb1 = (TranslationBlock *)((long)tb1 & ~3);
715 if (tb1 == tb) {
716 *ptb = tb1->page_next[n1];
717 break;
719 ptb = &tb1->page_next[n1];
723 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
725 TranslationBlock *tb1, **ptb;
726 unsigned int n1;
728 ptb = &tb->jmp_next[n];
729 tb1 = *ptb;
730 if (tb1) {
731 /* find tb(n) in circular list */
732 for(;;) {
733 tb1 = *ptb;
734 n1 = (long)tb1 & 3;
735 tb1 = (TranslationBlock *)((long)tb1 & ~3);
736 if (n1 == n && tb1 == tb)
737 break;
738 if (n1 == 2) {
739 ptb = &tb1->jmp_first;
740 } else {
741 ptb = &tb1->jmp_next[n1];
744 /* now we can suppress tb(n) from the list */
745 *ptb = tb->jmp_next[n];
747 tb->jmp_next[n] = NULL;
751 /* reset the jump entry 'n' of a TB so that it is not chained to
752 another TB */
753 static inline void tb_reset_jump(TranslationBlock *tb, int n)
755 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
758 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
760 CPUState *env;
761 PageDesc *p;
762 unsigned int h, n1;
763 target_phys_addr_t phys_pc;
764 TranslationBlock *tb1, *tb2;
766 /* remove the TB from the hash list */
767 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
768 h = tb_phys_hash_func(phys_pc);
769 tb_remove(&tb_phys_hash[h], tb,
770 offsetof(TranslationBlock, phys_hash_next));
772 /* remove the TB from the page list */
773 if (tb->page_addr[0] != page_addr) {
774 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
775 tb_page_remove(&p->first_tb, tb);
776 invalidate_page_bitmap(p);
778 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
779 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
780 tb_page_remove(&p->first_tb, tb);
781 invalidate_page_bitmap(p);
784 tb_invalidated_flag = 1;
786 /* remove the TB from the hash list */
787 h = tb_jmp_cache_hash_func(tb->pc);
788 for(env = first_cpu; env != NULL; env = env->next_cpu) {
789 if (env->tb_jmp_cache[h] == tb)
790 env->tb_jmp_cache[h] = NULL;
793 /* suppress this TB from the two jump lists */
794 tb_jmp_remove(tb, 0);
795 tb_jmp_remove(tb, 1);
797 /* suppress any remaining jumps to this TB */
798 tb1 = tb->jmp_first;
799 for(;;) {
800 n1 = (long)tb1 & 3;
801 if (n1 == 2)
802 break;
803 tb1 = (TranslationBlock *)((long)tb1 & ~3);
804 tb2 = tb1->jmp_next[n1];
805 tb_reset_jump(tb1, n1);
806 tb1->jmp_next[n1] = NULL;
807 tb1 = tb2;
809 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
811 tb_phys_invalidate_count++;
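/* set_bits() marks 'len' bits starting at bit 'start' in a byte-wise bitmap.
   Worked example: set_bits(tab, 5, 7) sets bits 5..11, i.e. it ORs 0xe0
   into tab[0] and 0x0f into tab[1]. */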
814 static inline void set_bits(uint8_t *tab, int start, int len)
816 int end, mask, end1;
818 end = start + len;
819 tab += start >> 3;
820 mask = 0xff << (start & 7);
821 if ((start & ~7) == (end & ~7)) {
822 if (start < end) {
823 mask &= ~(0xff << (end & 7));
824 *tab |= mask;
826 } else {
827 *tab++ |= mask;
828 start = (start + 8) & ~7;
829 end1 = end & ~7;
830 while (start < end1) {
831 *tab++ = 0xff;
832 start += 8;
834 if (start < end) {
835 mask = ~(0xff << (end & 7));
836 *tab |= mask;
841 static void build_page_bitmap(PageDesc *p)
843 int n, tb_start, tb_end;
844 TranslationBlock *tb;
846 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
848 tb = p->first_tb;
849 while (tb != NULL) {
850 n = (long)tb & 3;
851 tb = (TranslationBlock *)((long)tb & ~3);
852 /* NOTE: this is subtle as a TB may span two physical pages */
853 if (n == 0) {
854 /* NOTE: tb_end may be after the end of the page, but
855 it is not a problem */
856 tb_start = tb->pc & ~TARGET_PAGE_MASK;
857 tb_end = tb_start + tb->size;
858 if (tb_end > TARGET_PAGE_SIZE)
859 tb_end = TARGET_PAGE_SIZE;
860 } else {
861 tb_start = 0;
862 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
864 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
865 tb = tb->page_next[n];
869 TranslationBlock *tb_gen_code(CPUState *env,
870 target_ulong pc, target_ulong cs_base,
871 int flags, int cflags)
873 TranslationBlock *tb;
874 uint8_t *tc_ptr;
875 target_ulong phys_pc, phys_page2, virt_page2;
876 int code_gen_size;
878 phys_pc = get_phys_addr_code(env, pc);
879 tb = tb_alloc(pc);
880 if (!tb) {
881 /* flush must be done */
882 tb_flush(env);
883 /* cannot fail at this point */
884 tb = tb_alloc(pc);
885 /* Don't forget to invalidate previous TB info. */
886 tb_invalidated_flag = 1;
888 tc_ptr = code_gen_ptr;
889 tb->tc_ptr = tc_ptr;
890 tb->cs_base = cs_base;
891 tb->flags = flags;
892 tb->cflags = cflags;
893 cpu_gen_code(env, tb, &code_gen_size);
894 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
896 /* check next page if needed */
897 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
898 phys_page2 = -1;
899 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
900 phys_page2 = get_phys_addr_code(env, virt_page2);
902 tb_link_phys(tb, phys_pc, phys_page2);
903 return tb;
906 /* invalidate all TBs which intersect with the target physical page
907 starting in range [start;end[. NOTE: start and end must refer to
908 the same physical page. 'is_cpu_write_access' should be true if called
909 from a real cpu write access: the virtual CPU will exit the current
910 TB if code is modified inside this TB. */
911 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
912 int is_cpu_write_access)
914 TranslationBlock *tb, *tb_next, *saved_tb;
915 CPUState *env = cpu_single_env;
916 target_ulong tb_start, tb_end;
917 PageDesc *p;
918 int n;
919 #ifdef TARGET_HAS_PRECISE_SMC
920 int current_tb_not_found = is_cpu_write_access;
921 TranslationBlock *current_tb = NULL;
922 int current_tb_modified = 0;
923 target_ulong current_pc = 0;
924 target_ulong current_cs_base = 0;
925 int current_flags = 0;
926 #endif /* TARGET_HAS_PRECISE_SMC */
928 p = page_find(start >> TARGET_PAGE_BITS);
929 if (!p)
930 return;
931 if (!p->code_bitmap &&
932 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
933 is_cpu_write_access) {
934 /* build code bitmap */
935 build_page_bitmap(p);
938 /* we remove all the TBs in the range [start, end[ */
939 /* XXX: see if in some cases it could be faster to invalidate all the code */
940 tb = p->first_tb;
941 while (tb != NULL) {
942 n = (long)tb & 3;
943 tb = (TranslationBlock *)((long)tb & ~3);
944 tb_next = tb->page_next[n];
945 /* NOTE: this is subtle as a TB may span two physical pages */
946 if (n == 0) {
947 /* NOTE: tb_end may be after the end of the page, but
948 it is not a problem */
949 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
950 tb_end = tb_start + tb->size;
951 } else {
952 tb_start = tb->page_addr[1];
953 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
955 if (!(tb_end <= start || tb_start >= end)) {
956 #ifdef TARGET_HAS_PRECISE_SMC
957 if (current_tb_not_found) {
958 current_tb_not_found = 0;
959 current_tb = NULL;
960 if (env->mem_io_pc) {
961 /* now we have a real cpu fault */
962 current_tb = tb_find_pc(env->mem_io_pc);
965 if (current_tb == tb &&
966 (current_tb->cflags & CF_COUNT_MASK) != 1) {
967 /* If we are modifying the current TB, we must stop
968 its execution. We could be more precise by checking
969 that the modification is after the current PC, but it
970 would require a specialized function to partially
971 restore the CPU state */
973 current_tb_modified = 1;
974 cpu_restore_state(current_tb, env,
975 env->mem_io_pc, NULL);
976 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
977 &current_flags);
979 #endif /* TARGET_HAS_PRECISE_SMC */
980 /* we need to do that to handle the case where a signal
981 occurs while doing tb_phys_invalidate() */
982 saved_tb = NULL;
983 if (env) {
984 saved_tb = env->current_tb;
985 env->current_tb = NULL;
987 tb_phys_invalidate(tb, -1);
988 if (env) {
989 env->current_tb = saved_tb;
990 if (env->interrupt_request && env->current_tb)
991 cpu_interrupt(env, env->interrupt_request);
994 tb = tb_next;
996 #if !defined(CONFIG_USER_ONLY)
997 /* if no code remaining, no need to continue to use slow writes */
998 if (!p->first_tb) {
999 invalidate_page_bitmap(p);
1000 if (is_cpu_write_access) {
1001 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1004 #endif
1005 #ifdef TARGET_HAS_PRECISE_SMC
1006 if (current_tb_modified) {
1007 /* we generate a block containing just the instruction
1008 modifying the memory. It will ensure that it cannot modify
1009 itself */
1010 env->current_tb = NULL;
1011 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1012 cpu_resume_from_signal(env, NULL);
1014 #endif
1017 /* len must be <= 8 and start must be a multiple of len */
1018 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1020 PageDesc *p;
1021 int offset, b;
1022 #if 0
1023 if (1) {
1024 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1025 cpu_single_env->mem_io_vaddr, len,
1026 cpu_single_env->eip,
1027 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1029 #endif
1030 p = page_find(start >> TARGET_PAGE_BITS);
1031 if (!p)
1032 return;
1033 if (p->code_bitmap) {
1034 offset = start & ~TARGET_PAGE_MASK;
1035 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1036 if (b & ((1 << len) - 1))
1037 goto do_invalidate;
1038 } else {
1039 do_invalidate:
1040 tb_invalidate_phys_page_range(start, start + len, 1);
1044 #if !defined(CONFIG_SOFTMMU)
1045 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1046 unsigned long pc, void *puc)
1048 TranslationBlock *tb;
1049 PageDesc *p;
1050 int n;
1051 #ifdef TARGET_HAS_PRECISE_SMC
1052 TranslationBlock *current_tb = NULL;
1053 CPUState *env = cpu_single_env;
1054 int current_tb_modified = 0;
1055 target_ulong current_pc = 0;
1056 target_ulong current_cs_base = 0;
1057 int current_flags = 0;
1058 #endif
1060 addr &= TARGET_PAGE_MASK;
1061 p = page_find(addr >> TARGET_PAGE_BITS);
1062 if (!p)
1063 return;
1064 tb = p->first_tb;
1065 #ifdef TARGET_HAS_PRECISE_SMC
1066 if (tb && pc != 0) {
1067 current_tb = tb_find_pc(pc);
1069 #endif
1070 while (tb != NULL) {
1071 n = (long)tb & 3;
1072 tb = (TranslationBlock *)((long)tb & ~3);
1073 #ifdef TARGET_HAS_PRECISE_SMC
1074 if (current_tb == tb &&
1075 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1076 /* If we are modifying the current TB, we must stop
1077 its execution. We could be more precise by checking
1078 that the modification is after the current PC, but it
1079 would require a specialized function to partially
1080 restore the CPU state */
1082 current_tb_modified = 1;
1083 cpu_restore_state(current_tb, env, pc, puc);
1084 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1085 &current_flags);
1087 #endif /* TARGET_HAS_PRECISE_SMC */
1088 tb_phys_invalidate(tb, addr);
1089 tb = tb->page_next[n];
1091 p->first_tb = NULL;
1092 #ifdef TARGET_HAS_PRECISE_SMC
1093 if (current_tb_modified) {
1094 /* we generate a block containing just the instruction
1095 modifying the memory. It will ensure that it cannot modify
1096 itself */
1097 env->current_tb = NULL;
1098 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1099 cpu_resume_from_signal(env, puc);
1101 #endif
1103 #endif
1105 /* add the tb in the target page and protect it if necessary */
1106 static inline void tb_alloc_page(TranslationBlock *tb,
1107 unsigned int n, target_ulong page_addr)
1109 PageDesc *p;
1110 TranslationBlock *last_first_tb;
1112 tb->page_addr[n] = page_addr;
1113 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1114 tb->page_next[n] = p->first_tb;
1115 last_first_tb = p->first_tb;
1116 p->first_tb = (TranslationBlock *)((long)tb | n);
1117 invalidate_page_bitmap(p);
1119 #if defined(TARGET_HAS_SMC) || 1
1121 #if defined(CONFIG_USER_ONLY)
1122 if (p->flags & PAGE_WRITE) {
1123 target_ulong addr;
1124 PageDesc *p2;
1125 int prot;
1127 /* force the host page as non writable (writes will have a
1128 page fault + mprotect overhead) */
1129 page_addr &= qemu_host_page_mask;
1130 prot = 0;
1131 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1132 addr += TARGET_PAGE_SIZE) {
1134 p2 = page_find (addr >> TARGET_PAGE_BITS);
1135 if (!p2)
1136 continue;
1137 prot |= p2->flags;
1138 p2->flags &= ~PAGE_WRITE;
1139 page_get_flags(addr);
1141 mprotect(g2h(page_addr), qemu_host_page_size,
1142 (prot & PAGE_BITS) & ~PAGE_WRITE);
1143 #ifdef DEBUG_TB_INVALIDATE
1144 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1145 page_addr);
1146 #endif
1148 #else
1149 /* if some code is already present, then the pages are already
1150 protected. So we handle the case where only the first TB is
1151 allocated in a physical page */
1152 if (!last_first_tb) {
1153 tlb_protect_code(page_addr);
1155 #endif
1157 #endif /* TARGET_HAS_SMC */
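/* tb_alloc() below refuses to hand out a TB once the TB array is full or
   fewer than code_gen_max_block_size() bytes remain in the code buffer
   (that is the role of code_gen_buffer_max_size); the caller then performs
   a full tb_flush() and retries, which is why the second tb_alloc() call in
   tb_gen_code() cannot fail. */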
1160 /* Allocate a new translation block. Flush the translation buffer if
1161 too many translation blocks or too much generated code. */
1162 TranslationBlock *tb_alloc(target_ulong pc)
1164 TranslationBlock *tb;
1166 if (nb_tbs >= code_gen_max_blocks ||
1167 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1168 return NULL;
1169 tb = &tbs[nb_tbs++];
1170 tb->pc = pc;
1171 tb->cflags = 0;
1172 return tb;
1175 void tb_free(TranslationBlock *tb)
1177     /* In practice this is mostly used for single-use temporary TBs.
1178 Ignore the hard cases and just back up if this TB happens to
1179 be the last one generated. */
1180 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1181 code_gen_ptr = tb->tc_ptr;
1182 nb_tbs--;
1186 /* add a new TB and link it to the physical page tables. phys_page2 is
1187 (-1) to indicate that only one page contains the TB. */
1188 void tb_link_phys(TranslationBlock *tb,
1189 target_ulong phys_pc, target_ulong phys_page2)
1191 unsigned int h;
1192 TranslationBlock **ptb;
1194 /* Grab the mmap lock to stop another thread invalidating this TB
1195 before we are done. */
1196 mmap_lock();
1197 /* add in the physical hash table */
1198 h = tb_phys_hash_func(phys_pc);
1199 ptb = &tb_phys_hash[h];
1200 tb->phys_hash_next = *ptb;
1201 *ptb = tb;
1203 /* add in the page list */
1204 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1205 if (phys_page2 != -1)
1206 tb_alloc_page(tb, 1, phys_page2);
1207 else
1208 tb->page_addr[1] = -1;
1210 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1211 tb->jmp_next[0] = NULL;
1212 tb->jmp_next[1] = NULL;
1214 /* init original jump addresses */
1215 if (tb->tb_next_offset[0] != 0xffff)
1216 tb_reset_jump(tb, 0);
1217 if (tb->tb_next_offset[1] != 0xffff)
1218 tb_reset_jump(tb, 1);
1220 #ifdef DEBUG_TB_CHECK
1221 tb_page_check();
1222 #endif
1223 mmap_unlock();
1226 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1227 tb[1].tc_ptr. Return NULL if not found */
1228 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1230 int m_min, m_max, m;
1231 unsigned long v;
1232 TranslationBlock *tb;
1234 if (nb_tbs <= 0)
1235 return NULL;
1236 if (tc_ptr < (unsigned long)code_gen_buffer ||
1237 tc_ptr >= (unsigned long)code_gen_ptr)
1238 return NULL;
1239 /* binary search (cf Knuth) */
1240 m_min = 0;
1241 m_max = nb_tbs - 1;
1242 while (m_min <= m_max) {
1243 m = (m_min + m_max) >> 1;
1244 tb = &tbs[m];
1245 v = (unsigned long)tb->tc_ptr;
1246 if (v == tc_ptr)
1247 return tb;
1248 else if (tc_ptr < v) {
1249 m_max = m - 1;
1250 } else {
1251 m_min = m + 1;
1254 return &tbs[m_max];
1257 static void tb_reset_jump_recursive(TranslationBlock *tb);
1259 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1261 TranslationBlock *tb1, *tb_next, **ptb;
1262 unsigned int n1;
1264 tb1 = tb->jmp_next[n];
1265 if (tb1 != NULL) {
1266 /* find head of list */
1267 for(;;) {
1268 n1 = (long)tb1 & 3;
1269 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1270 if (n1 == 2)
1271 break;
1272 tb1 = tb1->jmp_next[n1];
1274         /* we are now sure that tb jumps to tb1 */
1275 tb_next = tb1;
1277 /* remove tb from the jmp_first list */
1278 ptb = &tb_next->jmp_first;
1279 for(;;) {
1280 tb1 = *ptb;
1281 n1 = (long)tb1 & 3;
1282 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1283 if (n1 == n && tb1 == tb)
1284 break;
1285 ptb = &tb1->jmp_next[n1];
1287 *ptb = tb->jmp_next[n];
1288 tb->jmp_next[n] = NULL;
1290 /* suppress the jump to next tb in generated code */
1291 tb_reset_jump(tb, n);
1293 /* suppress jumps in the tb on which we could have jumped */
1294 tb_reset_jump_recursive(tb_next);
1298 static void tb_reset_jump_recursive(TranslationBlock *tb)
1300 tb_reset_jump_recursive2(tb, 0);
1301 tb_reset_jump_recursive2(tb, 1);
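/* breakpoint_invalidate() translates the breakpoint's virtual PC to a ram
   address and invalidates any TB covering that byte, so the affected code
   is retranslated (and the breakpoint can take effect) on next execution. */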
1304 #if defined(TARGET_HAS_ICE)
1305 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1307 target_phys_addr_t addr;
1308 target_ulong pd;
1309 ram_addr_t ram_addr;
1310 PhysPageDesc *p;
1312 addr = cpu_get_phys_page_debug(env, pc);
1313 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1314 if (!p) {
1315 pd = IO_MEM_UNASSIGNED;
1316 } else {
1317 pd = p->phys_offset;
1319 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1320 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1322 #endif
1324 /* Add a watchpoint. */
1325 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1326 int flags, CPUWatchpoint **watchpoint)
1328 target_ulong len_mask = ~(len - 1);
1329 CPUWatchpoint *wp;
1331 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1332 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1333 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1334 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1335 return -EINVAL;
1337 wp = qemu_malloc(sizeof(*wp));
1339 wp->vaddr = addr;
1340 wp->len_mask = len_mask;
1341 wp->flags = flags;
1343 /* keep all GDB-injected watchpoints in front */
1344 if (flags & BP_GDB)
1345 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1346 else
1347 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1349 tlb_flush_page(env, addr);
1351 if (watchpoint)
1352 *watchpoint = wp;
1353 return 0;
1356 /* Remove a specific watchpoint. */
1357 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1358 int flags)
1360 target_ulong len_mask = ~(len - 1);
1361 CPUWatchpoint *wp;
1363 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1364 if (addr == wp->vaddr && len_mask == wp->len_mask
1365 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1366 cpu_watchpoint_remove_by_ref(env, wp);
1367 return 0;
1370 return -ENOENT;
1373 /* Remove a specific watchpoint by reference. */
1374 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1376 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1378 tlb_flush_page(env, watchpoint->vaddr);
1380 qemu_free(watchpoint);
1383 /* Remove all matching watchpoints. */
1384 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1386 CPUWatchpoint *wp, *next;
1388 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1389 if (wp->flags & mask)
1390 cpu_watchpoint_remove_by_ref(env, wp);
1394 /* Add a breakpoint. */
1395 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1396 CPUBreakpoint **breakpoint)
1398 #if defined(TARGET_HAS_ICE)
1399 CPUBreakpoint *bp;
1401 bp = qemu_malloc(sizeof(*bp));
1403 bp->pc = pc;
1404 bp->flags = flags;
1406 /* keep all GDB-injected breakpoints in front */
1407 if (flags & BP_GDB)
1408 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1409 else
1410 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1412 breakpoint_invalidate(env, pc);
1414 if (breakpoint)
1415 *breakpoint = bp;
1416 return 0;
1417 #else
1418 return -ENOSYS;
1419 #endif
1422 /* Remove a specific breakpoint. */
1423 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1425 #if defined(TARGET_HAS_ICE)
1426 CPUBreakpoint *bp;
1428 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1429 if (bp->pc == pc && bp->flags == flags) {
1430 cpu_breakpoint_remove_by_ref(env, bp);
1431 return 0;
1434 return -ENOENT;
1435 #else
1436 return -ENOSYS;
1437 #endif
1440 /* Remove a specific breakpoint by reference. */
1441 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1443 #if defined(TARGET_HAS_ICE)
1444 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1446 breakpoint_invalidate(env, breakpoint->pc);
1448 qemu_free(breakpoint);
1449 #endif
1452 /* Remove all matching breakpoints. */
1453 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1455 #if defined(TARGET_HAS_ICE)
1456 CPUBreakpoint *bp, *next;
1458 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1459 if (bp->flags & mask)
1460 cpu_breakpoint_remove_by_ref(env, bp);
1462 #endif
1465 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1466 CPU loop after each instruction */
1467 void cpu_single_step(CPUState *env, int enabled)
1469 #if defined(TARGET_HAS_ICE)
1470 if (env->singlestep_enabled != enabled) {
1471 env->singlestep_enabled = enabled;
1472 if (kvm_enabled())
1473 kvm_update_guest_debug(env, 0);
1474 else {
1475             /* must flush all the translated code to avoid inconsistencies */
1476 /* XXX: only flush what is necessary */
1477 tb_flush(env);
1480 #endif
1483 /* enable or disable low-level logging */
1484 void cpu_set_log(int log_flags)
1486 loglevel = log_flags;
1487 if (loglevel && !logfile) {
1488 logfile = fopen(logfilename, log_append ? "a" : "w");
1489 if (!logfile) {
1490 perror(logfilename);
1491 _exit(1);
1493 #if !defined(CONFIG_SOFTMMU)
1494 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1496 static char logfile_buf[4096];
1497 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1499 #else
1500 setvbuf(logfile, NULL, _IOLBF, 0);
1501 #endif
1502 log_append = 1;
1504 if (!loglevel && logfile) {
1505 fclose(logfile);
1506 logfile = NULL;
1510 void cpu_set_log_filename(const char *filename)
1512 logfilename = strdup(filename);
1513 if (logfile) {
1514 fclose(logfile);
1515 logfile = NULL;
1517 cpu_set_log(loglevel);
1520 static void cpu_unlink_tb(CPUState *env)
1522 #if defined(USE_NPTL)
1523 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1524 problem and hope the cpu will stop of its own accord. For userspace
1525 emulation this often isn't actually as bad as it sounds. Often
1526 signals are used primarily to interrupt blocking syscalls. */
1527 #else
1528 TranslationBlock *tb;
1529 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1531 tb = env->current_tb;
1532 /* if the cpu is currently executing code, we must unlink it and
1533 all the potentially executing TB */
1534 if (tb && !testandset(&interrupt_lock)) {
1535 env->current_tb = NULL;
1536 tb_reset_jump_recursive(tb);
1537 resetlock(&interrupt_lock);
1539 #endif
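/* cpu_unlink_tb() breaks the chain of direct jumps starting at the TB the
   CPU is currently executing, forcing control back to the main cpu_exec()
   loop so that a pending interrupt or exit request is noticed. */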
1542 /* mask must never be zero, except for A20 change call */
1543 void cpu_interrupt(CPUState *env, int mask)
1545 int old_mask;
1547 old_mask = env->interrupt_request;
1548 env->interrupt_request |= mask;
1549 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1550 kvm_update_interrupt_request(env);
1552 if (use_icount) {
1553 env->icount_decr.u16.high = 0xffff;
1554 #ifndef CONFIG_USER_ONLY
1555 if (!can_do_io(env)
1556 && (mask & ~old_mask) != 0) {
1557 cpu_abort(env, "Raised interrupt while not in I/O function");
1559 #endif
1560 } else {
1561 cpu_unlink_tb(env);
1565 void cpu_reset_interrupt(CPUState *env, int mask)
1567 env->interrupt_request &= ~mask;
1570 void cpu_exit(CPUState *env)
1572 env->exit_request = 1;
1573 cpu_unlink_tb(env);
1576 const CPULogItem cpu_log_items[] = {
1577 { CPU_LOG_TB_OUT_ASM, "out_asm",
1578 "show generated host assembly code for each compiled TB" },
1579 { CPU_LOG_TB_IN_ASM, "in_asm",
1580 "show target assembly code for each compiled TB" },
1581 { CPU_LOG_TB_OP, "op",
1582 "show micro ops for each compiled TB" },
1583 { CPU_LOG_TB_OP_OPT, "op_opt",
1584 "show micro ops "
1585 #ifdef TARGET_I386
1586 "before eflags optimization and "
1587 #endif
1588 "after liveness analysis" },
1589 { CPU_LOG_INT, "int",
1590 "show interrupts/exceptions in short format" },
1591 { CPU_LOG_EXEC, "exec",
1592 "show trace before each executed TB (lots of logs)" },
1593 { CPU_LOG_TB_CPU, "cpu",
1594 "show CPU state before block translation" },
1595 #ifdef TARGET_I386
1596 { CPU_LOG_PCALL, "pcall",
1597 "show protected mode far calls/returns/exceptions" },
1598 { CPU_LOG_RESET, "cpu_reset",
1599 "show CPU state before CPU resets" },
1600 #endif
1601 #ifdef DEBUG_IOPORT
1602 { CPU_LOG_IOPORT, "ioport",
1603 "show all i/o ports accesses" },
1604 #endif
1605 { 0, NULL, NULL },
1608 static int cmp1(const char *s1, int n, const char *s2)
1610 if (strlen(s2) != n)
1611 return 0;
1612 return memcmp(s1, s2, n) == 0;
1615 /* takes a comma separated list of log masks. Return 0 if error. */
1616 int cpu_str_to_log_mask(const char *str)
1618 const CPULogItem *item;
1619 int mask;
1620 const char *p, *p1;
1622 p = str;
1623 mask = 0;
1624 for(;;) {
1625 p1 = strchr(p, ',');
1626 if (!p1)
1627 p1 = p + strlen(p);
1628 if(cmp1(p,p1-p,"all")) {
1629 for(item = cpu_log_items; item->mask != 0; item++) {
1630 mask |= item->mask;
1632 } else {
1633 for(item = cpu_log_items; item->mask != 0; item++) {
1634 if (cmp1(p, p1 - p, item->name))
1635 goto found;
1637 return 0;
1639 found:
1640 mask |= item->mask;
1641 if (*p1 != ',')
1642 break;
1643 p = p1 + 1;
1645 return mask;
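/* Illustrative example: cpu_str_to_log_mask("in_asm,cpu") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, while an unknown name anywhere in the
   list makes the whole call return 0. */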
1648 void cpu_abort(CPUState *env, const char *fmt, ...)
1650 va_list ap;
1651 va_list ap2;
1653 va_start(ap, fmt);
1654 va_copy(ap2, ap);
1655 fprintf(stderr, "qemu: fatal: ");
1656 vfprintf(stderr, fmt, ap);
1657 fprintf(stderr, "\n");
1658 #ifdef TARGET_I386
1659 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1660 #else
1661 cpu_dump_state(env, stderr, fprintf, 0);
1662 #endif
1663 if (qemu_log_enabled()) {
1664 qemu_log("qemu: fatal: ");
1665 qemu_log_vprintf(fmt, ap2);
1666 qemu_log("\n");
1667 #ifdef TARGET_I386
1668 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1669 #else
1670 log_cpu_state(env, 0);
1671 #endif
1672 qemu_log_flush();
1673 qemu_log_close();
1675 va_end(ap2);
1676 va_end(ap);
1677 abort();
1680 CPUState *cpu_copy(CPUState *env)
1682 CPUState *new_env = cpu_init(env->cpu_model_str);
1683 CPUState *next_cpu = new_env->next_cpu;
1684 int cpu_index = new_env->cpu_index;
1685 #if defined(TARGET_HAS_ICE)
1686 CPUBreakpoint *bp;
1687 CPUWatchpoint *wp;
1688 #endif
1690 memcpy(new_env, env, sizeof(CPUState));
1692 /* Preserve chaining and index. */
1693 new_env->next_cpu = next_cpu;
1694 new_env->cpu_index = cpu_index;
1696 /* Clone all break/watchpoints.
1697 Note: Once we support ptrace with hw-debug register access, make sure
1698 BP_CPU break/watchpoints are handled correctly on clone. */
1699     TAILQ_INIT(&new_env->breakpoints);
1700     TAILQ_INIT(&new_env->watchpoints);
1701 #if defined(TARGET_HAS_ICE)
1702 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1703 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1705 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1706 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1707 wp->flags, NULL);
1709 #endif
1711 return new_env;
1714 #if !defined(CONFIG_USER_ONLY)
1716 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1718 unsigned int i;
1720 /* Discard jump cache entries for any tb which might potentially
1721 overlap the flushed page. */
1722 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1723 memset (&env->tb_jmp_cache[i], 0,
1724 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1726 i = tb_jmp_cache_hash_page(addr);
1727 memset (&env->tb_jmp_cache[i], 0,
1728 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1731 /* NOTE: if flush_global is true, also flush global entries (not
1732 implemented yet) */
1733 void tlb_flush(CPUState *env, int flush_global)
1735 int i;
1737 #if defined(DEBUG_TLB)
1738 printf("tlb_flush:\n");
1739 #endif
1740 /* must reset current TB so that interrupts cannot modify the
1741 links while we are modifying them */
1742 env->current_tb = NULL;
1744 for(i = 0; i < CPU_TLB_SIZE; i++) {
1745 env->tlb_table[0][i].addr_read = -1;
1746 env->tlb_table[0][i].addr_write = -1;
1747 env->tlb_table[0][i].addr_code = -1;
1748 env->tlb_table[1][i].addr_read = -1;
1749 env->tlb_table[1][i].addr_write = -1;
1750 env->tlb_table[1][i].addr_code = -1;
1751 #if (NB_MMU_MODES >= 3)
1752 env->tlb_table[2][i].addr_read = -1;
1753 env->tlb_table[2][i].addr_write = -1;
1754 env->tlb_table[2][i].addr_code = -1;
1755 #if (NB_MMU_MODES == 4)
1756 env->tlb_table[3][i].addr_read = -1;
1757 env->tlb_table[3][i].addr_write = -1;
1758 env->tlb_table[3][i].addr_code = -1;
1759 #endif
1760 #endif
1763 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1765 #ifdef USE_KQEMU
1766 if (env->kqemu_enabled) {
1767 kqemu_flush(env, flush_global);
1769 #endif
1770 tlb_flush_count++;
1773 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1775 if (addr == (tlb_entry->addr_read &
1776 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1777 addr == (tlb_entry->addr_write &
1778 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1779 addr == (tlb_entry->addr_code &
1780 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1781 tlb_entry->addr_read = -1;
1782 tlb_entry->addr_write = -1;
1783 tlb_entry->addr_code = -1;
1787 void tlb_flush_page(CPUState *env, target_ulong addr)
1789 int i;
1791 #if defined(DEBUG_TLB)
1792 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1793 #endif
1794 /* must reset current TB so that interrupts cannot modify the
1795 links while we are modifying them */
1796 env->current_tb = NULL;
1798 addr &= TARGET_PAGE_MASK;
1799 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1800 tlb_flush_entry(&env->tlb_table[0][i], addr);
1801 tlb_flush_entry(&env->tlb_table[1][i], addr);
1802 #if (NB_MMU_MODES >= 3)
1803 tlb_flush_entry(&env->tlb_table[2][i], addr);
1804 #if (NB_MMU_MODES == 4)
1805 tlb_flush_entry(&env->tlb_table[3][i], addr);
1806 #endif
1807 #endif
1809 tlb_flush_jmp_cache(env, addr);
1811 #ifdef USE_KQEMU
1812 if (env->kqemu_enabled) {
1813 kqemu_flush_page(env, addr);
1815 #endif
1818 /* update the TLBs so that writes to code in the virtual page 'addr'
1819 can be detected */
1820 static void tlb_protect_code(ram_addr_t ram_addr)
1822 cpu_physical_memory_reset_dirty(ram_addr,
1823 ram_addr + TARGET_PAGE_SIZE,
1824 CODE_DIRTY_FLAG);
1827 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1828 tested for self modifying code */
1829 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1830 target_ulong vaddr)
1832 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1835 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1836 unsigned long start, unsigned long length)
1838 unsigned long addr;
1839 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1840 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1841 if ((addr - start) < length) {
1842 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
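/* Dirty tracking keeps one flag byte per target page in phys_ram_dirty.
   Clearing a range below also rewrites matching RAM write entries in every
   CPU's TLB with TLB_NOTDIRTY, so the next guest store to those pages takes
   the slow path and can set the dirty bits again. */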
1847 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1848 int dirty_flags)
1850 CPUState *env;
1851 unsigned long length, start1;
1852 int i, mask, len;
1853 uint8_t *p;
1855 start &= TARGET_PAGE_MASK;
1856 end = TARGET_PAGE_ALIGN(end);
1858 length = end - start;
1859 if (length == 0)
1860 return;
1861 len = length >> TARGET_PAGE_BITS;
1862 #ifdef USE_KQEMU
1863 /* XXX: should not depend on cpu context */
1864 env = first_cpu;
1865 if (env->kqemu_enabled) {
1866 ram_addr_t addr;
1867 addr = start;
1868 for(i = 0; i < len; i++) {
1869 kqemu_set_notdirty(env, addr);
1870 addr += TARGET_PAGE_SIZE;
1873 #endif
1874 mask = ~dirty_flags;
1875 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1876 for(i = 0; i < len; i++)
1877 p[i] &= mask;
1879 /* we modify the TLB cache so that the dirty bit will be set again
1880 when accessing the range */
1881 start1 = start + (unsigned long)phys_ram_base;
1882 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1883 for(i = 0; i < CPU_TLB_SIZE; i++)
1884 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1885 for(i = 0; i < CPU_TLB_SIZE; i++)
1886 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1887 #if (NB_MMU_MODES >= 3)
1888 for(i = 0; i < CPU_TLB_SIZE; i++)
1889 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1890 #if (NB_MMU_MODES == 4)
1891 for(i = 0; i < CPU_TLB_SIZE; i++)
1892 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1893 #endif
1894 #endif
1898 int cpu_physical_memory_set_dirty_tracking(int enable)
1900 int r=0;
1902 if (kvm_enabled())
1903 r = kvm_physical_memory_set_dirty_tracking(enable);
1904 in_migration = enable;
1905 return r;
1908 int cpu_physical_memory_get_dirty_tracking(void)
1910 return in_migration;
1913 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1915 if (kvm_enabled())
1916 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1919 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1921 ram_addr_t ram_addr;
1923 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1924 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1925 tlb_entry->addend - (unsigned long)phys_ram_base;
1926 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1927 tlb_entry->addr_write |= TLB_NOTDIRTY;
1932 /* update the TLB according to the current state of the dirty bits */
1933 void cpu_tlb_update_dirty(CPUState *env)
1935 int i;
1936 for(i = 0; i < CPU_TLB_SIZE; i++)
1937 tlb_update_dirty(&env->tlb_table[0][i]);
1938 for(i = 0; i < CPU_TLB_SIZE; i++)
1939 tlb_update_dirty(&env->tlb_table[1][i]);
1940 #if (NB_MMU_MODES >= 3)
1941 for(i = 0; i < CPU_TLB_SIZE; i++)
1942 tlb_update_dirty(&env->tlb_table[2][i]);
1943 #if (NB_MMU_MODES == 4)
1944 for(i = 0; i < CPU_TLB_SIZE; i++)
1945 tlb_update_dirty(&env->tlb_table[3][i]);
1946 #endif
1947 #endif
1950 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1952 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1953 tlb_entry->addr_write = vaddr;
1956 /* update the TLB corresponding to virtual page vaddr
1957 so that it is no longer dirty */
1958 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1960 int i;
1962 vaddr &= TARGET_PAGE_MASK;
1963 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1964 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1965 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1966 #if (NB_MMU_MODES >= 3)
1967 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1968 #if (NB_MMU_MODES == 4)
1969 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1970 #endif
1971 #endif
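/* In the TLB fill below, te->addend is chosen so that vaddr + addend yields
   the host address inside phys_ram_base for RAM pages.  iotlb holds either
   the page's ram offset combined with IO_MEM_NOTDIRTY/IO_MEM_ROM, or the
   I/O handler index plus the region offset for MMIO, and is stored relative
   to vaddr in env->iotlb. */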
1974 /* add a new TLB entry. At most one entry for a given virtual address
1975 is permitted. Return 0 if OK or 2 if the page could not be mapped
1976 (can only happen in non SOFTMMU mode for I/O pages or pages
1977 conflicting with the host address space). */
1978 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1979 target_phys_addr_t paddr, int prot,
1980 int mmu_idx, int is_softmmu)
1982 PhysPageDesc *p;
1983 unsigned long pd;
1984 unsigned int index;
1985 target_ulong address;
1986 target_ulong code_address;
1987 target_phys_addr_t addend;
1988 int ret;
1989 CPUTLBEntry *te;
1990 CPUWatchpoint *wp;
1991 target_phys_addr_t iotlb;
1993 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1994 if (!p) {
1995 pd = IO_MEM_UNASSIGNED;
1996 } else {
1997 pd = p->phys_offset;
1999 #if defined(DEBUG_TLB)
2000 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2001 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2002 #endif
2004 ret = 0;
2005 address = vaddr;
2006 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2007 /* IO memory case (romd handled later) */
2008 address |= TLB_MMIO;
2010 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2011 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2012 /* Normal RAM. */
2013 iotlb = pd & TARGET_PAGE_MASK;
2014 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2015 iotlb |= IO_MEM_NOTDIRTY;
2016 else
2017 iotlb |= IO_MEM_ROM;
2018 } else {
2019         /* IO handlers are currently passed a physical address.
2020 It would be nice to pass an offset from the base address
2021 of that region. This would avoid having to special case RAM,
2022 and avoid full address decoding in every device.
2023 We can't use the high bits of pd for this because
2024 IO_MEM_ROMD uses these as a ram address. */
2025 iotlb = (pd & ~TARGET_PAGE_MASK);
2026 if (p) {
2027 iotlb += p->region_offset;
2028 } else {
2029 iotlb += paddr;
2033 code_address = address;
2034 /* Make accesses to pages with watchpoints go via the
2035 watchpoint trap routines. */
2036 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2037 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2038 iotlb = io_mem_watch + paddr;
2039 /* TODO: The memory case can be optimized by not trapping
2040 reads of pages with a write breakpoint. */
2041 address |= TLB_MMIO;
2045 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2046 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2047 te = &env->tlb_table[mmu_idx][index];
2048 te->addend = addend - vaddr;
2049 if (prot & PAGE_READ) {
2050 te->addr_read = address;
2051 } else {
2052 te->addr_read = -1;
2055 if (prot & PAGE_EXEC) {
2056 te->addr_code = code_address;
2057 } else {
2058 te->addr_code = -1;
2060 if (prot & PAGE_WRITE) {
2061 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2062 (pd & IO_MEM_ROMD)) {
2063 /* Write access calls the I/O callback. */
2064 te->addr_write = address | TLB_MMIO;
2065 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2066 !cpu_physical_memory_is_dirty(pd)) {
2067 te->addr_write = address | TLB_NOTDIRTY;
2068 } else {
2069 te->addr_write = address;
2071 } else {
2072 te->addr_write = -1;
2074 return ret;
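/* Illustrative sketch (not compiled; the helper name and the simplified hit
   test are assumptions, the real fast path lives in softmmu_template.h):
   how a softmmu load would consume the CPUTLBEntry filled in by
   tlb_set_page_exec() above. */
#if 0
static inline int example_tlb_lookup_read(CPUState *env1, target_ulong vaddr1,
                                          int mmu_idx1, uint8_t **host_ptr)
{
    int idx = (vaddr1 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env1->tlb_table[mmu_idx1][idx];

    /* A plain RAM hit: addr_read holds the page-aligned vaddr with no
       TLB_MMIO/TLB_NOTDIRTY flag bits set in its low bits. */
    if (e->addr_read == (vaddr1 & TARGET_PAGE_MASK)) {
        *host_ptr = (uint8_t *)(unsigned long)(vaddr1 + e->addend);
        return 1;
    }
    return 0; /* slow path: refill the entry via tlb_set_page_exec() */
}
#endif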
2077 #else
2079 void tlb_flush(CPUState *env, int flush_global)
2083 void tlb_flush_page(CPUState *env, target_ulong addr)
2087 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2088 target_phys_addr_t paddr, int prot,
2089 int mmu_idx, int is_softmmu)
2091 return 0;
2094 /* dump memory mappings */
2095 void page_dump(FILE *f)
2097 unsigned long start, end;
2098 int i, j, prot, prot1;
2099 PageDesc *p;
2101 fprintf(f, "%-8s %-8s %-8s %s\n",
2102 "start", "end", "size", "prot");
2103 start = -1;
2104 end = -1;
2105 prot = 0;
2106 for(i = 0; i <= L1_SIZE; i++) {
2107 if (i < L1_SIZE)
2108 p = l1_map[i];
2109 else
2110 p = NULL;
2111 for(j = 0;j < L2_SIZE; j++) {
2112 if (!p)
2113 prot1 = 0;
2114 else
2115 prot1 = p[j].flags;
2116 if (prot1 != prot) {
2117 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2118 if (start != -1) {
2119 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2120 start, end, end - start,
2121 prot & PAGE_READ ? 'r' : '-',
2122 prot & PAGE_WRITE ? 'w' : '-',
2123 prot & PAGE_EXEC ? 'x' : '-');
2125 if (prot1 != 0)
2126 start = end;
2127 else
2128 start = -1;
2129 prot = prot1;
2131 if (!p)
2132 break;
2137 int page_get_flags(target_ulong address)
2139 PageDesc *p;
2141 p = page_find(address >> TARGET_PAGE_BITS);
2142 if (!p)
2143 return 0;
2144 return p->flags;
2147 /* modify the flags of a page and invalidate the code if
2148 necessary. The flag PAGE_WRITE_ORG is set automatically
2149 depending on PAGE_WRITE */
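/* Note: PAGE_WRITE_ORG records that the guest page was originally writable;
   page_unprotect() below uses it to restore PAGE_WRITE once the translated
   code on the page has been invalidated. */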
2150 void page_set_flags(target_ulong start, target_ulong end, int flags)
2152 PageDesc *p;
2153 target_ulong addr;
2155 /* mmap_lock should already be held. */
2156 start = start & TARGET_PAGE_MASK;
2157 end = TARGET_PAGE_ALIGN(end);
2158 if (flags & PAGE_WRITE)
2159 flags |= PAGE_WRITE_ORG;
2160 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2161 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2162 /* We may be called for host regions that are outside guest
2163 address space. */
2164 if (!p)
2165 return;
2166 /* if the write protection is set, then we invalidate the code
2167 inside */
2168 if (!(p->flags & PAGE_WRITE) &&
2169 (flags & PAGE_WRITE) &&
2170 p->first_tb) {
2171 tb_invalidate_phys_page(addr, 0, NULL);
2173 p->flags = flags;
2177 int page_check_range(target_ulong start, target_ulong len, int flags)
2179 PageDesc *p;
2180 target_ulong end;
2181 target_ulong addr;
2183 if (start + len < start)
2184 /* we've wrapped around */
2185 return -1;
2187 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2188 start = start & TARGET_PAGE_MASK;
2190 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2191 p = page_find(addr >> TARGET_PAGE_BITS);
2192 if( !p )
2193 return -1;
2194 if( !(p->flags & PAGE_VALID) )
2195 return -1;
2197 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2198 return -1;
2199 if (flags & PAGE_WRITE) {
2200 if (!(p->flags & PAGE_WRITE_ORG))
2201 return -1;
2202 /* unprotect the page if it was put read-only because it
2203 contains translated code */
2204 if (!(p->flags & PAGE_WRITE)) {
2205 if (!page_unprotect(addr, 0, NULL))
2206 return -1;
2208 return 0;
2211 return 0;
2214 /* called from signal handler: invalidate the code and unprotect the
2215 page. Return TRUE if the fault was successfully handled. */
2216 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2218 unsigned int page_index, prot, pindex;
2219 PageDesc *p, *p1;
2220 target_ulong host_start, host_end, addr;
2222 /* Technically this isn't safe inside a signal handler. However we
2223 know this only ever happens in a synchronous SEGV handler, so in
2224 practice it seems to be ok. */
2225 mmap_lock();
2227 host_start = address & qemu_host_page_mask;
2228 page_index = host_start >> TARGET_PAGE_BITS;
2229 p1 = page_find(page_index);
2230 if (!p1) {
2231 mmap_unlock();
2232 return 0;
2234 host_end = host_start + qemu_host_page_size;
2235 p = p1;
2236 prot = 0;
2237 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2238 prot |= p->flags;
2239 p++;
2241 /* if the page was really writable, then we change its
2242 protection back to writable */
2243 if (prot & PAGE_WRITE_ORG) {
2244 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2245 if (!(p1[pindex].flags & PAGE_WRITE)) {
2246 mprotect((void *)g2h(host_start), qemu_host_page_size,
2247 (prot & PAGE_BITS) | PAGE_WRITE);
2248 p1[pindex].flags |= PAGE_WRITE;
2249 /* and since the content will be modified, we must invalidate
2250 the corresponding translated code. */
2251 tb_invalidate_phys_page(address, pc, puc);
2252 #ifdef DEBUG_TB_CHECK
2253 tb_invalidate_check(address);
2254 #endif
2255 mmap_unlock();
2256 return 1;
2259 mmap_unlock();
2260 return 0;
2263 static inline void tlb_set_dirty(CPUState *env,
2264 unsigned long addr, target_ulong vaddr)
2267 #endif /* defined(CONFIG_USER_ONLY) */
2269 #if !defined(CONFIG_USER_ONLY)
2271 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2272 ram_addr_t memory, ram_addr_t region_offset);
2273 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2274 ram_addr_t orig_memory, ram_addr_t region_offset);
2275 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2276 need_subpage) \
2277 do { \
2278 if (addr > start_addr) \
2279 start_addr2 = 0; \
2280 else { \
2281 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2282 if (start_addr2 > 0) \
2283 need_subpage = 1; \
2286 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2287 end_addr2 = TARGET_PAGE_SIZE - 1; \
2288 else { \
2289 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2290 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2291 need_subpage = 1; \
2293 } while (0)
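/* Worked example (assuming 4 KiB target pages, TARGET_PAGE_SIZE == 0x1000):
   registering orig_size = 0x200 bytes at start_addr = 0x10400 visits a
   single page with addr == start_addr, so the macro yields
   start_addr2 = 0x400 and end_addr2 = 0x5ff and sets need_subpage,
   because the region covers only part of that page. */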
2295 /* register physical memory. 'size' must be a multiple of the target
2296 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2297 io memory page. The address used when calling the IO function is
2298 the offset from the start of the region, plus region_offset. Both
2299 start_addr and region_offset are rounded down to a page boundary
2300 before calculating this offset. This should not be a problem unless
2301 the low bits of start_addr and region_offset differ. */
2302 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2303 ram_addr_t size,
2304 ram_addr_t phys_offset,
2305 ram_addr_t region_offset)
2307 target_phys_addr_t addr, end_addr;
2308 PhysPageDesc *p;
2309 CPUState *env;
2310 ram_addr_t orig_size = size;
2311 void *subpage;
2313 #ifdef USE_KQEMU
2314 /* XXX: should not depend on cpu context */
2315 env = first_cpu;
2316 if (env->kqemu_enabled) {
2317 kqemu_set_phys_mem(start_addr, size, phys_offset);
2319 #endif
2320 if (kvm_enabled())
2321 kvm_set_phys_mem(start_addr, size, phys_offset);
2323 if (phys_offset == IO_MEM_UNASSIGNED) {
2324 region_offset = start_addr;
2326 region_offset &= TARGET_PAGE_MASK;
2327 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2328 end_addr = start_addr + (target_phys_addr_t)size;
2329 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2330 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2331 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2332 ram_addr_t orig_memory = p->phys_offset;
2333 target_phys_addr_t start_addr2, end_addr2;
2334 int need_subpage = 0;
2336 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2337 need_subpage);
2338 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2339 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2340 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2341 &p->phys_offset, orig_memory,
2342 p->region_offset);
2343 } else {
2344 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2345 >> IO_MEM_SHIFT];
2347 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2348 region_offset);
2349 p->region_offset = 0;
2350 } else {
2351 p->phys_offset = phys_offset;
2352 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2353 (phys_offset & IO_MEM_ROMD))
2354 phys_offset += TARGET_PAGE_SIZE;
2356 } else {
2357 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2358 p->phys_offset = phys_offset;
2359 p->region_offset = region_offset;
2360 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2361 (phys_offset & IO_MEM_ROMD)) {
2362 phys_offset += TARGET_PAGE_SIZE;
2363 } else {
2364 target_phys_addr_t start_addr2, end_addr2;
2365 int need_subpage = 0;
2367 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2368 end_addr2, need_subpage);
2370 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2371 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2372 &p->phys_offset, IO_MEM_UNASSIGNED,
2373 addr & TARGET_PAGE_MASK);
2374 subpage_register(subpage, start_addr2, end_addr2,
2375 phys_offset, region_offset);
2376 p->region_offset = 0;
2380 region_offset += TARGET_PAGE_SIZE;
2383 /* since each CPU stores ram addresses in its TLB cache, we must
2384 reset the modified entries */
2385 /* XXX: slow ! */
2386 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2387 tlb_flush(env, 1);
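/* Illustrative usage sketch (not compiled; the address, size and helper name
   are hypothetical): how a machine model would back a guest-physical range
   with RAM obtained from qemu_ram_alloc() below.  A phys_offset with no low
   bits set is treated as plain RAM. */
#if 0
static void example_register_ram(void)
{
    ram_addr_t ram_offset = qemu_ram_alloc(0x100000);   /* 1 MB of RAM */

    /* Map it at guest-physical address 0; region_offset is 0 for RAM. */
    cpu_register_physical_memory_offset(0x00000000, 0x100000, ram_offset, 0);
}
#endif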
2391 /* XXX: temporary until new memory mapping API */
2392 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2394 PhysPageDesc *p;
2396 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2397 if (!p)
2398 return IO_MEM_UNASSIGNED;
2399 return p->phys_offset;
2402 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2404 if (kvm_enabled())
2405 kvm_coalesce_mmio_region(addr, size);
2408 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2410 if (kvm_enabled())
2411 kvm_uncoalesce_mmio_region(addr, size);
2414 /* XXX: better than nothing */
2415 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2417 ram_addr_t addr;
2418 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2419 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2420 (uint64_t)size, (uint64_t)phys_ram_size);
2421 abort();
2423 addr = phys_ram_alloc_offset;
2424 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2425 return addr;
2428 void qemu_ram_free(ram_addr_t addr)
2432 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2434 #ifdef DEBUG_UNASSIGNED
2435 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2436 #endif
2437 #if defined(TARGET_SPARC)
2438 do_unassigned_access(addr, 0, 0, 0, 1);
2439 #endif
2440 return 0;
2443 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2445 #ifdef DEBUG_UNASSIGNED
2446 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2447 #endif
2448 #if defined(TARGET_SPARC)
2449 do_unassigned_access(addr, 0, 0, 0, 2);
2450 #endif
2451 return 0;
2454 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2456 #ifdef DEBUG_UNASSIGNED
2457 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2458 #endif
2459 #if defined(TARGET_SPARC)
2460 do_unassigned_access(addr, 0, 0, 0, 4);
2461 #endif
2462 return 0;
2465 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2467 #ifdef DEBUG_UNASSIGNED
2468 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2469 #endif
2470 #if defined(TARGET_SPARC)
2471 do_unassigned_access(addr, 1, 0, 0, 1);
2472 #endif
2475 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2477 #ifdef DEBUG_UNASSIGNED
2478 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2479 #endif
2480 #if defined(TARGET_SPARC)
2481 do_unassigned_access(addr, 1, 0, 0, 2);
2482 #endif
2485 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2487 #ifdef DEBUG_UNASSIGNED
2488 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2489 #endif
2490 #if defined(TARGET_SPARC)
2491 do_unassigned_access(addr, 1, 0, 0, 4);
2492 #endif
2495 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2496 unassigned_mem_readb,
2497 unassigned_mem_readw,
2498 unassigned_mem_readl,
2501 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2502 unassigned_mem_writeb,
2503 unassigned_mem_writew,
2504 unassigned_mem_writel,
2507 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2508 uint32_t val)
2510 int dirty_flags;
2511 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2512 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2513 #if !defined(CONFIG_USER_ONLY)
2514 tb_invalidate_phys_page_fast(ram_addr, 1);
2515 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2516 #endif
2518 stb_p(phys_ram_base + ram_addr, val);
2519 #ifdef USE_KQEMU
2520 if (cpu_single_env->kqemu_enabled &&
2521 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2522 kqemu_modify_page(cpu_single_env, ram_addr);
2523 #endif
2524 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2525 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2526 /* we remove the notdirty callback only if the code has been
2527 flushed */
2528 if (dirty_flags == 0xff)
2529 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2532 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2533 uint32_t val)
2535 int dirty_flags;
2536 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2537 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2538 #if !defined(CONFIG_USER_ONLY)
2539 tb_invalidate_phys_page_fast(ram_addr, 2);
2540 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2541 #endif
2543 stw_p(phys_ram_base + ram_addr, val);
2544 #ifdef USE_KQEMU
2545 if (cpu_single_env->kqemu_enabled &&
2546 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2547 kqemu_modify_page(cpu_single_env, ram_addr);
2548 #endif
2549 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2550 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2551 /* we remove the notdirty callback only if the code has been
2552 flushed */
2553 if (dirty_flags == 0xff)
2554 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2557 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2558 uint32_t val)
2560 int dirty_flags;
2561 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2562 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2563 #if !defined(CONFIG_USER_ONLY)
2564 tb_invalidate_phys_page_fast(ram_addr, 4);
2565 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2566 #endif
2568 stl_p(phys_ram_base + ram_addr, val);
2569 #ifdef USE_KQEMU
2570 if (cpu_single_env->kqemu_enabled &&
2571 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2572 kqemu_modify_page(cpu_single_env, ram_addr);
2573 #endif
2574 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2575 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2576 /* we remove the notdirty callback only if the code has been
2577 flushed */
2578 if (dirty_flags == 0xff)
2579 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
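/* The notdirty handlers above back the TLB_NOTDIRTY slow path: a write to a
   RAM page that is not yet fully dirty (for example because it contains
   translated code) first invalidates any TBs on the page, then performs the
   store, then sets the dirty bits.  Once every dirty flag for the page is
   set (0xff), the TLB entry is switched back to plain RAM via
   tlb_set_dirty() so later writes take the fast path. */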
2582 static CPUReadMemoryFunc *error_mem_read[3] = {
2583 NULL, /* never used */
2584 NULL, /* never used */
2585 NULL, /* never used */
2588 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2589 notdirty_mem_writeb,
2590 notdirty_mem_writew,
2591 notdirty_mem_writel,
2594 /* Generate a debug exception if a watchpoint has been hit. */
2595 static void check_watchpoint(int offset, int len_mask, int flags)
2597 CPUState *env = cpu_single_env;
2598 target_ulong pc, cs_base;
2599 TranslationBlock *tb;
2600 target_ulong vaddr;
2601 CPUWatchpoint *wp;
2602 int cpu_flags;
2604 if (env->watchpoint_hit) {
2605 /* We re-entered the check after replacing the TB. Now raise
2606 * the debug interrupt so that is will trigger after the
2607 * current instruction. */
2608 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2609 return;
2611 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2612 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2613 if ((vaddr == (wp->vaddr & len_mask) ||
2614 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2615 wp->flags |= BP_WATCHPOINT_HIT;
2616 if (!env->watchpoint_hit) {
2617 env->watchpoint_hit = wp;
2618 tb = tb_find_pc(env->mem_io_pc);
2619 if (!tb) {
2620 cpu_abort(env, "check_watchpoint: could not find TB for "
2621 "pc=%p", (void *)env->mem_io_pc);
2623 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2624 tb_phys_invalidate(tb, -1);
2625 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2626 env->exception_index = EXCP_DEBUG;
2627 } else {
2628 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2629 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2631 cpu_resume_from_signal(env, NULL);
2633 } else {
2634 wp->flags &= ~BP_WATCHPOINT_HIT;
2639 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2640 so these check for a hit then pass through to the normal out-of-line
2641 phys routines. */
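/* The len_mask argument passed below encodes the access size: ~0x0 for
   byte, ~0x1 for 16-bit and ~0x3 for 32-bit accesses. */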
2642 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2644 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2645 return ldub_phys(addr);
2648 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2650 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2651 return lduw_phys(addr);
2654 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2656 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2657 return ldl_phys(addr);
2660 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2661 uint32_t val)
2663 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2664 stb_phys(addr, val);
2667 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2668 uint32_t val)
2670 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2671 stw_phys(addr, val);
2674 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2675 uint32_t val)
2677 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2678 stl_phys(addr, val);
2681 static CPUReadMemoryFunc *watch_mem_read[3] = {
2682 watch_mem_readb,
2683 watch_mem_readw,
2684 watch_mem_readl,
2687 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2688 watch_mem_writeb,
2689 watch_mem_writew,
2690 watch_mem_writel,
2693 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2694 unsigned int len)
2696 uint32_t ret;
2697 unsigned int idx;
2699 idx = SUBPAGE_IDX(addr);
2700 #if defined(DEBUG_SUBPAGE)
2701 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2702 mmio, len, addr, idx);
2703 #endif
2704 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2705 addr + mmio->region_offset[idx][0][len]);
2707 return ret;
2710 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2711 uint32_t value, unsigned int len)
2713 unsigned int idx;
2715 idx = SUBPAGE_IDX(addr);
2716 #if defined(DEBUG_SUBPAGE)
2717 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2718 mmio, len, addr, idx, value);
2719 #endif
2720 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2721 addr + mmio->region_offset[idx][1][len],
2722 value);
2725 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2727 #if defined(DEBUG_SUBPAGE)
2728 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2729 #endif
2731 return subpage_readlen(opaque, addr, 0);
2734 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2735 uint32_t value)
2737 #if defined(DEBUG_SUBPAGE)
2738 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2739 #endif
2740 subpage_writelen(opaque, addr, value, 0);
2743 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2745 #if defined(DEBUG_SUBPAGE)
2746 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2747 #endif
2749 return subpage_readlen(opaque, addr, 1);
2752 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2753 uint32_t value)
2755 #if defined(DEBUG_SUBPAGE)
2756 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2757 #endif
2758 subpage_writelen(opaque, addr, value, 1);
2761 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2763 #if defined(DEBUG_SUBPAGE)
2764 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2765 #endif
2767 return subpage_readlen(opaque, addr, 2);
2770 static void subpage_writel (void *opaque,
2771 target_phys_addr_t addr, uint32_t value)
2773 #if defined(DEBUG_SUBPAGE)
2774 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2775 #endif
2776 subpage_writelen(opaque, addr, value, 2);
2779 static CPUReadMemoryFunc *subpage_read[] = {
2780 &subpage_readb,
2781 &subpage_readw,
2782 &subpage_readl,
2785 static CPUWriteMemoryFunc *subpage_write[] = {
2786 &subpage_writeb,
2787 &subpage_writew,
2788 &subpage_writel,
2791 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2792 ram_addr_t memory, ram_addr_t region_offset)
2794 int idx, eidx;
2795 unsigned int i;
2797 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2798 return -1;
2799 idx = SUBPAGE_IDX(start);
2800 eidx = SUBPAGE_IDX(end);
2801 #if defined(DEBUG_SUBPAGE)
2802 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2803 mmio, start, end, idx, eidx, memory);
2804 #endif
2805 memory >>= IO_MEM_SHIFT;
2806 for (; idx <= eidx; idx++) {
2807 for (i = 0; i < 4; i++) {
2808 if (io_mem_read[memory][i]) {
2809 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2810 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2811 mmio->region_offset[idx][0][i] = region_offset;
2813 if (io_mem_write[memory][i]) {
2814 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2815 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2816 mmio->region_offset[idx][1][i] = region_offset;
2821 return 0;
2824 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2825 ram_addr_t orig_memory, ram_addr_t region_offset)
2827 subpage_t *mmio;
2828 int subpage_memory;
2830 mmio = qemu_mallocz(sizeof(subpage_t));
2832 mmio->base = base;
2833 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2834 #if defined(DEBUG_SUBPAGE)
2835 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2836 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2837 #endif
2838 *phys = subpage_memory | IO_MEM_SUBPAGE;
2839 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2840 region_offset);
2842 return mmio;
2845 static int get_free_io_mem_idx(void)
2847 int i;
2849 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2850 if (!io_mem_used[i]) {
2851 io_mem_used[i] = 1;
2852 return i;
2855 return -1;
2858 static void io_mem_init(void)
2860 int i;
2862 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2863 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2864 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2865 for (i=0; i<5; i++)
2866 io_mem_used[i] = 1;
2868 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2869 watch_mem_write, NULL);
2870 /* alloc dirty bits array */
2871 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2872 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2875 /* mem_read and mem_write are arrays of functions containing the
2876 function to access byte (index 0), word (index 1) and dword (index
2877 2). Functions can be omitted with a NULL function pointer. The
2878 registered functions may be modified dynamically later.
2879 If io_index is non-zero, the corresponding io zone is
2880 modified. If it is zero, a new io zone is allocated. The return
2881 value can be used with cpu_register_physical_memory(). (-1) is
2882 returned on error. */
2883 int cpu_register_io_memory(int io_index,
2884 CPUReadMemoryFunc **mem_read,
2885 CPUWriteMemoryFunc **mem_write,
2886 void *opaque)
2888 int i, subwidth = 0;
2890 if (io_index <= 0) {
2891 io_index = get_free_io_mem_idx();
2892 if (io_index == -1)
2893 return io_index;
2894 } else {
2895 if (io_index >= IO_MEM_NB_ENTRIES)
2896 return -1;
2899 for(i = 0;i < 3; i++) {
2900 if (!mem_read[i] || !mem_write[i])
2901 subwidth = IO_MEM_SUBWIDTH;
2902 io_mem_read[io_index][i] = mem_read[i];
2903 io_mem_write[io_index][i] = mem_write[i];
2905 io_mem_opaque[io_index] = opaque;
2906 return (io_index << IO_MEM_SHIFT) | subwidth;
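/* Illustrative usage sketch (not compiled; the device names and callbacks
   are hypothetical): registering a small MMIO region that only implements
   32-bit accesses.  Leaving the byte and word slots NULL makes the returned
   index carry IO_MEM_SUBWIDTH, as described above. */
#if 0
static uint32_t demo_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                       /* device-specific register read */
}

static void demo_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    /* device-specific register write */
}

static CPUReadMemoryFunc *demo_mmio_read[3] = {
    NULL, NULL, demo_mmio_readl,
};

static CPUWriteMemoryFunc *demo_mmio_write[3] = {
    NULL, NULL, demo_mmio_writel,
};

static void demo_mmio_map(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(0, demo_mmio_read, demo_mmio_write, NULL);
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}
#endif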
2909 void cpu_unregister_io_memory(int io_table_address)
2911 int i;
2912 int io_index = io_table_address >> IO_MEM_SHIFT;
2914 for (i=0;i < 3; i++) {
2915 io_mem_read[io_index][i] = unassigned_mem_read[i];
2916 io_mem_write[io_index][i] = unassigned_mem_write[i];
2918 io_mem_opaque[io_index] = NULL;
2919 io_mem_used[io_index] = 0;
2922 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2924 return io_mem_write[io_index >> IO_MEM_SHIFT];
2927 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2929 return io_mem_read[io_index >> IO_MEM_SHIFT];
2932 #endif /* !defined(CONFIG_USER_ONLY) */
2934 /* physical memory access (slow version, mainly for debug) */
2935 #if defined(CONFIG_USER_ONLY)
2936 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2937 int len, int is_write)
2939 int l, flags;
2940 target_ulong page;
2941 void * p;
2943 while (len > 0) {
2944 page = addr & TARGET_PAGE_MASK;
2945 l = (page + TARGET_PAGE_SIZE) - addr;
2946 if (l > len)
2947 l = len;
2948 flags = page_get_flags(page);
2949 if (!(flags & PAGE_VALID))
2950 return;
2951 if (is_write) {
2952 if (!(flags & PAGE_WRITE))
2953 return;
2954 /* XXX: this code should not depend on lock_user */
2955 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2956 /* FIXME - should this return an error rather than just fail? */
2957 return;
2958 memcpy(p, buf, l);
2959 unlock_user(p, addr, l);
2960 } else {
2961 if (!(flags & PAGE_READ))
2962 return;
2963 /* XXX: this code should not depend on lock_user */
2964 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2965 /* FIXME - should this return an error rather than just fail? */
2966 return;
2967 memcpy(buf, p, l);
2968 unlock_user(p, addr, 0);
2970 len -= l;
2971 buf += l;
2972 addr += l;
2976 #else
2977 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2978 int len, int is_write)
2980 int l, io_index;
2981 uint8_t *ptr;
2982 uint32_t val;
2983 target_phys_addr_t page;
2984 unsigned long pd;
2985 PhysPageDesc *p;
2987 while (len > 0) {
2988 page = addr & TARGET_PAGE_MASK;
2989 l = (page + TARGET_PAGE_SIZE) - addr;
2990 if (l > len)
2991 l = len;
2992 p = phys_page_find(page >> TARGET_PAGE_BITS);
2993 if (!p) {
2994 pd = IO_MEM_UNASSIGNED;
2995 } else {
2996 pd = p->phys_offset;
2999 if (is_write) {
3000 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3001 target_phys_addr_t addr1 = addr;
3002 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3003 if (p)
3004 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3005 /* XXX: could force cpu_single_env to NULL to avoid
3006 potential bugs */
3007 if (l >= 4 && ((addr1 & 3) == 0)) {
3008 /* 32 bit write access */
3009 val = ldl_p(buf);
3010 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3011 l = 4;
3012 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3013 /* 16 bit write access */
3014 val = lduw_p(buf);
3015 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3016 l = 2;
3017 } else {
3018 /* 8 bit write access */
3019 val = ldub_p(buf);
3020 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3021 l = 1;
3023 } else {
3024 unsigned long addr1;
3025 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3026 /* RAM case */
3027 ptr = phys_ram_base + addr1;
3028 memcpy(ptr, buf, l);
3029 if (!cpu_physical_memory_is_dirty(addr1)) {
3030 /* invalidate code */
3031 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3032 /* set dirty bit */
3033 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3034 (0xff & ~CODE_DIRTY_FLAG);
3036 /* qemu doesn't execute guest code directly, but kvm does,
3037 so flush the instruction caches */
3038 if (kvm_enabled())
3039 flush_icache_range((unsigned long)ptr,
3040 ((unsigned long)ptr)+l);
3042 } else {
3043 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3044 !(pd & IO_MEM_ROMD)) {
3045 target_phys_addr_t addr1 = addr;
3046 /* I/O case */
3047 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3048 if (p)
3049 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3050 if (l >= 4 && ((addr1 & 3) == 0)) {
3051 /* 32 bit read access */
3052 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3053 stl_p(buf, val);
3054 l = 4;
3055 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3056 /* 16 bit read access */
3057 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3058 stw_p(buf, val);
3059 l = 2;
3060 } else {
3061 /* 8 bit read access */
3062 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3063 stb_p(buf, val);
3064 l = 1;
3066 } else {
3067 /* RAM case */
3068 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3069 (addr & ~TARGET_PAGE_MASK);
3070 memcpy(buf, ptr, l);
3073 len -= l;
3074 buf += l;
3075 addr += l;
3079 /* used for ROM loading : can write in RAM and ROM */
3080 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3081 const uint8_t *buf, int len)
3083 int l;
3084 uint8_t *ptr;
3085 target_phys_addr_t page;
3086 unsigned long pd;
3087 PhysPageDesc *p;
3089 while (len > 0) {
3090 page = addr & TARGET_PAGE_MASK;
3091 l = (page + TARGET_PAGE_SIZE) - addr;
3092 if (l > len)
3093 l = len;
3094 p = phys_page_find(page >> TARGET_PAGE_BITS);
3095 if (!p) {
3096 pd = IO_MEM_UNASSIGNED;
3097 } else {
3098 pd = p->phys_offset;
3101 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3102 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3103 !(pd & IO_MEM_ROMD)) {
3104 /* do nothing */
3105 } else {
3106 unsigned long addr1;
3107 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3108 /* ROM/RAM case */
3109 ptr = phys_ram_base + addr1;
3110 memcpy(ptr, buf, l);
3112 len -= l;
3113 buf += l;
3114 addr += l;
3118 typedef struct {
3119 void *buffer;
3120 target_phys_addr_t addr;
3121 target_phys_addr_t len;
3122 } BounceBuffer;
3124 static BounceBuffer bounce;
3126 typedef struct MapClient {
3127 void *opaque;
3128 void (*callback)(void *opaque);
3129 LIST_ENTRY(MapClient) link;
3130 } MapClient;
3132 static LIST_HEAD(map_client_list, MapClient) map_client_list
3133 = LIST_HEAD_INITIALIZER(map_client_list);
3135 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3137 MapClient *client = qemu_malloc(sizeof(*client));
3139 client->opaque = opaque;
3140 client->callback = callback;
3141 LIST_INSERT_HEAD(&map_client_list, client, link);
3142 return client;
3145 void cpu_unregister_map_client(void *_client)
3147 MapClient *client = (MapClient *)_client;
3149 LIST_REMOVE(client, link);
3152 static void cpu_notify_map_clients(void)
3154 MapClient *client;
3156 while (!LIST_EMPTY(&map_client_list)) {
3157 client = LIST_FIRST(&map_client_list);
3158 client->callback(client->opaque);
3159 LIST_REMOVE(client, link);
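/* Illustrative usage sketch (not compiled; names are hypothetical): a caller
   whose map attempt failed can register a callback so the transfer is
   retried once the bounce buffer is released. */
#if 0
static void example_map_retry_cb(void *opaque)
{
    /* The bounce buffer has been released; retry the deferred
       cpu_physical_memory_map() for the transfer described by opaque.
       cpu_notify_map_clients() drops this registration after calling us. */
}

static void example_defer_transfer(void *transfer_state)
{
    cpu_register_map_client(transfer_state, example_map_retry_cb);
}
#endif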
3163 /* Map a physical memory region into a host virtual address.
3164 * May map a subset of the requested range, given by and returned in *plen.
3165 * May return NULL if resources needed to perform the mapping are exhausted.
3166 * Use only for reads OR writes - not for read-modify-write operations.
3167 * Use cpu_register_map_client() to know when retrying the map operation is
3168 * likely to succeed.
3170 void *cpu_physical_memory_map(target_phys_addr_t addr,
3171 target_phys_addr_t *plen,
3172 int is_write)
3174 target_phys_addr_t len = *plen;
3175 target_phys_addr_t done = 0;
3176 int l;
3177 uint8_t *ret = NULL;
3178 uint8_t *ptr;
3179 target_phys_addr_t page;
3180 unsigned long pd;
3181 PhysPageDesc *p;
3182 unsigned long addr1;
3184 while (len > 0) {
3185 page = addr & TARGET_PAGE_MASK;
3186 l = (page + TARGET_PAGE_SIZE) - addr;
3187 if (l > len)
3188 l = len;
3189 p = phys_page_find(page >> TARGET_PAGE_BITS);
3190 if (!p) {
3191 pd = IO_MEM_UNASSIGNED;
3192 } else {
3193 pd = p->phys_offset;
3196 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3197 if (done || bounce.buffer) {
3198 break;
3200 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3201 bounce.addr = addr;
3202 bounce.len = l;
3203 if (!is_write) {
3204 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3206 ptr = bounce.buffer;
3207 } else {
3208 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3209 ptr = phys_ram_base + addr1;
3211 if (!done) {
3212 ret = ptr;
3213 } else if (ret + done != ptr) {
3214 break;
3217 len -= l;
3218 addr += l;
3219 done += l;
3221 *plen = done;
3222 return ret;
3225 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3226 * Will also mark the memory as dirty if is_write == 1. access_len gives
3227 * the amount of memory that was actually read or written by the caller.
3229 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3230 int is_write, target_phys_addr_t access_len)
3232 if (buffer != bounce.buffer) {
3233 if (is_write) {
3234 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3235 while (access_len) {
3236 unsigned l;
3237 l = TARGET_PAGE_SIZE;
3238 if (l > access_len)
3239 l = access_len;
3240 if (!cpu_physical_memory_is_dirty(addr1)) {
3241 /* invalidate code */
3242 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3243 /* set dirty bit */
3244 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3245 (0xff & ~CODE_DIRTY_FLAG);
3247 addr1 += l;
3248 access_len -= l;
3250 if (kvm_enabled())
3251 flush_icache_range((unsigned long)buffer,
3252 (unsigned long)buffer + access_len);
3254 return;
3256 if (is_write) {
3257 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3259 qemu_free(bounce.buffer);
3260 bounce.buffer = NULL;
3261 cpu_notify_map_clients();
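/* Illustrative usage sketch (not compiled; the helper name is hypothetical):
   the typical map/copy/unmap pattern for a device DMA write, falling back to
   cpu_physical_memory_rw() when the mapping is truncated or fails. */
#if 0
static void example_dma_to_guest(target_phys_addr_t dma_addr,
                                 const uint8_t *data,
                                 target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (host && plen == size) {
        memcpy(host, data, size);
        cpu_physical_memory_unmap(host, plen, 1, size);
    } else {
        if (host)
            cpu_physical_memory_unmap(host, plen, 1, 0);
        cpu_physical_memory_rw(dma_addr, (uint8_t *)data, size, 1);
    }
}
#endif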
3264 /* warning: addr must be aligned */
3265 uint32_t ldl_phys(target_phys_addr_t addr)
3267 int io_index;
3268 uint8_t *ptr;
3269 uint32_t val;
3270 unsigned long pd;
3271 PhysPageDesc *p;
3273 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3274 if (!p) {
3275 pd = IO_MEM_UNASSIGNED;
3276 } else {
3277 pd = p->phys_offset;
3280 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3281 !(pd & IO_MEM_ROMD)) {
3282 /* I/O case */
3283 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3284 if (p)
3285 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3286 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3287 } else {
3288 /* RAM case */
3289 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3290 (addr & ~TARGET_PAGE_MASK);
3291 val = ldl_p(ptr);
3293 return val;
3296 /* warning: addr must be aligned */
3297 uint64_t ldq_phys(target_phys_addr_t addr)
3299 int io_index;
3300 uint8_t *ptr;
3301 uint64_t val;
3302 unsigned long pd;
3303 PhysPageDesc *p;
3305 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3306 if (!p) {
3307 pd = IO_MEM_UNASSIGNED;
3308 } else {
3309 pd = p->phys_offset;
3312 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3313 !(pd & IO_MEM_ROMD)) {
3314 /* I/O case */
3315 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3316 if (p)
3317 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3318 #ifdef TARGET_WORDS_BIGENDIAN
3319 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3320 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3321 #else
3322 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3323 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3324 #endif
3325 } else {
3326 /* RAM case */
3327 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3328 (addr & ~TARGET_PAGE_MASK);
3329 val = ldq_p(ptr);
3331 return val;
3334 /* XXX: optimize */
3335 uint32_t ldub_phys(target_phys_addr_t addr)
3337 uint8_t val;
3338 cpu_physical_memory_read(addr, &val, 1);
3339 return val;
3342 /* XXX: optimize */
3343 uint32_t lduw_phys(target_phys_addr_t addr)
3345 uint16_t val;
3346 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3347 return tswap16(val);
3350 #ifdef __GNUC__
3351 #define likely(x) __builtin_expect(!!(x), 1)
3352 #define unlikely(x) __builtin_expect(!!(x), 0)
3353 #else
3354 #define likely(x) x
3355 #define unlikely(x) x
3356 #endif
3358 /* warning: addr must be aligned. The ram page is not marked as dirty
3359 and the code inside is not invalidated. It is useful if the dirty
3360 bits are used to track modified PTEs */
3361 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3363 int io_index;
3364 uint8_t *ptr;
3365 unsigned long pd;
3366 PhysPageDesc *p;
3368 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3369 if (!p) {
3370 pd = IO_MEM_UNASSIGNED;
3371 } else {
3372 pd = p->phys_offset;
3375 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3376 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3377 if (p)
3378 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3379 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3380 } else {
3381 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3382 ptr = phys_ram_base + addr1;
3383 stl_p(ptr, val);
3385 if (unlikely(in_migration)) {
3386 if (!cpu_physical_memory_is_dirty(addr1)) {
3387 /* invalidate code */
3388 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3389 /* set dirty bit */
3390 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3391 (0xff & ~CODE_DIRTY_FLAG);
3397 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3399 int io_index;
3400 uint8_t *ptr;
3401 unsigned long pd;
3402 PhysPageDesc *p;
3404 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3405 if (!p) {
3406 pd = IO_MEM_UNASSIGNED;
3407 } else {
3408 pd = p->phys_offset;
3411 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3412 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3413 if (p)
3414 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3415 #ifdef TARGET_WORDS_BIGENDIAN
3416 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3417 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3418 #else
3419 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3420 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3421 #endif
3422 } else {
3423 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3424 (addr & ~TARGET_PAGE_MASK);
3425 stq_p(ptr, val);
3429 /* warning: addr must be aligned */
3430 void stl_phys(target_phys_addr_t addr, uint32_t val)
3432 int io_index;
3433 uint8_t *ptr;
3434 unsigned long pd;
3435 PhysPageDesc *p;
3437 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3438 if (!p) {
3439 pd = IO_MEM_UNASSIGNED;
3440 } else {
3441 pd = p->phys_offset;
3444 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3445 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3446 if (p)
3447 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3448 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3449 } else {
3450 unsigned long addr1;
3451 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3452 /* RAM case */
3453 ptr = phys_ram_base + addr1;
3454 stl_p(ptr, val);
3455 if (!cpu_physical_memory_is_dirty(addr1)) {
3456 /* invalidate code */
3457 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3458 /* set dirty bit */
3459 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3460 (0xff & ~CODE_DIRTY_FLAG);
3465 /* XXX: optimize */
3466 void stb_phys(target_phys_addr_t addr, uint32_t val)
3468 uint8_t v = val;
3469 cpu_physical_memory_write(addr, &v, 1);
3472 /* XXX: optimize */
3473 void stw_phys(target_phys_addr_t addr, uint32_t val)
3475 uint16_t v = tswap16(val);
3476 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3479 /* XXX: optimize */
3480 void stq_phys(target_phys_addr_t addr, uint64_t val)
3482 val = tswap64(val);
3483 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3486 #endif
3488 /* virtual memory access for debug (includes writing to ROM) */
3489 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3490 uint8_t *buf, int len, int is_write)
3492 int l;
3493 target_phys_addr_t phys_addr;
3494 target_ulong page;
3496 while (len > 0) {
3497 page = addr & TARGET_PAGE_MASK;
3498 phys_addr = cpu_get_phys_page_debug(env, page);
3499 /* if no physical page mapped, return an error */
3500 if (phys_addr == -1)
3501 return -1;
3502 l = (page + TARGET_PAGE_SIZE) - addr;
3503 if (l > len)
3504 l = len;
3505 phys_addr += (addr & ~TARGET_PAGE_MASK);
3506 #if !defined(CONFIG_USER_ONLY)
3507 if (is_write)
3508 cpu_physical_memory_write_rom(phys_addr, buf, l);
3509 else
3510 #endif
3511 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3512 len -= l;
3513 buf += l;
3514 addr += l;
3516 return 0;
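/* Illustrative usage sketch (not compiled; the helper name is hypothetical):
   a debugger-stub style software breakpoint insertion built on the function
   above.  Writes go through cpu_physical_memory_write_rom(), so the patch
   also lands in ROM-backed code. */
#if 0
static int example_insert_sw_breakpoint(CPUState *env1, target_ulong pc_addr,
                                        uint8_t *saved_insn, int insn_len,
                                        const uint8_t *bp_insn)
{
    /* Save the original bytes, then patch in the breakpoint encoding. */
    if (cpu_memory_rw_debug(env1, pc_addr, saved_insn, insn_len, 0) < 0)
        return -1;
    return cpu_memory_rw_debug(env1, pc_addr, (uint8_t *)bp_insn, insn_len, 1);
}
#endif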
3519 /* in deterministic execution mode, instructions doing device I/Os
3520 must be at the end of the TB */
3521 void cpu_io_recompile(CPUState *env, void *retaddr)
3523 TranslationBlock *tb;
3524 uint32_t n, cflags;
3525 target_ulong pc, cs_base;
3526 uint64_t flags;
3528 tb = tb_find_pc((unsigned long)retaddr);
3529 if (!tb) {
3530 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3531 retaddr);
3533 n = env->icount_decr.u16.low + tb->icount;
3534 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3535 /* Calculate how many instructions had been executed before the fault
3536 occurred. */
3537 n = n - env->icount_decr.u16.low;
3538 /* Generate a new TB ending on the I/O insn. */
3539 n++;
3540 /* On MIPS and SH, delay slot instructions can only be restarted if
3541 they were already the first instruction in the TB. If this is not
3542 the first instruction in a TB then re-execute the preceding
3543 branch. */
3544 #if defined(TARGET_MIPS)
3545 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3546 env->active_tc.PC -= 4;
3547 env->icount_decr.u16.low++;
3548 env->hflags &= ~MIPS_HFLAG_BMASK;
3550 #elif defined(TARGET_SH4)
3551 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3552 && n > 1) {
3553 env->pc -= 2;
3554 env->icount_decr.u16.low++;
3555 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3557 #endif
3558 /* This should never happen. */
3559 if (n > CF_COUNT_MASK)
3560 cpu_abort(env, "TB too big during recompile");
3562 cflags = n | CF_LAST_IO;
3563 pc = tb->pc;
3564 cs_base = tb->cs_base;
3565 flags = tb->flags;
3566 tb_phys_invalidate(tb, -1);
3567 /* FIXME: In theory this could raise an exception. In practice
3568 we have already translated the block once so it's probably ok. */
3569 tb_gen_code(env, pc, cs_base, flags, cflags);
3570 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3571 the first in the TB) then we end up generating a whole new TB and
3572 repeating the fault, which is horribly inefficient.
3573 Better would be to execute just this insn uncached, or generate a
3574 second new TB. */
3575 cpu_resume_from_signal(env, NULL);
3578 void dump_exec_info(FILE *f,
3579 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3581 int i, target_code_size, max_target_code_size;
3582 int direct_jmp_count, direct_jmp2_count, cross_page;
3583 TranslationBlock *tb;
3585 target_code_size = 0;
3586 max_target_code_size = 0;
3587 cross_page = 0;
3588 direct_jmp_count = 0;
3589 direct_jmp2_count = 0;
3590 for(i = 0; i < nb_tbs; i++) {
3591 tb = &tbs[i];
3592 target_code_size += tb->size;
3593 if (tb->size > max_target_code_size)
3594 max_target_code_size = tb->size;
3595 if (tb->page_addr[1] != -1)
3596 cross_page++;
3597 if (tb->tb_next_offset[0] != 0xffff) {
3598 direct_jmp_count++;
3599 if (tb->tb_next_offset[1] != 0xffff) {
3600 direct_jmp2_count++;
3604 /* XXX: avoid using doubles ? */
3605 cpu_fprintf(f, "Translation buffer state:\n");
3606 cpu_fprintf(f, "gen code size %ld/%ld\n",
3607 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3608 cpu_fprintf(f, "TB count %d/%d\n",
3609 nb_tbs, code_gen_max_blocks);
3610 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3611 nb_tbs ? target_code_size / nb_tbs : 0,
3612 max_target_code_size);
3613 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3614 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3615 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3616 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3617 cross_page,
3618 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3619 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3620 direct_jmp_count,
3621 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3622 direct_jmp2_count,
3623 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3624 cpu_fprintf(f, "\nStatistics:\n");
3625 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3626 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3627 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3628 tcg_dump_info(f, cpu_fprintf);
3631 #if !defined(CONFIG_USER_ONLY)
3633 #define MMUSUFFIX _cmmu
3634 #define GETPC() NULL
3635 #define env cpu_single_env
3636 #define SOFTMMU_CODE_ACCESS
3638 #define SHIFT 0
3639 #include "softmmu_template.h"
3641 #define SHIFT 1
3642 #include "softmmu_template.h"
3644 #define SHIFT 2
3645 #include "softmmu_template.h"
3647 #define SHIFT 3
3648 #include "softmmu_template.h"
3650 #undef env
3652 #endif