[qemu-kvm/fedora.git] / exec.c (blob 4f7a027d638e039854a67b6dd8dcb7d8d23495c2)
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #endif
45 //#define DEBUG_TB_INVALIDATE
46 //#define DEBUG_FLUSH
47 //#define DEBUG_TLB
48 //#define DEBUG_UNASSIGNED
50 /* make various TB consistency checks */
51 //#define DEBUG_TB_CHECK
52 //#define DEBUG_TLB_CHECK
54 //#define DEBUG_IOPORT
55 //#define DEBUG_SUBPAGE
57 #if !defined(CONFIG_USER_ONLY)
58 /* TB consistency checks only implemented for usermode emulation. */
59 #undef DEBUG_TB_CHECK
60 #endif
62 #define SMC_BITMAP_USE_THRESHOLD 10
64 #define MMAP_AREA_START 0x00000000
65 #define MMAP_AREA_END 0xa8000000
67 #if defined(TARGET_SPARC64)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 41
69 #elif defined(TARGET_SPARC)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 36
71 #elif defined(TARGET_ALPHA)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 #define TARGET_VIRT_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_PPC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 36
80 #else
81 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 #endif
85 TranslationBlock *tbs;
86 int code_gen_max_blocks;
87 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88 int nb_tbs;
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
92 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
93 uint8_t *code_gen_buffer;
94 unsigned long code_gen_buffer_size;
95 /* threshold to flush the translated code buffer */
96 unsigned long code_gen_buffer_max_size;
97 uint8_t *code_gen_ptr;
99 #if !defined(CONFIG_USER_ONLY)
100 ram_addr_t phys_ram_size;
101 int phys_ram_fd;
102 uint8_t *phys_ram_base;
103 uint8_t *phys_ram_dirty;
104 static ram_addr_t phys_ram_alloc_offset = 0;
105 #endif
107 CPUState *first_cpu;
108 /* current CPU in the current thread. It is only valid inside
109 cpu_exec() */
110 CPUState *cpu_single_env;
111 /* 0 = Do not count executed instructions.
112 1 = Precise instruction counting.
113 2 = Adaptive rate instruction counting. */
114 int use_icount = 0;
115 /* Current instruction counter. While executing translated code this may
116 include some instructions that have not yet been executed. */
117 int64_t qemu_icount;
119 typedef struct PageDesc {
120 /* list of TBs intersecting this ram page */
121 TranslationBlock *first_tb;
122 /* in order to optimize self modifying code, we count the number
123 of write accesses to a given page; past a threshold a bitmap is used */
124 unsigned int code_write_count;
125 uint8_t *code_bitmap;
126 #if defined(CONFIG_USER_ONLY)
127 unsigned long flags;
128 #endif
129 } PageDesc;
131 typedef struct PhysPageDesc {
132 /* offset in host memory of the page + io_index in the low bits */
133 ram_addr_t phys_offset;
134 } PhysPageDesc;
136 #define L2_BITS 10
137 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
138 /* XXX: this is a temporary hack for alpha target.
139 * In the future, this is to be replaced by a multi-level table
140 * to actually be able to handle the complete 64-bit address space.
142 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
143 #else
144 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
145 #endif
147 #define L1_SIZE (1 << L1_BITS)
148 #define L2_SIZE (1 << L2_BITS)
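/* Illustrative sketch (not upstream code): the page table below is a
   two-level array. A virtual page number is split into an L1 index
   (upper L1_BITS) and an L2 index (lower L2_BITS), mirroring the
   lookups in page_find() and page_find_alloc(). */
#if 0
static inline void split_page_index_example(target_ulong page_index)
{
    unsigned long l1_index = page_index >> L2_BITS;      /* selects the l1_map[] slot */
    unsigned long l2_index = page_index & (L2_SIZE - 1); /* selects the PageDesc inside it */
    /* e.g. with L2_BITS == 10: page_index 0x12345 -> l1_index 0x48, l2_index 0x345 */
    (void)l1_index;
    (void)l2_index;
}
#endif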
150 unsigned long qemu_real_host_page_size;
151 unsigned long qemu_host_page_bits;
152 unsigned long qemu_host_page_size;
153 unsigned long qemu_host_page_mask;
155 /* XXX: for system emulation, it could just be an array */
156 static PageDesc *l1_map[L1_SIZE];
157 PhysPageDesc **l1_phys_map;
159 #if !defined(CONFIG_USER_ONLY)
160 static void io_mem_init(void);
162 /* io memory support */
163 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
164 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
165 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
166 static int io_mem_nb;
167 static int io_mem_watch;
168 #endif
170 /* log support */
171 char *logfilename = "/tmp/qemu.log";
172 FILE *logfile;
173 int loglevel;
174 static int log_append = 0;
176 /* statistics */
177 static int tlb_flush_count;
178 static int tb_flush_count;
179 static int tb_phys_invalidate_count;
181 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
182 typedef struct subpage_t {
183 target_phys_addr_t base;
184 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
185 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
186 void *opaque[TARGET_PAGE_SIZE][2][4];
187 } subpage_t;
189 #ifdef _WIN32
190 static void map_exec(void *addr, long size)
192 DWORD old_protect;
193 VirtualProtect(addr, size,
194 PAGE_EXECUTE_READWRITE, &old_protect);
197 #else
198 static void map_exec(void *addr, long size)
200 unsigned long start, end, page_size;
202 page_size = getpagesize();
203 start = (unsigned long)addr;
204 start &= ~(page_size - 1);
206 end = (unsigned long)addr + size;
207 end += page_size - 1;
208 end &= ~(page_size - 1);
210 mprotect((void *)start, end - start,
211 PROT_READ | PROT_WRITE | PROT_EXEC);
213 #endif
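/* Worked example (illustrative only, assuming a 4096-byte host page):
   map_exec() rounds the requested range out to whole host pages before
   calling mprotect(). */
#if 0
unsigned long addr_ex  = 0x401234, size_ex = 0x100, psz_ex = 0x1000;
unsigned long start_ex = addr_ex & ~(psz_ex - 1);                          /* 0x401000 */
unsigned long end_ex   = (addr_ex + size_ex + psz_ex - 1) & ~(psz_ex - 1); /* 0x402000 */
#endif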
215 static void page_init(void)
217 /* NOTE: we can always suppose that qemu_host_page_size >=
218 TARGET_PAGE_SIZE */
219 #ifdef _WIN32
221 SYSTEM_INFO system_info;
222 DWORD old_protect;
224 GetSystemInfo(&system_info);
225 qemu_real_host_page_size = system_info.dwPageSize;
227 #else
228 qemu_real_host_page_size = getpagesize();
229 #endif
230 if (qemu_host_page_size == 0)
231 qemu_host_page_size = qemu_real_host_page_size;
232 if (qemu_host_page_size < TARGET_PAGE_SIZE)
233 qemu_host_page_size = TARGET_PAGE_SIZE;
234 qemu_host_page_bits = 0;
235 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
236 qemu_host_page_bits++;
237 qemu_host_page_mask = ~(qemu_host_page_size - 1);
238 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
239 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
241 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
243 long long startaddr, endaddr;
244 FILE *f;
245 int n;
247 mmap_lock();
248 last_brk = (unsigned long)sbrk(0);
249 f = fopen("/proc/self/maps", "r");
250 if (f) {
251 do {
252 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
253 if (n == 2) {
254 startaddr = MIN(startaddr,
255 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
256 endaddr = MIN(endaddr,
257 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
258 page_set_flags(startaddr & TARGET_PAGE_MASK,
259 TARGET_PAGE_ALIGN(endaddr),
260 PAGE_RESERVED);
262 } while (!feof(f));
263 fclose(f);
265 mmap_unlock();
267 #endif
270 static inline PageDesc *page_find_alloc(target_ulong index)
272 PageDesc **lp, *p;
274 #if TARGET_LONG_BITS > 32
275 /* Host memory outside guest VM. For 32-bit targets we have already
276 excluded high addresses. */
277 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
278 return NULL;
279 #endif
280 lp = &l1_map[index >> L2_BITS];
281 p = *lp;
282 if (!p) {
283 /* allocate if not found */
284 #if defined(CONFIG_USER_ONLY)
285 unsigned long addr;
286 size_t len = sizeof(PageDesc) * L2_SIZE;
287 /* Don't use qemu_malloc because it may recurse. */
288 p = mmap(0, len, PROT_READ | PROT_WRITE,
289 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
290 *lp = p;
291 addr = h2g(p);
292 if (addr == (target_ulong)addr) {
293 page_set_flags(addr & TARGET_PAGE_MASK,
294 TARGET_PAGE_ALIGN(addr + len),
295 PAGE_RESERVED);
297 #else
298 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
299 *lp = p;
300 #endif
302 return p + (index & (L2_SIZE - 1));
305 static inline PageDesc *page_find(target_ulong index)
307 PageDesc *p;
309 p = l1_map[index >> L2_BITS];
310 if (!p)
311 return 0;
312 return p + (index & (L2_SIZE - 1));
315 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
317 void **lp, **p;
318 PhysPageDesc *pd;
320 p = (void **)l1_phys_map;
321 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
323 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
324 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
325 #endif
326 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
327 p = *lp;
328 if (!p) {
329 /* allocate if not found */
330 if (!alloc)
331 return NULL;
332 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
333 memset(p, 0, sizeof(void *) * L1_SIZE);
334 *lp = p;
336 #endif
337 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
338 pd = *lp;
339 if (!pd) {
340 int i;
341 /* allocate if not found */
342 if (!alloc)
343 return NULL;
344 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
345 *lp = pd;
346 for (i = 0; i < L2_SIZE; i++)
347 pd[i].phys_offset = IO_MEM_UNASSIGNED;
349 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
352 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
354 return phys_page_find_alloc(index, 0);
357 #if !defined(CONFIG_USER_ONLY)
358 static void tlb_protect_code(ram_addr_t ram_addr);
359 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
360 target_ulong vaddr);
361 #define mmap_lock() do { } while(0)
362 #define mmap_unlock() do { } while(0)
363 #endif
365 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
367 #if defined(CONFIG_USER_ONLY)
368 /* Currently it is not recommended to allocate big chunks of data in
369 user mode. This will change when a dedicated libc is used. */
370 #define USE_STATIC_CODE_GEN_BUFFER
371 #endif
373 #ifdef USE_STATIC_CODE_GEN_BUFFER
374 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
375 #endif
377 void code_gen_alloc(unsigned long tb_size)
379 #ifdef USE_STATIC_CODE_GEN_BUFFER
380 code_gen_buffer = static_code_gen_buffer;
381 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
382 map_exec(code_gen_buffer, code_gen_buffer_size);
383 #else
384 code_gen_buffer_size = tb_size;
385 if (code_gen_buffer_size == 0) {
386 #if defined(CONFIG_USER_ONLY)
387 /* in user mode, phys_ram_size is not meaningful */
388 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
389 #else
390 /* XXX: needs adjustments */
391 code_gen_buffer_size = (int)(phys_ram_size / 4);
392 #endif
394 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
395 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
396 /* The code gen buffer location may have constraints depending on
397 the host cpu and OS */
398 #if defined(__linux__)
400 int flags;
401 flags = MAP_PRIVATE | MAP_ANONYMOUS;
402 #if defined(__x86_64__)
403 flags |= MAP_32BIT;
404 /* Cannot map more than that */
405 if (code_gen_buffer_size > (800 * 1024 * 1024))
406 code_gen_buffer_size = (800 * 1024 * 1024);
407 #endif
408 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
409 PROT_WRITE | PROT_READ | PROT_EXEC,
410 flags, -1, 0);
411 if (code_gen_buffer == MAP_FAILED) {
412 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
413 exit(1);
416 #else
417 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
418 if (!code_gen_buffer) {
419 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
420 exit(1);
422 map_exec(code_gen_buffer, code_gen_buffer_size);
423 #endif
424 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
425 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
426 code_gen_buffer_max_size = code_gen_buffer_size -
427 code_gen_max_block_size();
428 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
429 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
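/* Illustrative sizing sketch (not upstream code). In system mode with
   tb_size == 0 the buffer defaults to a quarter of guest RAM, capped at
   800 MB on x86_64 Linux so the MAP_32BIT mmap() can still succeed: */
#if 0
/* 512 MB guest RAM -> code_gen_buffer_size = 128 MB
   code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE
   code_gen_buffer_max_size leaves room for one maximum-sized block. */
#endif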
432 /* Must be called before using the QEMU cpus. 'tb_size' is the size
433 (in bytes) allocated to the translation buffer. Zero means default
434 size. */
435 void cpu_exec_init_all(unsigned long tb_size)
437 cpu_gen_init();
438 code_gen_alloc(tb_size);
439 code_gen_ptr = code_gen_buffer;
440 page_init();
441 #if !defined(CONFIG_USER_ONLY)
442 io_mem_init();
443 #endif
446 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
448 #define CPU_COMMON_SAVE_VERSION 1
450 static void cpu_common_save(QEMUFile *f, void *opaque)
452 CPUState *env = opaque;
454 qemu_put_be32s(f, &env->halted);
455 qemu_put_be32s(f, &env->interrupt_request);
458 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
460 CPUState *env = opaque;
462 if (version_id != CPU_COMMON_SAVE_VERSION)
463 return -EINVAL;
465 qemu_get_be32s(f, &env->halted);
466 qemu_get_be32s(f, &env->interrupt_request);
467 tlb_flush(env, 1);
469 return 0;
471 #endif
473 void cpu_exec_init(CPUState *env)
475 CPUState **penv;
476 int cpu_index;
478 env->next_cpu = NULL;
479 penv = &first_cpu;
480 cpu_index = 0;
481 while (*penv != NULL) {
482 penv = (CPUState **)&(*penv)->next_cpu;
483 cpu_index++;
485 env->cpu_index = cpu_index;
486 env->nb_watchpoints = 0;
487 *penv = env;
488 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
489 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
490 cpu_common_save, cpu_common_load, env);
491 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
492 cpu_save, cpu_load, env);
493 #endif
496 static inline void invalidate_page_bitmap(PageDesc *p)
498 if (p->code_bitmap) {
499 qemu_free(p->code_bitmap);
500 p->code_bitmap = NULL;
502 p->code_write_count = 0;
505 /* set to NULL all the 'first_tb' fields in all PageDescs */
506 static void page_flush_tb(void)
508 int i, j;
509 PageDesc *p;
511 for(i = 0; i < L1_SIZE; i++) {
512 p = l1_map[i];
513 if (p) {
514 for(j = 0; j < L2_SIZE; j++) {
515 p->first_tb = NULL;
516 invalidate_page_bitmap(p);
517 p++;
523 /* flush all the translation blocks */
524 /* XXX: tb_flush is currently not thread safe */
525 void tb_flush(CPUState *env1)
527 CPUState *env;
528 #if defined(DEBUG_FLUSH)
529 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
530 (unsigned long)(code_gen_ptr - code_gen_buffer),
531 nb_tbs, nb_tbs > 0 ?
532 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
533 #endif
534 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
535 cpu_abort(env1, "Internal error: code buffer overflow\n");
537 nb_tbs = 0;
539 for(env = first_cpu; env != NULL; env = env->next_cpu) {
540 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
543 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
544 page_flush_tb();
546 code_gen_ptr = code_gen_buffer;
547 /* XXX: flush processor icache at this point if cache flush is
548 expensive */
549 tb_flush_count++;
552 #ifdef DEBUG_TB_CHECK
554 static void tb_invalidate_check(target_ulong address)
556 TranslationBlock *tb;
557 int i;
558 address &= TARGET_PAGE_MASK;
559 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
560 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
561 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
562 address >= tb->pc + tb->size)) {
563 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
564 address, (long)tb->pc, tb->size);
570 /* verify that all the pages have correct rights for code */
571 static void tb_page_check(void)
573 TranslationBlock *tb;
574 int i, flags1, flags2;
576 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
577 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
578 flags1 = page_get_flags(tb->pc);
579 flags2 = page_get_flags(tb->pc + tb->size - 1);
580 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
581 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
582 (long)tb->pc, tb->size, flags1, flags2);
588 void tb_jmp_check(TranslationBlock *tb)
590 TranslationBlock *tb1;
591 unsigned int n1;
593 /* suppress any remaining jumps to this TB */
594 tb1 = tb->jmp_first;
595 for(;;) {
596 n1 = (long)tb1 & 3;
597 tb1 = (TranslationBlock *)((long)tb1 & ~3);
598 if (n1 == 2)
599 break;
600 tb1 = tb1->jmp_next[n1];
602 /* check end of list */
603 if (tb1 != tb) {
604 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
608 #endif
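/* Background note for the list helpers below (illustrative, not
   upstream code): TB list pointers carry a tag in their two low bits.
   Tags 0 and 1 select which page_next[]/jmp_next[] slot of the pointed
   TB continues the chain; tag 2 marks the end of the circular jump
   list (see tb->jmp_first below). */
#if 0
static inline TranslationBlock *tb_tag(TranslationBlock *tb, int n)
{
    return (TranslationBlock *)((long)tb | n);
}
static inline int tb_tag_get(TranslationBlock *tb)
{
    return (int)((long)tb & 3);
}
static inline TranslationBlock *tb_untag(TranslationBlock *tb)
{
    return (TranslationBlock *)((long)tb & ~3);
}
#endif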
610 /* invalidate one TB */
611 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
612 int next_offset)
614 TranslationBlock *tb1;
615 for(;;) {
616 tb1 = *ptb;
617 if (tb1 == tb) {
618 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
619 break;
621 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
625 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
627 TranslationBlock *tb1;
628 unsigned int n1;
630 for(;;) {
631 tb1 = *ptb;
632 n1 = (long)tb1 & 3;
633 tb1 = (TranslationBlock *)((long)tb1 & ~3);
634 if (tb1 == tb) {
635 *ptb = tb1->page_next[n1];
636 break;
638 ptb = &tb1->page_next[n1];
642 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
644 TranslationBlock *tb1, **ptb;
645 unsigned int n1;
647 ptb = &tb->jmp_next[n];
648 tb1 = *ptb;
649 if (tb1) {
650 /* find tb(n) in circular list */
651 for(;;) {
652 tb1 = *ptb;
653 n1 = (long)tb1 & 3;
654 tb1 = (TranslationBlock *)((long)tb1 & ~3);
655 if (n1 == n && tb1 == tb)
656 break;
657 if (n1 == 2) {
658 ptb = &tb1->jmp_first;
659 } else {
660 ptb = &tb1->jmp_next[n1];
663 /* now we can suppress tb(n) from the list */
664 *ptb = tb->jmp_next[n];
666 tb->jmp_next[n] = NULL;
670 /* reset the jump entry 'n' of a TB so that it is not chained to
671 another TB */
672 static inline void tb_reset_jump(TranslationBlock *tb, int n)
674 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
677 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
679 CPUState *env;
680 PageDesc *p;
681 unsigned int h, n1;
682 target_phys_addr_t phys_pc;
683 TranslationBlock *tb1, *tb2;
685 /* remove the TB from the hash list */
686 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
687 h = tb_phys_hash_func(phys_pc);
688 tb_remove(&tb_phys_hash[h], tb,
689 offsetof(TranslationBlock, phys_hash_next));
691 /* remove the TB from the page list */
692 if (tb->page_addr[0] != page_addr) {
693 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
694 tb_page_remove(&p->first_tb, tb);
695 invalidate_page_bitmap(p);
697 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
698 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
699 tb_page_remove(&p->first_tb, tb);
700 invalidate_page_bitmap(p);
703 tb_invalidated_flag = 1;
705 /* remove the TB from the hash list */
706 h = tb_jmp_cache_hash_func(tb->pc);
707 for(env = first_cpu; env != NULL; env = env->next_cpu) {
708 if (env->tb_jmp_cache[h] == tb)
709 env->tb_jmp_cache[h] = NULL;
712 /* suppress this TB from the two jump lists */
713 tb_jmp_remove(tb, 0);
714 tb_jmp_remove(tb, 1);
716 /* suppress any remaining jumps to this TB */
717 tb1 = tb->jmp_first;
718 for(;;) {
719 n1 = (long)tb1 & 3;
720 if (n1 == 2)
721 break;
722 tb1 = (TranslationBlock *)((long)tb1 & ~3);
723 tb2 = tb1->jmp_next[n1];
724 tb_reset_jump(tb1, n1);
725 tb1->jmp_next[n1] = NULL;
726 tb1 = tb2;
728 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
730 tb_phys_invalidate_count++;
733 static inline void set_bits(uint8_t *tab, int start, int len)
735 int end, mask, end1;
737 end = start + len;
738 tab += start >> 3;
739 mask = 0xff << (start & 7);
740 if ((start & ~7) == (end & ~7)) {
741 if (start < end) {
742 mask &= ~(0xff << (end & 7));
743 *tab |= mask;
745 } else {
746 *tab++ |= mask;
747 start = (start + 8) & ~7;
748 end1 = end & ~7;
749 while (start < end1) {
750 *tab++ = 0xff;
751 start += 8;
753 if (start < end) {
754 mask = ~(0xff << (end & 7));
755 *tab |= mask;
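/* Worked example (illustrative only): set_bits() marks the half-open
   bit range [start, start + len) in a byte bitmap, LSB first. */
#if 0
uint8_t bitmap_ex[2] = { 0, 0 };
set_bits(bitmap_ex, 3, 7);  /* sets bits 3..9: bitmap_ex[0] == 0xf8, bitmap_ex[1] == 0x03 */
#endif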
760 static void build_page_bitmap(PageDesc *p)
762 int n, tb_start, tb_end;
763 TranslationBlock *tb;
765 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
766 if (!p->code_bitmap)
767 return;
769 tb = p->first_tb;
770 while (tb != NULL) {
771 n = (long)tb & 3;
772 tb = (TranslationBlock *)((long)tb & ~3);
773 /* NOTE: this is subtle as a TB may span two physical pages */
774 if (n == 0) {
775 /* NOTE: tb_end may be after the end of the page, but
776 it is not a problem */
777 tb_start = tb->pc & ~TARGET_PAGE_MASK;
778 tb_end = tb_start + tb->size;
779 if (tb_end > TARGET_PAGE_SIZE)
780 tb_end = TARGET_PAGE_SIZE;
781 } else {
782 tb_start = 0;
783 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
785 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
786 tb = tb->page_next[n];
790 TranslationBlock *tb_gen_code(CPUState *env,
791 target_ulong pc, target_ulong cs_base,
792 int flags, int cflags)
794 TranslationBlock *tb;
795 uint8_t *tc_ptr;
796 target_ulong phys_pc, phys_page2, virt_page2;
797 int code_gen_size;
799 phys_pc = get_phys_addr_code(env, pc);
800 tb = tb_alloc(pc);
801 if (!tb) {
802 /* flush must be done */
803 tb_flush(env);
804 /* cannot fail at this point */
805 tb = tb_alloc(pc);
806 /* Don't forget to invalidate previous TB info. */
807 tb_invalidated_flag = 1;
809 tc_ptr = code_gen_ptr;
810 tb->tc_ptr = tc_ptr;
811 tb->cs_base = cs_base;
812 tb->flags = flags;
813 tb->cflags = cflags;
814 cpu_gen_code(env, tb, &code_gen_size);
815 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
817 /* check next page if needed */
818 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
819 phys_page2 = -1;
820 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
821 phys_page2 = get_phys_addr_code(env, virt_page2);
823 tb_link_phys(tb, phys_pc, phys_page2);
824 return tb;
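/* Worked example (illustrative only, assuming TARGET_PAGE_SIZE == 0x1000):
   a TB whose code crosses a page boundary gets a second physical page
   recorded through tb_link_phys(). */
#if 0
/* pc = 0x1ff8, tb->size = 0x10:
   virt_page2 = (0x1ff8 + 0x10 - 1) & ~0xfff = 0x2000
   (pc & ~0xfff) == 0x1000 != 0x2000, so phys_page2 is resolved and
   both pages end up pointing at this TB. */
#endif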
827 /* invalidate all TBs which intersect with the target physical page
828 starting in range [start, end). NOTE: start and end must refer to
829 the same physical page. 'is_cpu_write_access' should be true if called
830 from a real cpu write access: the virtual CPU will exit the current
831 TB if code is modified inside this TB. */
832 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
833 int is_cpu_write_access)
835 int n, current_tb_modified, current_tb_not_found, current_flags;
836 CPUState *env = cpu_single_env;
837 PageDesc *p;
838 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
839 target_ulong tb_start, tb_end;
840 target_ulong current_pc, current_cs_base;
842 p = page_find(start >> TARGET_PAGE_BITS);
843 if (!p)
844 return;
845 if (!p->code_bitmap &&
846 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
847 is_cpu_write_access) {
848 /* build code bitmap */
849 build_page_bitmap(p);
852 /* we remove all the TBs in the range [start, end) */
853 /* XXX: see if in some cases it could be faster to invalidate all the code */
854 current_tb_not_found = is_cpu_write_access;
855 current_tb_modified = 0;
856 current_tb = NULL; /* avoid warning */
857 current_pc = 0; /* avoid warning */
858 current_cs_base = 0; /* avoid warning */
859 current_flags = 0; /* avoid warning */
860 tb = p->first_tb;
861 while (tb != NULL) {
862 n = (long)tb & 3;
863 tb = (TranslationBlock *)((long)tb & ~3);
864 tb_next = tb->page_next[n];
865 /* NOTE: this is subtle as a TB may span two physical pages */
866 if (n == 0) {
867 /* NOTE: tb_end may be after the end of the page, but
868 it is not a problem */
869 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
870 tb_end = tb_start + tb->size;
871 } else {
872 tb_start = tb->page_addr[1];
873 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
875 if (!(tb_end <= start || tb_start >= end)) {
876 #ifdef TARGET_HAS_PRECISE_SMC
877 if (current_tb_not_found) {
878 current_tb_not_found = 0;
879 current_tb = NULL;
880 if (env->mem_io_pc) {
881 /* now we have a real cpu fault */
882 current_tb = tb_find_pc(env->mem_io_pc);
885 if (current_tb == tb &&
886 (current_tb->cflags & CF_COUNT_MASK) != 1) {
887 /* If we are modifying the current TB, we must stop
888 its execution. We could be more precise by checking
889 that the modification is after the current PC, but it
890 would require a specialized function to partially
891 restore the CPU state */
893 current_tb_modified = 1;
894 cpu_restore_state(current_tb, env,
895 env->mem_io_pc, NULL);
896 #if defined(TARGET_I386)
897 current_flags = env->hflags;
898 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
899 current_cs_base = (target_ulong)env->segs[R_CS].base;
900 current_pc = current_cs_base + env->eip;
901 #else
902 #error unsupported CPU
903 #endif
905 #endif /* TARGET_HAS_PRECISE_SMC */
906 /* we need to do that to handle the case where a signal
907 occurs while doing tb_phys_invalidate() */
908 saved_tb = NULL;
909 if (env) {
910 saved_tb = env->current_tb;
911 env->current_tb = NULL;
913 tb_phys_invalidate(tb, -1);
914 if (env) {
915 env->current_tb = saved_tb;
916 if (env->interrupt_request && env->current_tb)
917 cpu_interrupt(env, env->interrupt_request);
920 tb = tb_next;
922 #if !defined(CONFIG_USER_ONLY)
923 /* if no code remains, there is no need to keep using slow writes */
924 if (!p->first_tb) {
925 invalidate_page_bitmap(p);
926 if (is_cpu_write_access) {
927 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
930 #endif
931 #ifdef TARGET_HAS_PRECISE_SMC
932 if (current_tb_modified) {
933 /* we generate a block containing just the instruction
934 modifying the memory. This ensures that it cannot modify
935 itself */
936 env->current_tb = NULL;
937 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
938 cpu_resume_from_signal(env, NULL);
940 #endif
943 /* len must be <= 8 and start must be a multiple of len */
944 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
946 PageDesc *p;
947 int offset, b;
948 #if 0
949 if (1) {
950 if (loglevel) {
951 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
952 cpu_single_env->mem_io_vaddr, len,
953 cpu_single_env->eip,
954 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
957 #endif
958 p = page_find(start >> TARGET_PAGE_BITS);
959 if (!p)
960 return;
961 if (p->code_bitmap) {
962 offset = start & ~TARGET_PAGE_MASK;
963 b = p->code_bitmap[offset >> 3] >> (offset & 7);
964 if (b & ((1 << len) - 1))
965 goto do_invalidate;
966 } else {
967 do_invalidate:
968 tb_invalidate_phys_page_range(start, start + len, 1);
972 #if !defined(CONFIG_SOFTMMU)
973 static void tb_invalidate_phys_page(target_phys_addr_t addr,
974 unsigned long pc, void *puc)
976 int n, current_flags, current_tb_modified;
977 target_ulong current_pc, current_cs_base;
978 PageDesc *p;
979 TranslationBlock *tb, *current_tb;
980 #ifdef TARGET_HAS_PRECISE_SMC
981 CPUState *env = cpu_single_env;
982 #endif
984 addr &= TARGET_PAGE_MASK;
985 p = page_find(addr >> TARGET_PAGE_BITS);
986 if (!p)
987 return;
988 tb = p->first_tb;
989 current_tb_modified = 0;
990 current_tb = NULL;
991 current_pc = 0; /* avoid warning */
992 current_cs_base = 0; /* avoid warning */
993 current_flags = 0; /* avoid warning */
994 #ifdef TARGET_HAS_PRECISE_SMC
995 if (tb && pc != 0) {
996 current_tb = tb_find_pc(pc);
998 #endif
999 while (tb != NULL) {
1000 n = (long)tb & 3;
1001 tb = (TranslationBlock *)((long)tb & ~3);
1002 #ifdef TARGET_HAS_PRECISE_SMC
1003 if (current_tb == tb &&
1004 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1005 /* If we are modifying the current TB, we must stop
1006 its execution. We could be more precise by checking
1007 that the modification is after the current PC, but it
1008 would require a specialized function to partially
1009 restore the CPU state */
1011 current_tb_modified = 1;
1012 cpu_restore_state(current_tb, env, pc, puc);
1013 #if defined(TARGET_I386)
1014 current_flags = env->hflags;
1015 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1016 current_cs_base = (target_ulong)env->segs[R_CS].base;
1017 current_pc = current_cs_base + env->eip;
1018 #else
1019 #error unsupported CPU
1020 #endif
1022 #endif /* TARGET_HAS_PRECISE_SMC */
1023 tb_phys_invalidate(tb, addr);
1024 tb = tb->page_next[n];
1026 p->first_tb = NULL;
1027 #ifdef TARGET_HAS_PRECISE_SMC
1028 if (current_tb_modified) {
1029 /* we generate a block containing just the instruction
1030 modifying the memory. This ensures that it cannot modify
1031 itself */
1032 env->current_tb = NULL;
1033 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1034 cpu_resume_from_signal(env, puc);
1036 #endif
1038 #endif
1040 /* add the tb to the target page and protect it if necessary */
1041 static inline void tb_alloc_page(TranslationBlock *tb,
1042 unsigned int n, target_ulong page_addr)
1044 PageDesc *p;
1045 TranslationBlock *last_first_tb;
1047 tb->page_addr[n] = page_addr;
1048 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1049 tb->page_next[n] = p->first_tb;
1050 last_first_tb = p->first_tb;
1051 p->first_tb = (TranslationBlock *)((long)tb | n);
1052 invalidate_page_bitmap(p);
1054 #if defined(TARGET_HAS_SMC) || 1
1056 #if defined(CONFIG_USER_ONLY)
1057 if (p->flags & PAGE_WRITE) {
1058 target_ulong addr;
1059 PageDesc *p2;
1060 int prot;
1062 /* force the host page to be non-writable (writes will incur a
1063 page fault + mprotect overhead) */
1064 page_addr &= qemu_host_page_mask;
1065 prot = 0;
1066 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1067 addr += TARGET_PAGE_SIZE) {
1069 p2 = page_find (addr >> TARGET_PAGE_BITS);
1070 if (!p2)
1071 continue;
1072 prot |= p2->flags;
1073 p2->flags &= ~PAGE_WRITE;
1074 page_get_flags(addr);
1076 mprotect(g2h(page_addr), qemu_host_page_size,
1077 (prot & PAGE_BITS) & ~PAGE_WRITE);
1078 #ifdef DEBUG_TB_INVALIDATE
1079 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1080 page_addr);
1081 #endif
1083 #else
1084 /* if some code is already present, then the pages are already
1085 protected. So we handle the case where only the first TB is
1086 allocated in a physical page */
1087 if (!last_first_tb) {
1088 tlb_protect_code(page_addr);
1090 #endif
1092 #endif /* TARGET_HAS_SMC */
1095 /* Allocate a new translation block. Flush the translation buffer if
1096 too many translation blocks or too much generated code. */
1097 TranslationBlock *tb_alloc(target_ulong pc)
1099 TranslationBlock *tb;
1101 if (nb_tbs >= code_gen_max_blocks ||
1102 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1103 return NULL;
1104 tb = &tbs[nb_tbs++];
1105 tb->pc = pc;
1106 tb->cflags = 0;
1107 return tb;
1110 void tb_free(TranslationBlock *tb)
1112 /* In practice this is mostly used for single-use temporary TBs.
1113 Ignore the hard cases and just back up if this TB happens to
1114 be the last one generated. */
1115 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1116 code_gen_ptr = tb->tc_ptr;
1117 nb_tbs--;
1121 /* add a new TB and link it to the physical page tables. phys_page2 is
1122 (-1) to indicate that only one page contains the TB. */
1123 void tb_link_phys(TranslationBlock *tb,
1124 target_ulong phys_pc, target_ulong phys_page2)
1126 unsigned int h;
1127 TranslationBlock **ptb;
1129 /* Grab the mmap lock to stop another thread invalidating this TB
1130 before we are done. */
1131 mmap_lock();
1132 /* add in the physical hash table */
1133 h = tb_phys_hash_func(phys_pc);
1134 ptb = &tb_phys_hash[h];
1135 tb->phys_hash_next = *ptb;
1136 *ptb = tb;
1138 /* add in the page list */
1139 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1140 if (phys_page2 != -1)
1141 tb_alloc_page(tb, 1, phys_page2);
1142 else
1143 tb->page_addr[1] = -1;
1145 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1146 tb->jmp_next[0] = NULL;
1147 tb->jmp_next[1] = NULL;
1149 /* init original jump addresses */
1150 if (tb->tb_next_offset[0] != 0xffff)
1151 tb_reset_jump(tb, 0);
1152 if (tb->tb_next_offset[1] != 0xffff)
1153 tb_reset_jump(tb, 1);
1155 #ifdef DEBUG_TB_CHECK
1156 tb_page_check();
1157 #endif
1158 mmap_unlock();
1161 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1162 tb[1].tc_ptr. Return NULL if not found */
1163 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1165 int m_min, m_max, m;
1166 unsigned long v;
1167 TranslationBlock *tb;
1169 if (nb_tbs <= 0)
1170 return NULL;
1171 if (tc_ptr < (unsigned long)code_gen_buffer ||
1172 tc_ptr >= (unsigned long)code_gen_ptr)
1173 return NULL;
1174 /* binary search (cf Knuth) */
1175 m_min = 0;
1176 m_max = nb_tbs - 1;
1177 while (m_min <= m_max) {
1178 m = (m_min + m_max) >> 1;
1179 tb = &tbs[m];
1180 v = (unsigned long)tb->tc_ptr;
1181 if (v == tc_ptr)
1182 return tb;
1183 else if (tc_ptr < v) {
1184 m_max = m - 1;
1185 } else {
1186 m_min = m + 1;
1189 return &tbs[m_max];
1192 static void tb_reset_jump_recursive(TranslationBlock *tb);
1194 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1196 TranslationBlock *tb1, *tb_next, **ptb;
1197 unsigned int n1;
1199 tb1 = tb->jmp_next[n];
1200 if (tb1 != NULL) {
1201 /* find head of list */
1202 for(;;) {
1203 n1 = (long)tb1 & 3;
1204 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1205 if (n1 == 2)
1206 break;
1207 tb1 = tb1->jmp_next[n1];
1209 /* we are now sure that tb jumps to tb1 */
1210 tb_next = tb1;
1212 /* remove tb from the jmp_first list */
1213 ptb = &tb_next->jmp_first;
1214 for(;;) {
1215 tb1 = *ptb;
1216 n1 = (long)tb1 & 3;
1217 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1218 if (n1 == n && tb1 == tb)
1219 break;
1220 ptb = &tb1->jmp_next[n1];
1222 *ptb = tb->jmp_next[n];
1223 tb->jmp_next[n] = NULL;
1225 /* suppress the jump to next tb in generated code */
1226 tb_reset_jump(tb, n);
1228 /* suppress jumps in the tb on which we could have jumped */
1229 tb_reset_jump_recursive(tb_next);
1233 static void tb_reset_jump_recursive(TranslationBlock *tb)
1235 tb_reset_jump_recursive2(tb, 0);
1236 tb_reset_jump_recursive2(tb, 1);
1239 #if defined(TARGET_HAS_ICE)
1240 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1242 target_phys_addr_t addr;
1243 target_ulong pd;
1244 ram_addr_t ram_addr;
1245 PhysPageDesc *p;
1247 addr = cpu_get_phys_page_debug(env, pc);
1248 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1249 if (!p) {
1250 pd = IO_MEM_UNASSIGNED;
1251 } else {
1252 pd = p->phys_offset;
1254 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1255 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1257 #endif
1259 /* Add a watchpoint. */
1260 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1262 int i;
1264 for (i = 0; i < env->nb_watchpoints; i++) {
1265 if (addr == env->watchpoint[i].vaddr)
1266 return 0;
1268 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1269 return -1;
1271 i = env->nb_watchpoints++;
1272 env->watchpoint[i].vaddr = addr;
1273 env->watchpoint[i].type = type;
1274 tlb_flush_page(env, addr);
1275 /* FIXME: This flush is needed because of the hack to make memory ops
1276 terminate the TB. It can be removed once the proper IO trap and
1277 re-execute bits are in. */
1278 tb_flush(env);
1279 return i;
1282 /* Remove a watchpoint. */
1283 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1285 int i;
1287 for (i = 0; i < env->nb_watchpoints; i++) {
1288 if (addr == env->watchpoint[i].vaddr) {
1289 env->nb_watchpoints--;
1290 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1291 tlb_flush_page(env, addr);
1292 return 0;
1295 return -1;
1298 /* Remove all watchpoints. */
1299 void cpu_watchpoint_remove_all(CPUState *env) {
1300 int i;
1302 for (i = 0; i < env->nb_watchpoints; i++) {
1303 tlb_flush_page(env, env->watchpoint[i].vaddr);
1305 env->nb_watchpoints = 0;
1308 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1309 breakpoint is reached */
1310 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1312 #if defined(TARGET_HAS_ICE)
1313 int i;
1315 for(i = 0; i < env->nb_breakpoints; i++) {
1316 if (env->breakpoints[i] == pc)
1317 return 0;
1320 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1321 return -1;
1322 env->breakpoints[env->nb_breakpoints++] = pc;
1324 breakpoint_invalidate(env, pc);
1325 return 0;
1326 #else
1327 return -1;
1328 #endif
1331 /* remove all breakpoints */
1332 void cpu_breakpoint_remove_all(CPUState *env) {
1333 #if defined(TARGET_HAS_ICE)
1334 int i;
1335 for(i = 0; i < env->nb_breakpoints; i++) {
1336 breakpoint_invalidate(env, env->breakpoints[i]);
1338 env->nb_breakpoints = 0;
1339 #endif
1342 /* remove a breakpoint */
1343 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1345 #if defined(TARGET_HAS_ICE)
1346 int i;
1347 for(i = 0; i < env->nb_breakpoints; i++) {
1348 if (env->breakpoints[i] == pc)
1349 goto found;
1351 return -1;
1352 found:
1353 env->nb_breakpoints--;
1354 if (i < env->nb_breakpoints)
1355 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1357 breakpoint_invalidate(env, pc);
1358 return 0;
1359 #else
1360 return -1;
1361 #endif
1364 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1365 CPU loop after each instruction */
1366 void cpu_single_step(CPUState *env, int enabled)
1368 #if defined(TARGET_HAS_ICE)
1369 if (env->singlestep_enabled != enabled) {
1370 env->singlestep_enabled = enabled;
1371 /* must flush all the translated code to avoid inconsistencies */
1372 /* XXX: only flush what is necessary */
1373 tb_flush(env);
1375 #endif
1378 /* enable or disable low-level logging */
1379 void cpu_set_log(int log_flags)
1381 loglevel = log_flags;
1382 if (loglevel && !logfile) {
1383 logfile = fopen(logfilename, log_append ? "a" : "w");
1384 if (!logfile) {
1385 perror(logfilename);
1386 _exit(1);
1388 #if !defined(CONFIG_SOFTMMU)
1389 /* must avoid glibc's internal mmap() usage by setting a stdio buffer by hand */
1391 static uint8_t logfile_buf[4096];
1392 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1394 #else
1395 setvbuf(logfile, NULL, _IOLBF, 0);
1396 #endif
1397 log_append = 1;
1399 if (!loglevel && logfile) {
1400 fclose(logfile);
1401 logfile = NULL;
1405 void cpu_set_log_filename(const char *filename)
1407 logfilename = strdup(filename);
1408 if (logfile) {
1409 fclose(logfile);
1410 logfile = NULL;
1412 cpu_set_log(loglevel);
1415 /* mask must never be zero, except for A20 change call */
1416 void cpu_interrupt(CPUState *env, int mask)
1418 #if !defined(USE_NPTL)
1419 TranslationBlock *tb;
1420 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1421 #endif
1422 int old_mask;
1424 old_mask = env->interrupt_request;
1425 /* FIXME: This is probably not threadsafe. A different thread could
1426 be in the middle of a read-modify-write operation. */
1427 env->interrupt_request |= mask;
1428 #if defined(USE_NPTL)
1429 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1430 problem and hope the cpu will stop of its own accord. For userspace
1431 emulation this often isn't actually as bad as it sounds. Often
1432 signals are used primarily to interrupt blocking syscalls. */
1433 #else
1434 if (use_icount) {
1435 env->icount_decr.u16.high = 0x8000;
1436 #ifndef CONFIG_USER_ONLY
1437 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1438 an async event happened and we need to process it. */
1439 if (!can_do_io(env)
1440 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1441 cpu_abort(env, "Raised interrupt while not in I/O function");
1443 #endif
1444 } else {
1445 tb = env->current_tb;
1446 /* if the cpu is currently executing code, we must unlink it and
1447 all the potentially executing TBs */
1448 if (tb && !testandset(&interrupt_lock)) {
1449 env->current_tb = NULL;
1450 tb_reset_jump_recursive(tb);
1451 resetlock(&interrupt_lock);
1454 #endif
1457 void cpu_reset_interrupt(CPUState *env, int mask)
1459 env->interrupt_request &= ~mask;
1462 CPULogItem cpu_log_items[] = {
1463 { CPU_LOG_TB_OUT_ASM, "out_asm",
1464 "show generated host assembly code for each compiled TB" },
1465 { CPU_LOG_TB_IN_ASM, "in_asm",
1466 "show target assembly code for each compiled TB" },
1467 { CPU_LOG_TB_OP, "op",
1468 "show micro ops for each compiled TB" },
1469 { CPU_LOG_TB_OP_OPT, "op_opt",
1470 "show micro ops "
1471 #ifdef TARGET_I386
1472 "before eflags optimization and "
1473 #endif
1474 "after liveness analysis" },
1475 { CPU_LOG_INT, "int",
1476 "show interrupts/exceptions in short format" },
1477 { CPU_LOG_EXEC, "exec",
1478 "show trace before each executed TB (lots of logs)" },
1479 { CPU_LOG_TB_CPU, "cpu",
1480 "show CPU state before block translation" },
1481 #ifdef TARGET_I386
1482 { CPU_LOG_PCALL, "pcall",
1483 "show protected mode far calls/returns/exceptions" },
1484 #endif
1485 #ifdef DEBUG_IOPORT
1486 { CPU_LOG_IOPORT, "ioport",
1487 "show all i/o ports accesses" },
1488 #endif
1489 { 0, NULL, NULL },
1492 static int cmp1(const char *s1, int n, const char *s2)
1494 if (strlen(s2) != n)
1495 return 0;
1496 return memcmp(s1, s2, n) == 0;
1499 /* takes a comma-separated list of log masks. Returns 0 on error. */
1500 int cpu_str_to_log_mask(const char *str)
1502 CPULogItem *item;
1503 int mask;
1504 const char *p, *p1;
1506 p = str;
1507 mask = 0;
1508 for(;;) {
1509 p1 = strchr(p, ',');
1510 if (!p1)
1511 p1 = p + strlen(p);
1512 if(cmp1(p,p1-p,"all")) {
1513 for(item = cpu_log_items; item->mask != 0; item++) {
1514 mask |= item->mask;
1516 } else {
1517 for(item = cpu_log_items; item->mask != 0; item++) {
1518 if (cmp1(p, p1 - p, item->name))
1519 goto found;
1521 return 0;
1523 found:
1524 mask |= item->mask;
1525 if (*p1 != ',')
1526 break;
1527 p = p1 + 1;
1529 return mask;
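/* Usage sketch (illustrative only; the names come from cpu_log_items[]
   above, and "all" selects every mask). */
#if 0
int mask_ex = cpu_str_to_log_mask("in_asm,cpu");
/* mask_ex == (CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU); an unknown name makes
   the function return 0. */
#endif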
1532 void cpu_abort(CPUState *env, const char *fmt, ...)
1534 va_list ap;
1535 va_list ap2;
1537 va_start(ap, fmt);
1538 va_copy(ap2, ap);
1539 fprintf(stderr, "qemu: fatal: ");
1540 vfprintf(stderr, fmt, ap);
1541 fprintf(stderr, "\n");
1542 #ifdef TARGET_I386
1543 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1544 #else
1545 cpu_dump_state(env, stderr, fprintf, 0);
1546 #endif
1547 if (logfile) {
1548 fprintf(logfile, "qemu: fatal: ");
1549 vfprintf(logfile, fmt, ap2);
1550 fprintf(logfile, "\n");
1551 #ifdef TARGET_I386
1552 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1553 #else
1554 cpu_dump_state(env, logfile, fprintf, 0);
1555 #endif
1556 fflush(logfile);
1557 fclose(logfile);
1559 va_end(ap2);
1560 va_end(ap);
1561 abort();
1564 CPUState *cpu_copy(CPUState *env)
1566 CPUState *new_env = cpu_init(env->cpu_model_str);
1567 /* preserve chaining and index */
1568 CPUState *next_cpu = new_env->next_cpu;
1569 int cpu_index = new_env->cpu_index;
1570 memcpy(new_env, env, sizeof(CPUState));
1571 new_env->next_cpu = next_cpu;
1572 new_env->cpu_index = cpu_index;
1573 return new_env;
1576 #if !defined(CONFIG_USER_ONLY)
1578 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1580 unsigned int i;
1582 /* Discard jump cache entries for any tb which might potentially
1583 overlap the flushed page. */
1584 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1585 memset (&env->tb_jmp_cache[i], 0,
1586 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1588 i = tb_jmp_cache_hash_page(addr);
1589 memset (&env->tb_jmp_cache[i], 0,
1590 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1593 /* NOTE: if flush_global is true, also flush global entries (not
1594 implemented yet) */
1595 void tlb_flush(CPUState *env, int flush_global)
1597 int i;
1599 #if defined(DEBUG_TLB)
1600 printf("tlb_flush:\n");
1601 #endif
1602 /* must reset current TB so that interrupts cannot modify the
1603 links while we are modifying them */
1604 env->current_tb = NULL;
1606 for(i = 0; i < CPU_TLB_SIZE; i++) {
1607 env->tlb_table[0][i].addr_read = -1;
1608 env->tlb_table[0][i].addr_write = -1;
1609 env->tlb_table[0][i].addr_code = -1;
1610 env->tlb_table[1][i].addr_read = -1;
1611 env->tlb_table[1][i].addr_write = -1;
1612 env->tlb_table[1][i].addr_code = -1;
1613 #if (NB_MMU_MODES >= 3)
1614 env->tlb_table[2][i].addr_read = -1;
1615 env->tlb_table[2][i].addr_write = -1;
1616 env->tlb_table[2][i].addr_code = -1;
1617 #if (NB_MMU_MODES == 4)
1618 env->tlb_table[3][i].addr_read = -1;
1619 env->tlb_table[3][i].addr_write = -1;
1620 env->tlb_table[3][i].addr_code = -1;
1621 #endif
1622 #endif
1625 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1627 #ifdef USE_KQEMU
1628 if (env->kqemu_enabled) {
1629 kqemu_flush(env, flush_global);
1631 #endif
1632 tlb_flush_count++;
1635 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1637 if (addr == (tlb_entry->addr_read &
1638 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1639 addr == (tlb_entry->addr_write &
1640 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1641 addr == (tlb_entry->addr_code &
1642 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1643 tlb_entry->addr_read = -1;
1644 tlb_entry->addr_write = -1;
1645 tlb_entry->addr_code = -1;
1649 void tlb_flush_page(CPUState *env, target_ulong addr)
1651 int i;
1653 #if defined(DEBUG_TLB)
1654 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1655 #endif
1656 /* must reset current TB so that interrupts cannot modify the
1657 links while we are modifying them */
1658 env->current_tb = NULL;
1660 addr &= TARGET_PAGE_MASK;
1661 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1662 tlb_flush_entry(&env->tlb_table[0][i], addr);
1663 tlb_flush_entry(&env->tlb_table[1][i], addr);
1664 #if (NB_MMU_MODES >= 3)
1665 tlb_flush_entry(&env->tlb_table[2][i], addr);
1666 #if (NB_MMU_MODES == 4)
1667 tlb_flush_entry(&env->tlb_table[3][i], addr);
1668 #endif
1669 #endif
1671 tlb_flush_jmp_cache(env, addr);
1673 #ifdef USE_KQEMU
1674 if (env->kqemu_enabled) {
1675 kqemu_flush_page(env, addr);
1677 #endif
1680 /* update the TLBs so that writes to code in the virtual page 'addr'
1681 can be detected */
1682 static void tlb_protect_code(ram_addr_t ram_addr)
1684 cpu_physical_memory_reset_dirty(ram_addr,
1685 ram_addr + TARGET_PAGE_SIZE,
1686 CODE_DIRTY_FLAG);
1689 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1690 tested for self modifying code */
1691 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1692 target_ulong vaddr)
1694 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1697 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1698 unsigned long start, unsigned long length)
1700 unsigned long addr;
1701 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1702 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1703 if ((addr - start) < length) {
1704 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1709 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1710 int dirty_flags)
1712 CPUState *env;
1713 unsigned long length, start1;
1714 int i, mask, len;
1715 uint8_t *p;
1717 start &= TARGET_PAGE_MASK;
1718 end = TARGET_PAGE_ALIGN(end);
1720 length = end - start;
1721 if (length == 0)
1722 return;
1723 len = length >> TARGET_PAGE_BITS;
1724 #ifdef USE_KQEMU
1725 /* XXX: should not depend on cpu context */
1726 env = first_cpu;
1727 if (env->kqemu_enabled) {
1728 ram_addr_t addr;
1729 addr = start;
1730 for(i = 0; i < len; i++) {
1731 kqemu_set_notdirty(env, addr);
1732 addr += TARGET_PAGE_SIZE;
1735 #endif
1736 mask = ~dirty_flags;
1737 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1738 for(i = 0; i < len; i++)
1739 p[i] &= mask;
1741 /* we modify the TLB cache so that the dirty bit will be set again
1742 when accessing the range */
1743 start1 = start + (unsigned long)phys_ram_base;
1744 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1745 for(i = 0; i < CPU_TLB_SIZE; i++)
1746 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1747 for(i = 0; i < CPU_TLB_SIZE; i++)
1748 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1749 #if (NB_MMU_MODES >= 3)
1750 for(i = 0; i < CPU_TLB_SIZE; i++)
1751 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1752 #if (NB_MMU_MODES == 4)
1753 for(i = 0; i < CPU_TLB_SIZE; i++)
1754 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1755 #endif
1756 #endif
1760 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1762 ram_addr_t ram_addr;
1764 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1765 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1766 tlb_entry->addend - (unsigned long)phys_ram_base;
1767 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1768 tlb_entry->addr_write |= TLB_NOTDIRTY;
1773 /* update the TLB according to the current state of the dirty bits */
1774 void cpu_tlb_update_dirty(CPUState *env)
1776 int i;
1777 for(i = 0; i < CPU_TLB_SIZE; i++)
1778 tlb_update_dirty(&env->tlb_table[0][i]);
1779 for(i = 0; i < CPU_TLB_SIZE; i++)
1780 tlb_update_dirty(&env->tlb_table[1][i]);
1781 #if (NB_MMU_MODES >= 3)
1782 for(i = 0; i < CPU_TLB_SIZE; i++)
1783 tlb_update_dirty(&env->tlb_table[2][i]);
1784 #if (NB_MMU_MODES == 4)
1785 for(i = 0; i < CPU_TLB_SIZE; i++)
1786 tlb_update_dirty(&env->tlb_table[3][i]);
1787 #endif
1788 #endif
1791 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1793 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1794 tlb_entry->addr_write = vaddr;
1797 /* update the TLB corresponding to virtual page vaddr
1798 so that it is no longer dirty */
1799 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1801 int i;
1803 vaddr &= TARGET_PAGE_MASK;
1804 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1805 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1806 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1807 #if (NB_MMU_MODES >= 3)
1808 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1809 #if (NB_MMU_MODES == 4)
1810 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1811 #endif
1812 #endif
1815 /* add a new TLB entry. At most one entry for a given virtual address
1816 is permitted. Return 0 if OK or 2 if the page could not be mapped
1817 (can only happen in non SOFTMMU mode for I/O pages or pages
1818 conflicting with the host address space). */
1819 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1820 target_phys_addr_t paddr, int prot,
1821 int mmu_idx, int is_softmmu)
1823 PhysPageDesc *p;
1824 unsigned long pd;
1825 unsigned int index;
1826 target_ulong address;
1827 target_ulong code_address;
1828 target_phys_addr_t addend;
1829 int ret;
1830 CPUTLBEntry *te;
1831 int i;
1832 target_phys_addr_t iotlb;
1834 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1835 if (!p) {
1836 pd = IO_MEM_UNASSIGNED;
1837 } else {
1838 pd = p->phys_offset;
1840 #if defined(DEBUG_TLB)
1841 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1842 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1843 #endif
1845 ret = 0;
1846 address = vaddr;
1847 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1848 /* IO memory case (romd handled later) */
1849 address |= TLB_MMIO;
1851 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1852 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1853 /* Normal RAM. */
1854 iotlb = pd & TARGET_PAGE_MASK;
1855 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1856 iotlb |= IO_MEM_NOTDIRTY;
1857 else
1858 iotlb |= IO_MEM_ROM;
1859 } else {
1860 /* IO handlers are currently passed a physical address.
1861 It would be nice to pass an offset from the base address
1862 of that region. This would avoid having to special case RAM,
1863 and avoid full address decoding in every device.
1864 We can't use the high bits of pd for this because
1865 IO_MEM_ROMD uses these as a ram address. */
1866 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1869 code_address = address;
1870 /* Make accesses to pages with watchpoints go via the
1871 watchpoint trap routines. */
1872 for (i = 0; i < env->nb_watchpoints; i++) {
1873 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1874 iotlb = io_mem_watch + paddr;
1875 /* TODO: The memory case can be optimized by not trapping
1876 reads of pages with a write breakpoint. */
1877 address |= TLB_MMIO;
1881 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1882 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1883 te = &env->tlb_table[mmu_idx][index];
1884 te->addend = addend - vaddr;
1885 if (prot & PAGE_READ) {
1886 te->addr_read = address;
1887 } else {
1888 te->addr_read = -1;
1891 if (prot & PAGE_EXEC) {
1892 te->addr_code = code_address;
1893 } else {
1894 te->addr_code = -1;
1896 if (prot & PAGE_WRITE) {
1897 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1898 (pd & IO_MEM_ROMD)) {
1899 /* Write access calls the I/O callback. */
1900 te->addr_write = address | TLB_MMIO;
1901 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1902 !cpu_physical_memory_is_dirty(pd)) {
1903 te->addr_write = address | TLB_NOTDIRTY;
1904 } else {
1905 te->addr_write = address;
1907 } else {
1908 te->addr_write = -1;
1910 return ret;
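/* Illustrative sketch (not upstream code): for a RAM page the stored
   addend converts a guest virtual address back into a host pointer
   with a single addition. */
#if 0
/* te->addend = (phys_ram_base + (pd & TARGET_PAGE_MASK)) - vaddr, so for
   any address a inside that page:
       host_ptr = (uint8_t *)(a + te->addend)
   points at the backing byte inside phys_ram_base. */
#endif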
1913 #else
1915 void tlb_flush(CPUState *env, int flush_global)
1919 void tlb_flush_page(CPUState *env, target_ulong addr)
1923 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1924 target_phys_addr_t paddr, int prot,
1925 int mmu_idx, int is_softmmu)
1927 return 0;
1930 /* dump memory mappings */
1931 void page_dump(FILE *f)
1933 unsigned long start, end;
1934 int i, j, prot, prot1;
1935 PageDesc *p;
1937 fprintf(f, "%-8s %-8s %-8s %s\n",
1938 "start", "end", "size", "prot");
1939 start = -1;
1940 end = -1;
1941 prot = 0;
1942 for(i = 0; i <= L1_SIZE; i++) {
1943 if (i < L1_SIZE)
1944 p = l1_map[i];
1945 else
1946 p = NULL;
1947 for(j = 0;j < L2_SIZE; j++) {
1948 if (!p)
1949 prot1 = 0;
1950 else
1951 prot1 = p[j].flags;
1952 if (prot1 != prot) {
1953 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1954 if (start != -1) {
1955 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1956 start, end, end - start,
1957 prot & PAGE_READ ? 'r' : '-',
1958 prot & PAGE_WRITE ? 'w' : '-',
1959 prot & PAGE_EXEC ? 'x' : '-');
1961 if (prot1 != 0)
1962 start = end;
1963 else
1964 start = -1;
1965 prot = prot1;
1967 if (!p)
1968 break;
1973 int page_get_flags(target_ulong address)
1975 PageDesc *p;
1977 p = page_find(address >> TARGET_PAGE_BITS);
1978 if (!p)
1979 return 0;
1980 return p->flags;
1983 /* modify the flags of a page and invalidate the code if
1984 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1985 depending on PAGE_WRITE */
1986 void page_set_flags(target_ulong start, target_ulong end, int flags)
1988 PageDesc *p;
1989 target_ulong addr;
1991 /* mmap_lock should already be held. */
1992 start = start & TARGET_PAGE_MASK;
1993 end = TARGET_PAGE_ALIGN(end);
1994 if (flags & PAGE_WRITE)
1995 flags |= PAGE_WRITE_ORG;
1996 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1997 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1998 /* We may be called for host regions that are outside guest
1999 address space. */
2000 if (!p)
2001 return;
2002 /* if the write protection is set, then we invalidate the code
2003 inside */
2004 if (!(p->flags & PAGE_WRITE) &&
2005 (flags & PAGE_WRITE) &&
2006 p->first_tb) {
2007 tb_invalidate_phys_page(addr, 0, NULL);
2009 p->flags = flags;
2013 int page_check_range(target_ulong start, target_ulong len, int flags)
2015 PageDesc *p;
2016 target_ulong end;
2017 target_ulong addr;
2019 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2020 start = start & TARGET_PAGE_MASK;
2022 if( end < start )
2023 /* we've wrapped around */
2024 return -1;
2025 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2026 p = page_find(addr >> TARGET_PAGE_BITS);
2027 if( !p )
2028 return -1;
2029 if( !(p->flags & PAGE_VALID) )
2030 return -1;
2032 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2033 return -1;
2034 if (flags & PAGE_WRITE) {
2035 if (!(p->flags & PAGE_WRITE_ORG))
2036 return -1;
2037 /* unprotect the page if it was put read-only because it
2038 contains translated code */
2039 if (!(p->flags & PAGE_WRITE)) {
2040 if (!page_unprotect(addr, 0, NULL))
2041 return -1;
2043 return 0;
2046 return 0;
2049 /* called from signal handler: invalidate the code and unprotect the
2050 page. Return TRUE if the fault was successfully handled. */
2051 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2053 unsigned int page_index, prot, pindex;
2054 PageDesc *p, *p1;
2055 target_ulong host_start, host_end, addr;
2057 /* Technically this isn't safe inside a signal handler. However we
2058 know this only ever happens in a synchronous SEGV handler, so in
2059 practice it seems to be ok. */
2060 mmap_lock();
2062 host_start = address & qemu_host_page_mask;
2063 page_index = host_start >> TARGET_PAGE_BITS;
2064 p1 = page_find(page_index);
2065 if (!p1) {
2066 mmap_unlock();
2067 return 0;
2069 host_end = host_start + qemu_host_page_size;
2070 p = p1;
2071 prot = 0;
2072 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2073 prot |= p->flags;
2074 p++;
2076 /* if the page was really writable, then we change its
2077 protection back to writable */
2078 if (prot & PAGE_WRITE_ORG) {
2079 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2080 if (!(p1[pindex].flags & PAGE_WRITE)) {
2081 mprotect((void *)g2h(host_start), qemu_host_page_size,
2082 (prot & PAGE_BITS) | PAGE_WRITE);
2083 p1[pindex].flags |= PAGE_WRITE;
2084 /* and since the content will be modified, we must invalidate
2085 the corresponding translated code. */
2086 tb_invalidate_phys_page(address, pc, puc);
2087 #ifdef DEBUG_TB_CHECK
2088 tb_invalidate_check(address);
2089 #endif
2090 mmap_unlock();
2091 return 1;
2094 mmap_unlock();
2095 return 0;
2098 static inline void tlb_set_dirty(CPUState *env,
2099 unsigned long addr, target_ulong vaddr)
2102 #endif /* defined(CONFIG_USER_ONLY) */
2104 #if !defined(CONFIG_USER_ONLY)
2105 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2106 ram_addr_t memory);
2107 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2108 ram_addr_t orig_memory);
2109 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2110 need_subpage) \
2111 do { \
2112 if (addr > start_addr) \
2113 start_addr2 = 0; \
2114 else { \
2115 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2116 if (start_addr2 > 0) \
2117 need_subpage = 1; \
2120 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2121 end_addr2 = TARGET_PAGE_SIZE - 1; \
2122 else { \
2123 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2124 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2125 need_subpage = 1; \
2127 } while (0)
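/* Worked example of the macro above, assuming TARGET_PAGE_SIZE == 0x1000:
 * registering a 0x100-byte I/O range at start_addr = 0x10000800 gives, on
 * the first loop iteration below (addr == start_addr), start_addr2 = 0x800
 * and end_addr2 = 0x8ff.  Both lie strictly inside the page, so
 * need_subpage is set and only that sub-range is handed to
 * subpage_register() instead of the whole page being claimed.
 */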
2129 /* register physical memory. 'size' must be a multiple of the target
2130 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2131 io memory page */
2132 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2133 ram_addr_t size,
2134 ram_addr_t phys_offset)
2136 target_phys_addr_t addr, end_addr;
2137 PhysPageDesc *p;
2138 CPUState *env;
2139 ram_addr_t orig_size = size;
2140 void *subpage;
2142 #ifdef USE_KQEMU
2143 /* XXX: should not depend on cpu context */
2144 env = first_cpu;
2145 if (env->kqemu_enabled) {
2146 kqemu_set_phys_mem(start_addr, size, phys_offset);
2148 #endif
2149 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2150 end_addr = start_addr + (target_phys_addr_t)size;
2151 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2152 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2153 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2154 ram_addr_t orig_memory = p->phys_offset;
2155 target_phys_addr_t start_addr2, end_addr2;
2156 int need_subpage = 0;
2158 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2159 need_subpage);
2160 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2161 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2162 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2163 &p->phys_offset, orig_memory);
2164 } else {
2165 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2166 >> IO_MEM_SHIFT];
2168 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2169 } else {
2170 p->phys_offset = phys_offset;
2171 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2172 (phys_offset & IO_MEM_ROMD))
2173 phys_offset += TARGET_PAGE_SIZE;
2175 } else {
2176 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2177 p->phys_offset = phys_offset;
2178 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2179 (phys_offset & IO_MEM_ROMD))
2180 phys_offset += TARGET_PAGE_SIZE;
2181 else {
2182 target_phys_addr_t start_addr2, end_addr2;
2183 int need_subpage = 0;
2185 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2186 end_addr2, need_subpage);
2188 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2189 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2190 &p->phys_offset, IO_MEM_UNASSIGNED);
2191 subpage_register(subpage, start_addr2, end_addr2,
2192 phys_offset);
2198 /* since each CPU stores ram addresses in its TLB cache, we must
2199 reset the modified entries */
2200 /* XXX: slow! */
2201 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2202 tlb_flush(env, 1);
2206 /* XXX: temporary until new memory mapping API */
2207 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2209 PhysPageDesc *p;
2211 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2212 if (!p)
2213 return IO_MEM_UNASSIGNED;
2214 return p->phys_offset;
2217 /* XXX: better than nothing */
2218 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2220 ram_addr_t addr;
2221 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2222 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2223 (uint64_t)size, (uint64_t)phys_ram_size);
2224 abort();
2226 addr = phys_ram_alloc_offset;
2227 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2228 return addr;
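/* Illustrative sketch, not part of this file: the usual pairing of
 * qemu_ram_alloc() with cpu_register_physical_memory() in board setup
 * code.  The layout is made up for the example: 128 MB of RAM at guest
 * physical 0 and a 128 KB boot ROM at 0x1fc00000.
 *
 *     ram_addr_t ram_offset, rom_offset;
 *
 *     ram_offset = qemu_ram_alloc(0x08000000);
 *     cpu_register_physical_memory(0x00000000, 0x08000000,
 *                                  ram_offset | IO_MEM_RAM);
 *
 *     rom_offset = qemu_ram_alloc(0x00020000);
 *     cpu_register_physical_memory(0x1fc00000, 0x00020000,
 *                                  rom_offset | IO_MEM_ROM);
 */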
2231 void qemu_ram_free(ram_addr_t addr)
2235 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2237 #ifdef DEBUG_UNASSIGNED
2238 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2239 #endif
2240 #ifdef TARGET_SPARC
2241 do_unassigned_access(addr, 0, 0, 0);
2242 #elif TARGET_CRIS
2243 do_unassigned_access(addr, 0, 0, 0);
2244 #endif
2245 return 0;
2248 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2250 #ifdef DEBUG_UNASSIGNED
2251 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2252 #endif
2253 #ifdef TARGET_SPARC
2254 do_unassigned_access(addr, 1, 0, 0);
2255 #elif TARGET_CRIS
2256 do_unassigned_access(addr, 1, 0, 0);
2257 #endif
2260 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2261 unassigned_mem_readb,
2262 unassigned_mem_readb,
2263 unassigned_mem_readb,
2266 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2267 unassigned_mem_writeb,
2268 unassigned_mem_writeb,
2269 unassigned_mem_writeb,
2272 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2273 uint32_t val)
2275 int dirty_flags;
2276 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2277 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2278 #if !defined(CONFIG_USER_ONLY)
2279 tb_invalidate_phys_page_fast(ram_addr, 1);
2280 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2281 #endif
2283 stb_p(phys_ram_base + ram_addr, val);
2284 #ifdef USE_KQEMU
2285 if (cpu_single_env->kqemu_enabled &&
2286 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2287 kqemu_modify_page(cpu_single_env, ram_addr);
2288 #endif
2289 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2290 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2291 /* we remove the notdirty callback only if the code has been
2292 flushed */
2293 if (dirty_flags == 0xff)
2294 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2297 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2298 uint32_t val)
2300 int dirty_flags;
2301 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2302 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2303 #if !defined(CONFIG_USER_ONLY)
2304 tb_invalidate_phys_page_fast(ram_addr, 2);
2305 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2306 #endif
2308 stw_p(phys_ram_base + ram_addr, val);
2309 #ifdef USE_KQEMU
2310 if (cpu_single_env->kqemu_enabled &&
2311 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2312 kqemu_modify_page(cpu_single_env, ram_addr);
2313 #endif
2314 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2315 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2316 /* we remove the notdirty callback only if the code has been
2317 flushed */
2318 if (dirty_flags == 0xff)
2319 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2322 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2323 uint32_t val)
2325 int dirty_flags;
2326 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2327 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2328 #if !defined(CONFIG_USER_ONLY)
2329 tb_invalidate_phys_page_fast(ram_addr, 4);
2330 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2331 #endif
2333 stl_p(phys_ram_base + ram_addr, val);
2334 #ifdef USE_KQEMU
2335 if (cpu_single_env->kqemu_enabled &&
2336 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2337 kqemu_modify_page(cpu_single_env, ram_addr);
2338 #endif
2339 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2340 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2341 /* we remove the notdirty callback only if the code has been
2342 flushed */
2343 if (dirty_flags == 0xff)
2344 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2347 static CPUReadMemoryFunc *error_mem_read[3] = {
2348 NULL, /* never used */
2349 NULL, /* never used */
2350 NULL, /* never used */
2353 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2354 notdirty_mem_writeb,
2355 notdirty_mem_writew,
2356 notdirty_mem_writel,
2359 /* Generate a debug exception if a watchpoint has been hit. */
2360 static void check_watchpoint(int offset, int flags)
2362 CPUState *env = cpu_single_env;
2363 target_ulong vaddr;
2364 int i;
2366 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2367 for (i = 0; i < env->nb_watchpoints; i++) {
2368 if (vaddr == env->watchpoint[i].vaddr
2369 && (env->watchpoint[i].type & flags)) {
2370 env->watchpoint_hit = i + 1;
2371 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2372 break;
2377 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2378 so these check for a hit then pass through to the normal out-of-line
2379 phys routines. */
2380 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2382 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2383 return ldub_phys(addr);
2386 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2388 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2389 return lduw_phys(addr);
2392 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2394 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2395 return ldl_phys(addr);
2398 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2399 uint32_t val)
2401 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2402 stb_phys(addr, val);
2405 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2406 uint32_t val)
2408 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2409 stw_phys(addr, val);
2412 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2413 uint32_t val)
2415 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2416 stl_phys(addr, val);
2419 static CPUReadMemoryFunc *watch_mem_read[3] = {
2420 watch_mem_readb,
2421 watch_mem_readw,
2422 watch_mem_readl,
2425 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2426 watch_mem_writeb,
2427 watch_mem_writew,
2428 watch_mem_writel,
2431 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2432 unsigned int len)
2434 uint32_t ret;
2435 unsigned int idx;
2437 idx = SUBPAGE_IDX(addr - mmio->base);
2438 #if defined(DEBUG_SUBPAGE)
2439 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2440 mmio, len, addr, idx);
2441 #endif
2442 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2444 return ret;
2447 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2448 uint32_t value, unsigned int len)
2450 unsigned int idx;
2452 idx = SUBPAGE_IDX(addr - mmio->base);
2453 #if defined(DEBUG_SUBPAGE)
2454 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2455 mmio, len, addr, idx, value);
2456 #endif
2457 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2460 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2462 #if defined(DEBUG_SUBPAGE)
2463 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2464 #endif
2466 return subpage_readlen(opaque, addr, 0);
2469 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2470 uint32_t value)
2472 #if defined(DEBUG_SUBPAGE)
2473 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2474 #endif
2475 subpage_writelen(opaque, addr, value, 0);
2478 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2480 #if defined(DEBUG_SUBPAGE)
2481 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2482 #endif
2484 return subpage_readlen(opaque, addr, 1);
2487 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2488 uint32_t value)
2490 #if defined(DEBUG_SUBPAGE)
2491 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2492 #endif
2493 subpage_writelen(opaque, addr, value, 1);
2496 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2498 #if defined(DEBUG_SUBPAGE)
2499 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2500 #endif
2502 return subpage_readlen(opaque, addr, 2);
2505 static void subpage_writel (void *opaque,
2506 target_phys_addr_t addr, uint32_t value)
2508 #if defined(DEBUG_SUBPAGE)
2509 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2510 #endif
2511 subpage_writelen(opaque, addr, value, 2);
2514 static CPUReadMemoryFunc *subpage_read[] = {
2515 &subpage_readb,
2516 &subpage_readw,
2517 &subpage_readl,
2520 static CPUWriteMemoryFunc *subpage_write[] = {
2521 &subpage_writeb,
2522 &subpage_writew,
2523 &subpage_writel,
2526 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2527 ram_addr_t memory)
2529 int idx, eidx;
2530 unsigned int i;
2532 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2533 return -1;
2534 idx = SUBPAGE_IDX(start);
2535 eidx = SUBPAGE_IDX(end);
2536 #if defined(DEBUG_SUBPAGE)
2537 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2538 mmio, start, end, idx, eidx, memory);
2539 #endif
2540 memory >>= IO_MEM_SHIFT;
2541 for (; idx <= eidx; idx++) {
2542 for (i = 0; i < 4; i++) {
2543 if (io_mem_read[memory][i]) {
2544 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2545 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2547 if (io_mem_write[memory][i]) {
2548 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2549 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2554 return 0;
2557 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2558 ram_addr_t orig_memory)
2560 subpage_t *mmio;
2561 int subpage_memory;
2563 mmio = qemu_mallocz(sizeof(subpage_t));
2564 if (mmio != NULL) {
2565 mmio->base = base;
2566 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2567 #if defined(DEBUG_SUBPAGE)
2568 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2569 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2570 #endif
2571 *phys = subpage_memory | IO_MEM_SUBPAGE;
2572 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2575 return mmio;
2578 static void io_mem_init(void)
2580 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2581 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2582 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2583 io_mem_nb = 5;
2585 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2586 watch_mem_write, NULL);
2587 /* alloc dirty bits array */
2588 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2589 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2592 /* mem_read and mem_write are arrays of function pointers for byte
2593 (index 0), word (index 1) and dword (index 2) accesses. An entry
2594 can be omitted by passing a NULL function pointer. The
2595 registered functions may be modified dynamically later.
2596 If io_index is non-zero, the corresponding io zone is
2597 modified. If it is zero, a new io zone is allocated. The return
2598 value can be used with cpu_register_physical_memory(). (-1) is
2599 returned on error. */
2600 int cpu_register_io_memory(int io_index,
2601 CPUReadMemoryFunc **mem_read,
2602 CPUWriteMemoryFunc **mem_write,
2603 void *opaque)
2605 int i, subwidth = 0;
2607 if (io_index <= 0) {
2608 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2609 return -1;
2610 io_index = io_mem_nb++;
2611 } else {
2612 if (io_index >= IO_MEM_NB_ENTRIES)
2613 return -1;
2616 for(i = 0; i < 3; i++) {
2617 if (!mem_read[i] || !mem_write[i])
2618 subwidth = IO_MEM_SUBWIDTH;
2619 io_mem_read[io_index][i] = mem_read[i];
2620 io_mem_write[io_index][i] = mem_write[i];
2622 io_mem_opaque[io_index] = opaque;
2623 return (io_index << IO_MEM_SHIFT) | subwidth;
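/* Illustrative sketch, not part of this file: registering the MMIO
 * callbacks of a hypothetical device and mapping them at a guest physical
 * address.  The mydev_ callback names, MYDEV_BASE, MYDEV_SIZE and the
 * opaque state pointer s are invented; the two API calls are the real ones.
 *
 *     static CPUReadMemoryFunc *mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc *mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
 *     if (iomemtype != -1)
 *         cpu_register_physical_memory(MYDEV_BASE, MYDEV_SIZE, iomemtype);
 */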
2626 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2628 return io_mem_write[io_index >> IO_MEM_SHIFT];
2631 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2633 return io_mem_read[io_index >> IO_MEM_SHIFT];
2636 #endif /* !defined(CONFIG_USER_ONLY) */
2638 /* physical memory access (slow version, mainly for debug) */
2639 #if defined(CONFIG_USER_ONLY)
2640 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2641 int len, int is_write)
2643 int l, flags;
2644 target_ulong page;
2645 void * p;
2647 while (len > 0) {
2648 page = addr & TARGET_PAGE_MASK;
2649 l = (page + TARGET_PAGE_SIZE) - addr;
2650 if (l > len)
2651 l = len;
2652 flags = page_get_flags(page);
2653 if (!(flags & PAGE_VALID))
2654 return;
2655 if (is_write) {
2656 if (!(flags & PAGE_WRITE))
2657 return;
2658 /* XXX: this code should not depend on lock_user */
2659 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2660 /* FIXME - should this return an error rather than just fail? */
2661 return;
2662 memcpy(p, buf, l);
2663 unlock_user(p, addr, l);
2664 } else {
2665 if (!(flags & PAGE_READ))
2666 return;
2667 /* XXX: this code should not depend on lock_user */
2668 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2669 /* FIXME - should this return an error rather than just fail? */
2670 return;
2671 memcpy(buf, p, l);
2672 unlock_user(p, addr, 0);
2674 len -= l;
2675 buf += l;
2676 addr += l;
2680 #else
2681 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2682 int len, int is_write)
2684 int l, io_index;
2685 uint8_t *ptr;
2686 uint32_t val;
2687 target_phys_addr_t page;
2688 unsigned long pd;
2689 PhysPageDesc *p;
2691 while (len > 0) {
2692 page = addr & TARGET_PAGE_MASK;
2693 l = (page + TARGET_PAGE_SIZE) - addr;
2694 if (l > len)
2695 l = len;
2696 p = phys_page_find(page >> TARGET_PAGE_BITS);
2697 if (!p) {
2698 pd = IO_MEM_UNASSIGNED;
2699 } else {
2700 pd = p->phys_offset;
2703 if (is_write) {
2704 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2705 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2706 /* XXX: could force cpu_single_env to NULL to avoid
2707 potential bugs */
2708 if (l >= 4 && ((addr & 3) == 0)) {
2709 /* 32 bit write access */
2710 val = ldl_p(buf);
2711 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2712 l = 4;
2713 } else if (l >= 2 && ((addr & 1) == 0)) {
2714 /* 16 bit write access */
2715 val = lduw_p(buf);
2716 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2717 l = 2;
2718 } else {
2719 /* 8 bit write access */
2720 val = ldub_p(buf);
2721 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2722 l = 1;
2724 } else {
2725 unsigned long addr1;
2726 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2727 /* RAM case */
2728 ptr = phys_ram_base + addr1;
2729 memcpy(ptr, buf, l);
2730 if (!cpu_physical_memory_is_dirty(addr1)) {
2731 /* invalidate code */
2732 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2733 /* set dirty bit */
2734 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2735 (0xff & ~CODE_DIRTY_FLAG);
2738 } else {
2739 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2740 !(pd & IO_MEM_ROMD)) {
2741 /* I/O case */
2742 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2743 if (l >= 4 && ((addr & 3) == 0)) {
2744 /* 32 bit read access */
2745 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2746 stl_p(buf, val);
2747 l = 4;
2748 } else if (l >= 2 && ((addr & 1) == 0)) {
2749 /* 16 bit read access */
2750 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2751 stw_p(buf, val);
2752 l = 2;
2753 } else {
2754 /* 8 bit read access */
2755 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2756 stb_p(buf, val);
2757 l = 1;
2759 } else {
2760 /* RAM case */
2761 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2762 (addr & ~TARGET_PAGE_MASK);
2763 memcpy(buf, ptr, l);
2766 len -= l;
2767 buf += l;
2768 addr += l;
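/* Illustrative sketch, not part of this file: device emulation usually goes
 * through the cpu_physical_memory_read()/write() wrappers around the
 * function above for DMA-style transfers.  The descriptor layout, desc_paddr
 * and dst_paddr are invented for the example.
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_paddr, desc, sizeof(desc));
 *     ...decode the descriptor and build the payload buffer...
 *     cpu_physical_memory_write(dst_paddr, payload, payload_len);
 */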
2772 /* used for ROM loading: can write in RAM and ROM */
2773 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2774 const uint8_t *buf, int len)
2776 int l;
2777 uint8_t *ptr;
2778 target_phys_addr_t page;
2779 unsigned long pd;
2780 PhysPageDesc *p;
2782 while (len > 0) {
2783 page = addr & TARGET_PAGE_MASK;
2784 l = (page + TARGET_PAGE_SIZE) - addr;
2785 if (l > len)
2786 l = len;
2787 p = phys_page_find(page >> TARGET_PAGE_BITS);
2788 if (!p) {
2789 pd = IO_MEM_UNASSIGNED;
2790 } else {
2791 pd = p->phys_offset;
2794 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2795 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2796 !(pd & IO_MEM_ROMD)) {
2797 /* do nothing */
2798 } else {
2799 unsigned long addr1;
2800 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2801 /* ROM/RAM case */
2802 ptr = phys_ram_base + addr1;
2803 memcpy(ptr, buf, l);
2805 len -= l;
2806 buf += l;
2807 addr += l;
2812 /* warning: addr must be aligned */
2813 uint32_t ldl_phys(target_phys_addr_t addr)
2815 int io_index;
2816 uint8_t *ptr;
2817 uint32_t val;
2818 unsigned long pd;
2819 PhysPageDesc *p;
2821 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2822 if (!p) {
2823 pd = IO_MEM_UNASSIGNED;
2824 } else {
2825 pd = p->phys_offset;
2828 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2829 !(pd & IO_MEM_ROMD)) {
2830 /* I/O case */
2831 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2832 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2833 } else {
2834 /* RAM case */
2835 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2836 (addr & ~TARGET_PAGE_MASK);
2837 val = ldl_p(ptr);
2839 return val;
2842 /* warning: addr must be aligned */
2843 uint64_t ldq_phys(target_phys_addr_t addr)
2845 int io_index;
2846 uint8_t *ptr;
2847 uint64_t val;
2848 unsigned long pd;
2849 PhysPageDesc *p;
2851 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2852 if (!p) {
2853 pd = IO_MEM_UNASSIGNED;
2854 } else {
2855 pd = p->phys_offset;
2858 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2859 !(pd & IO_MEM_ROMD)) {
2860 /* I/O case */
2861 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2862 #ifdef TARGET_WORDS_BIGENDIAN
2863 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2864 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2865 #else
2866 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2867 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2868 #endif
2869 } else {
2870 /* RAM case */
2871 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2872 (addr & ~TARGET_PAGE_MASK);
2873 val = ldq_p(ptr);
2875 return val;
2878 /* XXX: optimize */
2879 uint32_t ldub_phys(target_phys_addr_t addr)
2881 uint8_t val;
2882 cpu_physical_memory_read(addr, &val, 1);
2883 return val;
2886 /* XXX: optimize */
2887 uint32_t lduw_phys(target_phys_addr_t addr)
2889 uint16_t val;
2890 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2891 return tswap16(val);
2894 /* warning: addr must be aligned. The ram page is not marked as dirty
2895 and the code inside is not invalidated. It is useful if the dirty
2896 bits are used to track modified PTEs */
2897 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2899 int io_index;
2900 uint8_t *ptr;
2901 unsigned long pd;
2902 PhysPageDesc *p;
2904 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2905 if (!p) {
2906 pd = IO_MEM_UNASSIGNED;
2907 } else {
2908 pd = p->phys_offset;
2911 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2912 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2913 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2914 } else {
2915 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2916 (addr & ~TARGET_PAGE_MASK);
2917 stl_p(ptr, val);
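/* Illustrative sketch, not part of this file: the typical caller is target
 * MMU code updating accessed/dirty bits in a guest page table entry, where
 * the write must not mark the RAM page dirty or invalidate translated code.
 * PTE_ACCESSED and pte_addr are invented stand-ins for the target-specific
 * names.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     if (!(pte & PTE_ACCESSED))
 *         stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
 */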
2921 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2923 int io_index;
2924 uint8_t *ptr;
2925 unsigned long pd;
2926 PhysPageDesc *p;
2928 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2929 if (!p) {
2930 pd = IO_MEM_UNASSIGNED;
2931 } else {
2932 pd = p->phys_offset;
2935 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2936 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2937 #ifdef TARGET_WORDS_BIGENDIAN
2938 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2939 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2940 #else
2941 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2942 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2943 #endif
2944 } else {
2945 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2946 (addr & ~TARGET_PAGE_MASK);
2947 stq_p(ptr, val);
2951 /* warning: addr must be aligned */
2952 void stl_phys(target_phys_addr_t addr, uint32_t val)
2954 int io_index;
2955 uint8_t *ptr;
2956 unsigned long pd;
2957 PhysPageDesc *p;
2959 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2960 if (!p) {
2961 pd = IO_MEM_UNASSIGNED;
2962 } else {
2963 pd = p->phys_offset;
2966 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2967 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2968 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2969 } else {
2970 unsigned long addr1;
2971 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2972 /* RAM case */
2973 ptr = phys_ram_base + addr1;
2974 stl_p(ptr, val);
2975 if (!cpu_physical_memory_is_dirty(addr1)) {
2976 /* invalidate code */
2977 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2978 /* set dirty bit */
2979 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2980 (0xff & ~CODE_DIRTY_FLAG);
2985 /* XXX: optimize */
2986 void stb_phys(target_phys_addr_t addr, uint32_t val)
2988 uint8_t v = val;
2989 cpu_physical_memory_write(addr, &v, 1);
2992 /* XXX: optimize */
2993 void stw_phys(target_phys_addr_t addr, uint32_t val)
2995 uint16_t v = tswap16(val);
2996 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2999 /* XXX: optimize */
3000 void stq_phys(target_phys_addr_t addr, uint64_t val)
3002 val = tswap64(val);
3003 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3006 #endif
3008 /* virtual memory access for debug */
3009 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3010 uint8_t *buf, int len, int is_write)
3012 int l;
3013 target_phys_addr_t phys_addr;
3014 target_ulong page;
3016 while (len > 0) {
3017 page = addr & TARGET_PAGE_MASK;
3018 phys_addr = cpu_get_phys_page_debug(env, page);
3019 /* if no physical page is mapped, return an error */
3020 if (phys_addr == -1)
3021 return -1;
3022 l = (page + TARGET_PAGE_SIZE) - addr;
3023 if (l > len)
3024 l = len;
3025 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3026 buf, l, is_write);
3027 len -= l;
3028 buf += l;
3029 addr += l;
3031 return 0;
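/* Illustrative sketch, not part of this file: a debug consumer such as a
 * gdb stub or monitor command reading guest virtual memory through the
 * helper above.  read_guest_u32() is an invented name.
 *
 *     static int read_guest_u32(CPUState *env, target_ulong vaddr,
 *                               uint32_t *out)
 *     {
 *         uint8_t buf[4];
 *         if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
 *             return -1;            (no physical page mapped at vaddr)
 *         *out = ldl_p(buf);
 *         return 0;
 *     }
 */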
3034 /* in deterministic execution mode, instructions doing device I/Os
3035 must be at the end of the TB */
3036 void cpu_io_recompile(CPUState *env, void *retaddr)
3038 TranslationBlock *tb;
3039 uint32_t n, cflags;
3040 target_ulong pc, cs_base;
3041 uint64_t flags;
3043 tb = tb_find_pc((unsigned long)retaddr);
3044 if (!tb) {
3045 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3046 retaddr);
3048 n = env->icount_decr.u16.low + tb->icount;
3049 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3050 /* Calculate how many instructions had been executed before the fault
3051 occurred. */
3052 n = n - env->icount_decr.u16.low;
3053 /* Generate a new TB ending on the I/O insn. */
3054 n++;
3055 /* On MIPS and SH, delay slot instructions can only be restarted if
3056 they were already the first instruction in the TB. If this is not
3057 the first instruction in a TB then re-execute the preceding
3058 branch. */
3059 #if defined(TARGET_MIPS)
3060 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3061 env->active_tc.PC -= 4;
3062 env->icount_decr.u16.low++;
3063 env->hflags &= ~MIPS_HFLAG_BMASK;
3065 #elif defined(TARGET_SH4)
3066 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3067 && n > 1) {
3068 env->pc -= 2;
3069 env->icount_decr.u16.low++;
3070 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3072 #endif
3073 /* This should never happen. */
3074 if (n > CF_COUNT_MASK)
3075 cpu_abort(env, "TB too big during recompile");
3077 cflags = n | CF_LAST_IO;
3078 pc = tb->pc;
3079 cs_base = tb->cs_base;
3080 flags = tb->flags;
3081 tb_phys_invalidate(tb, -1);
3082 /* FIXME: In theory this could raise an exception. In practice
3083 we have already translated the block once so it's probably ok. */
3084 tb_gen_code(env, pc, cs_base, flags, cflags);
3085 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3086 the first in the TB) then we end up generating a whole new TB and
3087 repeating the fault, which is horribly inefficient.
3088 Better would be to execute just this insn uncached, or generate a
3089 second new TB. */
3090 cpu_resume_from_signal(env, NULL);
3093 void dump_exec_info(FILE *f,
3094 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3096 int i, target_code_size, max_target_code_size;
3097 int direct_jmp_count, direct_jmp2_count, cross_page;
3098 TranslationBlock *tb;
3100 target_code_size = 0;
3101 max_target_code_size = 0;
3102 cross_page = 0;
3103 direct_jmp_count = 0;
3104 direct_jmp2_count = 0;
3105 for(i = 0; i < nb_tbs; i++) {
3106 tb = &tbs[i];
3107 target_code_size += tb->size;
3108 if (tb->size > max_target_code_size)
3109 max_target_code_size = tb->size;
3110 if (tb->page_addr[1] != -1)
3111 cross_page++;
3112 if (tb->tb_next_offset[0] != 0xffff) {
3113 direct_jmp_count++;
3114 if (tb->tb_next_offset[1] != 0xffff) {
3115 direct_jmp2_count++;
3119 /* XXX: avoid using doubles? */
3120 cpu_fprintf(f, "Translation buffer state:\n");
3121 cpu_fprintf(f, "gen code size %ld/%ld\n",
3122 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3123 cpu_fprintf(f, "TB count %d/%d\n",
3124 nb_tbs, code_gen_max_blocks);
3125 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3126 nb_tbs ? target_code_size / nb_tbs : 0,
3127 max_target_code_size);
3128 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3129 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3130 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3131 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3132 cross_page,
3133 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3134 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3135 direct_jmp_count,
3136 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3137 direct_jmp2_count,
3138 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3139 cpu_fprintf(f, "\nStatistics:\n");
3140 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3141 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3142 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3143 tcg_dump_info(f, cpu_fprintf);
3146 #if !defined(CONFIG_USER_ONLY)
3148 #define MMUSUFFIX _cmmu
3149 #define GETPC() NULL
3150 #define env cpu_single_env
3151 #define SOFTMMU_CODE_ACCESS
3153 #define SHIFT 0
3154 #include "softmmu_template.h"
3156 #define SHIFT 1
3157 #include "softmmu_template.h"
3159 #define SHIFT 2
3160 #include "softmmu_template.h"
3162 #define SHIFT 3
3163 #include "softmmu_template.h"
3165 #undef env
3167 #endif