64 bit virtual addressing fix
[qemu/mini2440.git] / exec.c
blob 178c237fbaa98604c0686f5468a3e87459cae16f
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
38 //#define DEBUG_TB_INVALIDATE
39 //#define DEBUG_FLUSH
40 //#define DEBUG_TLB
42 /* make various TB consistency checks */
43 //#define DEBUG_TB_CHECK
44 //#define DEBUG_TLB_CHECK
46 /* threshold to flush the translated code buffer */
47 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
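/* Illustrative note: this means that once less than CODE_GEN_MAX_SIZE
   bytes (one worst-case TB) remain free in code_gen_buffer, tb_alloc()
   below returns NULL and its caller falls back to a full tb_flush(). */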
49 #define SMC_BITMAP_USE_THRESHOLD 10
51 #define MMAP_AREA_START 0x00000000
52 #define MMAP_AREA_END 0xa8000000
54 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
55 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
56 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
57 int nb_tbs;
58 /* any access to the tbs or the page table must use this lock */
59 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
61 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
62 uint8_t *code_gen_ptr;
64 int phys_ram_size;
65 int phys_ram_fd;
66 uint8_t *phys_ram_base;
67 uint8_t *phys_ram_dirty;
69 typedef struct PageDesc {
70 /* list of TBs intersecting this ram page */
71 TranslationBlock *first_tb;
72 /* in order to optimize self modifying code, we count the number
73 of lookups we do to a given page to use a bitmap */
74 unsigned int code_write_count;
75 uint8_t *code_bitmap;
76 #if defined(CONFIG_USER_ONLY)
77 unsigned long flags;
78 #endif
79 } PageDesc;
81 typedef struct PhysPageDesc {
82 /* offset in host memory of the page + io_index in the low 12 bits */
83 uint32_t phys_offset;
84 } PhysPageDesc;
86 /* Note: the VirtPage handling is obsolete and will be removed
87 ASAP */
88 typedef struct VirtPageDesc {
89 /* physical address of code page. It is valid only if 'valid_tag'
90 matches 'virt_valid_tag' */
91 target_ulong phys_addr;
92 unsigned int valid_tag;
93 #if !defined(CONFIG_SOFTMMU)
94 /* original page access rights. It is valid only if 'valid_tag'
95 matches 'virt_valid_tag' */
96 unsigned int prot;
97 #endif
98 } VirtPageDesc;
100 #define L2_BITS 10
101 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
103 #define L1_SIZE (1 << L1_BITS)
104 #define L2_SIZE (1 << L2_BITS)
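/* Illustration, assuming a typical 4 KB target page (TARGET_PAGE_BITS
   of 12): L1_BITS is then 10, both levels hold 1024 entries, and a
   32 bit address splits as bits [31:22] -> l1_map slot, [21:12] ->
   PageDesc within that slot, [11:0] -> offset inside the page.
   page_find_alloc() below applies exactly this split to the page index
   (address >> TARGET_PAGE_BITS). */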
106 static void io_mem_init(void);
108 unsigned long qemu_real_host_page_size;
109 unsigned long qemu_host_page_bits;
110 unsigned long qemu_host_page_size;
111 unsigned long qemu_host_page_mask;
113 /* XXX: for system emulation, it could just be an array */
114 static PageDesc *l1_map[L1_SIZE];
115 PhysPageDesc **l1_phys_map;
117 #if !defined(CONFIG_USER_ONLY)
118 #if TARGET_LONG_BITS > 32
119 #define VIRT_L_BITS 9
120 #define VIRT_L_SIZE (1 << VIRT_L_BITS)
121 static void *l1_virt_map[VIRT_L_SIZE];
122 #else
123 static VirtPageDesc *l1_virt_map[L1_SIZE];
124 #endif
125 static unsigned int virt_valid_tag;
126 #endif
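/* Note on the 64 bit case (illustrative): with TARGET_LONG_BITS > 32
   the flat l1_virt_map is replaced by a six-deep tree of 9 bit nodes
   (VIRT_L_BITS).  virt_page_find_alloc() below consumes the page index
   9 bits at a time, using shifts of 5*9 down to 0, so 54 bits of page
   index are covered; assuming a 4 KB target page, that is enough for
   the full 64 bit virtual address space. */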
128 /* io memory support */
129 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
130 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
131 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
132 static int io_mem_nb;
134 /* log support */
135 char *logfilename = "/tmp/qemu.log";
136 FILE *logfile;
137 int loglevel;
139 /* statistics */
140 static int tlb_flush_count;
141 static int tb_flush_count;
142 static int tb_phys_invalidate_count;
144 static void page_init(void)
146 /* NOTE: we can always suppose that qemu_host_page_size >=
147 TARGET_PAGE_SIZE */
148 #ifdef _WIN32
150 SYSTEM_INFO system_info;
151 DWORD old_protect;
153 GetSystemInfo(&system_info);
154 qemu_real_host_page_size = system_info.dwPageSize;
156 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
157 PAGE_EXECUTE_READWRITE, &old_protect);
159 #else
160 qemu_real_host_page_size = getpagesize();
162 unsigned long start, end;
164 start = (unsigned long)code_gen_buffer;
165 start &= ~(qemu_real_host_page_size - 1);
167 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
168 end += qemu_real_host_page_size - 1;
169 end &= ~(qemu_real_host_page_size - 1);
171 mprotect((void *)start, end - start,
172 PROT_READ | PROT_WRITE | PROT_EXEC);
174 #endif
176 if (qemu_host_page_size == 0)
177 qemu_host_page_size = qemu_real_host_page_size;
178 if (qemu_host_page_size < TARGET_PAGE_SIZE)
179 qemu_host_page_size = TARGET_PAGE_SIZE;
180 qemu_host_page_bits = 0;
181 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
182 qemu_host_page_bits++;
183 qemu_host_page_mask = ~(qemu_host_page_size - 1);
184 #if !defined(CONFIG_USER_ONLY)
185 virt_valid_tag = 1;
186 #endif
187 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(PhysPageDesc *));
188 memset(l1_phys_map, 0, L1_SIZE * sizeof(PhysPageDesc *));
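/* Worked example (illustrative): on a host using 8 KB pages while the
   target uses 4 KB pages, qemu_real_host_page_size is 8192, so
   qemu_host_page_size ends up as 8192, qemu_host_page_bits as 13 and
   qemu_host_page_mask as ~8191UL; every host page then covers a whole
   number of target pages, which the code page protection below relies
   on. */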
191 static inline PageDesc *page_find_alloc(unsigned int index)
193 PageDesc **lp, *p;
195 lp = &l1_map[index >> L2_BITS];
196 p = *lp;
197 if (!p) {
198 /* allocate if not found */
199 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
200 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
201 *lp = p;
203 return p + (index & (L2_SIZE - 1));
206 static inline PageDesc *page_find(unsigned int index)
208 PageDesc *p;
210 p = l1_map[index >> L2_BITS];
211 if (!p)
212 return 0;
213 return p + (index & (L2_SIZE - 1));
216 static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
218 PhysPageDesc **lp, *p;
220 lp = &l1_phys_map[index >> L2_BITS];
221 p = *lp;
222 if (!p) {
223 /* allocate if not found */
224 p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
225 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
226 *lp = p;
228 return p + (index & (L2_SIZE - 1));
231 static inline PhysPageDesc *phys_page_find(unsigned int index)
233 PhysPageDesc *p;
235 p = l1_phys_map[index >> L2_BITS];
236 if (!p)
237 return 0;
238 return p + (index & (L2_SIZE - 1));
241 #if !defined(CONFIG_USER_ONLY)
242 static void tlb_protect_code(CPUState *env, target_ulong addr);
243 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
245 static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
247 #if TARGET_LONG_BITS > 32
248 void **p, **lp;
250 p = l1_virt_map;
251 lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
252 p = *lp;
253 if (!p) {
254 if (!alloc)
255 return NULL;
256 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
257 *lp = p;
259 lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
260 p = *lp;
261 if (!p) {
262 if (!alloc)
263 return NULL;
264 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
265 *lp = p;
267 lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
268 p = *lp;
269 if (!p) {
270 if (!alloc)
271 return NULL;
272 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
273 *lp = p;
275 lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
276 p = *lp;
277 if (!p) {
278 if (!alloc)
279 return NULL;
280 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
281 *lp = p;
283 lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
284 p = *lp;
285 if (!p) {
286 if (!alloc)
287 return NULL;
288 p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
289 *lp = p;
291 return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
292 #else
293 VirtPageDesc *p, **lp;
295 lp = &l1_virt_map[index >> L2_BITS];
296 p = *lp;
297 if (!p) {
298 /* allocate if not found */
299 if (!alloc)
300 return NULL;
301 p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
302 *lp = p;
304 return p + (index & (L2_SIZE - 1));
305 #endif
308 static inline VirtPageDesc *virt_page_find(target_ulong index)
310 return virt_page_find_alloc(index, 0);
313 #if TARGET_LONG_BITS > 32
314 static void virt_page_flush_internal(void **p, int level)
316 int i;
317 if (level == 0) {
318 VirtPageDesc *q = (VirtPageDesc *)p;
319 for(i = 0; i < VIRT_L_SIZE; i++)
320 q[i].valid_tag = 0;
321 } else {
322 level--;
323 for(i = 0; i < VIRT_L_SIZE; i++) {
324 if (p[i])
325 virt_page_flush_internal(p[i], level);
329 #endif
331 static void virt_page_flush(void)
333 virt_valid_tag++;
335 if (virt_valid_tag == 0) {
336 virt_valid_tag = 1;
337 #if TARGET_LONG_BITS > 32
338 virt_page_flush_internal(l1_virt_map, 5);
339 #else
341 int i, j;
342 VirtPageDesc *p;
343 for(i = 0; i < L1_SIZE; i++) {
344 p = l1_virt_map[i];
345 if (p) {
346 for(j = 0; j < L2_SIZE; j++)
347 p[j].valid_tag = 0;
351 #endif
354 #else
355 static void virt_page_flush(void)
358 #endif
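/* Note on the valid_tag scheme (illustrative summary): a flush normally
   just increments virt_valid_tag, which lazily invalidates every
   VirtPageDesc, since entries are only trusted when their valid_tag
   matches the current value.  Only when the counter wraps around to 0
   does the code above walk the map and clear the tags explicitly. */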
360 void cpu_exec_init(void)
362 if (!code_gen_ptr) {
363 code_gen_ptr = code_gen_buffer;
364 page_init();
365 io_mem_init();
369 static inline void invalidate_page_bitmap(PageDesc *p)
371 if (p->code_bitmap) {
372 qemu_free(p->code_bitmap);
373 p->code_bitmap = NULL;
375 p->code_write_count = 0;
378 /* set to NULL all the 'first_tb' fields in all PageDescs */
379 static void page_flush_tb(void)
381 int i, j;
382 PageDesc *p;
384 for(i = 0; i < L1_SIZE; i++) {
385 p = l1_map[i];
386 if (p) {
387 for(j = 0; j < L2_SIZE; j++) {
388 p->first_tb = NULL;
389 invalidate_page_bitmap(p);
390 p++;
396 /* flush all the translation blocks */
397 /* XXX: tb_flush is currently not thread safe */
398 void tb_flush(CPUState *env)
400 #if defined(DEBUG_FLUSH)
401 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
402 code_gen_ptr - code_gen_buffer,
403 nb_tbs,
404 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
405 #endif
406 nb_tbs = 0;
407 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
408 virt_page_flush();
410 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
411 page_flush_tb();
413 code_gen_ptr = code_gen_buffer;
414 /* XXX: flush processor icache at this point if cache flush is
415 expensive */
416 tb_flush_count++;
419 #ifdef DEBUG_TB_CHECK
421 static void tb_invalidate_check(unsigned long address)
423 TranslationBlock *tb;
424 int i;
425 address &= TARGET_PAGE_MASK;
426 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
427 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
428 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
429 address >= tb->pc + tb->size)) {
430 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
431 address, tb->pc, tb->size);
437 /* verify that all the pages have correct rights for code */
438 static void tb_page_check(void)
440 TranslationBlock *tb;
441 int i, flags1, flags2;
443 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
444 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
445 flags1 = page_get_flags(tb->pc);
446 flags2 = page_get_flags(tb->pc + tb->size - 1);
447 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
448 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
449 tb->pc, tb->size, flags1, flags2);
455 void tb_jmp_check(TranslationBlock *tb)
457 TranslationBlock *tb1;
458 unsigned int n1;
460 /* suppress any remaining jumps to this TB */
461 tb1 = tb->jmp_first;
462 for(;;) {
463 n1 = (long)tb1 & 3;
464 tb1 = (TranslationBlock *)((long)tb1 & ~3);
465 if (n1 == 2)
466 break;
467 tb1 = tb1->jmp_next[n1];
469 /* check end of list */
470 if (tb1 != tb) {
471 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
475 #endif
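/* Implementation note (illustrative): the TB lists below tag the two
   low bits of each TranslationBlock pointer, relying on TBs being at
   least 4 byte aligned.  In the per-page page_next lists the tag
   (0 or 1) names which of the TB's two pages the link belongs to, and
   in the jump lists the value 2 marks the list head (see
   "tb->jmp_first = (TranslationBlock *)((long)tb | 2)" further down),
   so "(long)tb & 3" recovers the tag and "& ~3" the real pointer. */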
477 /* invalidate one TB */
478 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
479 int next_offset)
481 TranslationBlock *tb1;
482 for(;;) {
483 tb1 = *ptb;
484 if (tb1 == tb) {
485 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
486 break;
488 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
492 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
494 TranslationBlock *tb1;
495 unsigned int n1;
497 for(;;) {
498 tb1 = *ptb;
499 n1 = (long)tb1 & 3;
500 tb1 = (TranslationBlock *)((long)tb1 & ~3);
501 if (tb1 == tb) {
502 *ptb = tb1->page_next[n1];
503 break;
505 ptb = &tb1->page_next[n1];
509 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
511 TranslationBlock *tb1, **ptb;
512 unsigned int n1;
514 ptb = &tb->jmp_next[n];
515 tb1 = *ptb;
516 if (tb1) {
517 /* find tb(n) in circular list */
518 for(;;) {
519 tb1 = *ptb;
520 n1 = (long)tb1 & 3;
521 tb1 = (TranslationBlock *)((long)tb1 & ~3);
522 if (n1 == n && tb1 == tb)
523 break;
524 if (n1 == 2) {
525 ptb = &tb1->jmp_first;
526 } else {
527 ptb = &tb1->jmp_next[n1];
530 /* now we can suppress tb(n) from the list */
531 *ptb = tb->jmp_next[n];
533 tb->jmp_next[n] = NULL;
537 /* reset the jump entry 'n' of a TB so that it is not chained to
538 another TB */
539 static inline void tb_reset_jump(TranslationBlock *tb, int n)
541 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
544 static inline void tb_invalidate(TranslationBlock *tb)
546 unsigned int h, n1;
547 TranslationBlock *tb1, *tb2, **ptb;
549 tb_invalidated_flag = 1;
551 /* remove the TB from the hash list */
552 h = tb_hash_func(tb->pc);
553 ptb = &tb_hash[h];
554 for(;;) {
555 tb1 = *ptb;
556 /* NOTE: the TB is not necessarily linked in the hash. It
557 indicates that it is not currently used */
558 if (tb1 == NULL)
559 return;
560 if (tb1 == tb) {
561 *ptb = tb1->hash_next;
562 break;
564 ptb = &tb1->hash_next;
567 /* suppress this TB from the two jump lists */
568 tb_jmp_remove(tb, 0);
569 tb_jmp_remove(tb, 1);
571 /* suppress any remaining jumps to this TB */
572 tb1 = tb->jmp_first;
573 for(;;) {
574 n1 = (long)tb1 & 3;
575 if (n1 == 2)
576 break;
577 tb1 = (TranslationBlock *)((long)tb1 & ~3);
578 tb2 = tb1->jmp_next[n1];
579 tb_reset_jump(tb1, n1);
580 tb1->jmp_next[n1] = NULL;
581 tb1 = tb2;
583 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
586 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
588 PageDesc *p;
589 unsigned int h;
590 target_ulong phys_pc;
592 /* remove the TB from the hash list */
593 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
594 h = tb_phys_hash_func(phys_pc);
595 tb_remove(&tb_phys_hash[h], tb,
596 offsetof(TranslationBlock, phys_hash_next));
598 /* remove the TB from the page list */
599 if (tb->page_addr[0] != page_addr) {
600 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
601 tb_page_remove(&p->first_tb, tb);
602 invalidate_page_bitmap(p);
604 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
605 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
606 tb_page_remove(&p->first_tb, tb);
607 invalidate_page_bitmap(p);
610 tb_invalidate(tb);
611 tb_phys_invalidate_count++;
614 static inline void set_bits(uint8_t *tab, int start, int len)
616 int end, mask, end1;
618 end = start + len;
619 tab += start >> 3;
620 mask = 0xff << (start & 7);
621 if ((start & ~7) == (end & ~7)) {
622 if (start < end) {
623 mask &= ~(0xff << (end & 7));
624 *tab |= mask;
626 } else {
627 *tab++ |= mask;
628 start = (start + 8) & ~7;
629 end1 = end & ~7;
630 while (start < end1) {
631 *tab++ = 0xff;
632 start += 8;
634 if (start < end) {
635 mask = ~(0xff << (end & 7));
636 *tab |= mask;
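/* Worked example (illustrative): set_bits(tab, 5, 7) covers bits 5..11,
   i.e. it ORs 0xe0 into tab[0] and 0x0f into tab[1].  The code bitmap
   built below uses one such bit per byte of the target page. */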
641 static void build_page_bitmap(PageDesc *p)
643 int n, tb_start, tb_end;
644 TranslationBlock *tb;
646 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
647 if (!p->code_bitmap)
648 return;
649 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
651 tb = p->first_tb;
652 while (tb != NULL) {
653 n = (long)tb & 3;
654 tb = (TranslationBlock *)((long)tb & ~3);
655 /* NOTE: this is subtle as a TB may span two physical pages */
656 if (n == 0) {
657 /* NOTE: tb_end may be after the end of the page, but
658 it is not a problem */
659 tb_start = tb->pc & ~TARGET_PAGE_MASK;
660 tb_end = tb_start + tb->size;
661 if (tb_end > TARGET_PAGE_SIZE)
662 tb_end = TARGET_PAGE_SIZE;
663 } else {
664 tb_start = 0;
665 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
667 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
668 tb = tb->page_next[n];
672 #ifdef TARGET_HAS_PRECISE_SMC
674 static void tb_gen_code(CPUState *env,
675 target_ulong pc, target_ulong cs_base, int flags,
676 int cflags)
678 TranslationBlock *tb;
679 uint8_t *tc_ptr;
680 target_ulong phys_pc, phys_page2, virt_page2;
681 int code_gen_size;
683 phys_pc = get_phys_addr_code(env, pc);
684 tb = tb_alloc(pc);
685 if (!tb) {
686 /* flush must be done */
687 tb_flush(env);
688 /* cannot fail at this point */
689 tb = tb_alloc(pc);
691 tc_ptr = code_gen_ptr;
692 tb->tc_ptr = tc_ptr;
693 tb->cs_base = cs_base;
694 tb->flags = flags;
695 tb->cflags = cflags;
696 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
697 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
699 /* check next page if needed */
700 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
701 phys_page2 = -1;
702 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
703 phys_page2 = get_phys_addr_code(env, virt_page2);
705 tb_link_phys(tb, phys_pc, phys_page2);
707 #endif
709 /* invalidate all TBs which intersect with the target physical page
710 starting in range [start, end). NOTE: start and end must refer to
711 the same physical page. 'is_cpu_write_access' should be true if called
712 from a real cpu write access: the virtual CPU will exit the current
713 TB if code is modified inside this TB. */
714 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
715 int is_cpu_write_access)
717 int n, current_tb_modified, current_tb_not_found, current_flags;
718 CPUState *env = cpu_single_env;
719 PageDesc *p;
720 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
721 target_ulong tb_start, tb_end;
722 target_ulong current_pc, current_cs_base;
724 p = page_find(start >> TARGET_PAGE_BITS);
725 if (!p)
726 return;
727 if (!p->code_bitmap &&
728 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
729 is_cpu_write_access) {
730 /* build code bitmap */
731 build_page_bitmap(p);
734 /* we remove all the TBs in the range [start, end) */
735 /* XXX: see if in some cases it could be faster to invalidate all the code */
736 current_tb_not_found = is_cpu_write_access;
737 current_tb_modified = 0;
738 current_tb = NULL; /* avoid warning */
739 current_pc = 0; /* avoid warning */
740 current_cs_base = 0; /* avoid warning */
741 current_flags = 0; /* avoid warning */
742 tb = p->first_tb;
743 while (tb != NULL) {
744 n = (long)tb & 3;
745 tb = (TranslationBlock *)((long)tb & ~3);
746 tb_next = tb->page_next[n];
747 /* NOTE: this is subtle as a TB may span two physical pages */
748 if (n == 0) {
749 /* NOTE: tb_end may be after the end of the page, but
750 it is not a problem */
751 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
752 tb_end = tb_start + tb->size;
753 } else {
754 tb_start = tb->page_addr[1];
755 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
757 if (!(tb_end <= start || tb_start >= end)) {
758 #ifdef TARGET_HAS_PRECISE_SMC
759 if (current_tb_not_found) {
760 current_tb_not_found = 0;
761 current_tb = NULL;
762 if (env->mem_write_pc) {
763 /* now we have a real cpu fault */
764 current_tb = tb_find_pc(env->mem_write_pc);
767 if (current_tb == tb &&
768 !(current_tb->cflags & CF_SINGLE_INSN)) {
769 /* If we are modifying the current TB, we must stop
770 its execution. We could be more precise by checking
771 that the modification is after the current PC, but it
772 would require a specialized function to partially
773 restore the CPU state */
775 current_tb_modified = 1;
776 cpu_restore_state(current_tb, env,
777 env->mem_write_pc, NULL);
778 #if defined(TARGET_I386)
779 current_flags = env->hflags;
780 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
781 current_cs_base = (target_ulong)env->segs[R_CS].base;
782 current_pc = current_cs_base + env->eip;
783 #else
784 #error unsupported CPU
785 #endif
787 #endif /* TARGET_HAS_PRECISE_SMC */
788 saved_tb = env->current_tb;
789 env->current_tb = NULL;
790 tb_phys_invalidate(tb, -1);
791 env->current_tb = saved_tb;
792 if (env->interrupt_request && env->current_tb)
793 cpu_interrupt(env, env->interrupt_request);
795 tb = tb_next;
797 #if !defined(CONFIG_USER_ONLY)
798 /* if no code remaining, no need to continue to use slow writes */
799 if (!p->first_tb) {
800 invalidate_page_bitmap(p);
801 if (is_cpu_write_access) {
802 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
805 #endif
806 #ifdef TARGET_HAS_PRECISE_SMC
807 if (current_tb_modified) {
808 /* we generate a block containing just the instruction
809 modifying the memory. It will ensure that it cannot modify
810 itself */
811 env->current_tb = NULL;
812 tb_gen_code(env, current_pc, current_cs_base, current_flags,
813 CF_SINGLE_INSN);
814 cpu_resume_from_signal(env, NULL);
816 #endif
819 /* len must be <= 8 and start must be a multiple of len */
820 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
822 PageDesc *p;
823 int offset, b;
824 #if 0
825 if (1) {
826 if (loglevel) {
827 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
828 cpu_single_env->mem_write_vaddr, len,
829 cpu_single_env->eip,
830 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
833 #endif
834 p = page_find(start >> TARGET_PAGE_BITS);
835 if (!p)
836 return;
837 if (p->code_bitmap) {
838 offset = start & ~TARGET_PAGE_MASK;
839 b = p->code_bitmap[offset >> 3] >> (offset & 7);
840 if (b & ((1 << len) - 1))
841 goto do_invalidate;
842 } else {
843 do_invalidate:
844 tb_invalidate_phys_page_range(start, start + len, 1);
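/* Worked example (illustrative): for a 4 byte store at page offset
   0x104, offset >> 3 selects bitmap byte 0x20 and (offset & 7) == 4, so
   b & ((1 << 4) - 1) tests the bits for bytes 0x104..0x107; only if one
   of them belongs to translated code does the slow invalidate run.
   This is why len must be <= 8 and start aligned to len, as noted
   above: the tested bits then never straddle a bitmap byte. */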
848 #if !defined(CONFIG_SOFTMMU)
849 static void tb_invalidate_phys_page(target_ulong addr,
850 unsigned long pc, void *puc)
852 int n, current_flags, current_tb_modified;
853 target_ulong current_pc, current_cs_base;
854 PageDesc *p;
855 TranslationBlock *tb, *current_tb;
856 #ifdef TARGET_HAS_PRECISE_SMC
857 CPUState *env = cpu_single_env;
858 #endif
860 addr &= TARGET_PAGE_MASK;
861 p = page_find(addr >> TARGET_PAGE_BITS);
862 if (!p)
863 return;
864 tb = p->first_tb;
865 current_tb_modified = 0;
866 current_tb = NULL;
867 current_pc = 0; /* avoid warning */
868 current_cs_base = 0; /* avoid warning */
869 current_flags = 0; /* avoid warning */
870 #ifdef TARGET_HAS_PRECISE_SMC
871 if (tb && pc != 0) {
872 current_tb = tb_find_pc(pc);
874 #endif
875 while (tb != NULL) {
876 n = (long)tb & 3;
877 tb = (TranslationBlock *)((long)tb & ~3);
878 #ifdef TARGET_HAS_PRECISE_SMC
879 if (current_tb == tb &&
880 !(current_tb->cflags & CF_SINGLE_INSN)) {
881 /* If we are modifying the current TB, we must stop
882 its execution. We could be more precise by checking
883 that the modification is after the current PC, but it
884 would require a specialized function to partially
885 restore the CPU state */
887 current_tb_modified = 1;
888 cpu_restore_state(current_tb, env, pc, puc);
889 #if defined(TARGET_I386)
890 current_flags = env->hflags;
891 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
892 current_cs_base = (target_ulong)env->segs[R_CS].base;
893 current_pc = current_cs_base + env->eip;
894 #else
895 #error unsupported CPU
896 #endif
898 #endif /* TARGET_HAS_PRECISE_SMC */
899 tb_phys_invalidate(tb, addr);
900 tb = tb->page_next[n];
902 p->first_tb = NULL;
903 #ifdef TARGET_HAS_PRECISE_SMC
904 if (current_tb_modified) {
905 /* we generate a block containing just the instruction
906 modifying the memory. It will ensure that it cannot modify
907 itself */
908 env->current_tb = NULL;
909 tb_gen_code(env, current_pc, current_cs_base, current_flags,
910 CF_SINGLE_INSN);
911 cpu_resume_from_signal(env, puc);
913 #endif
915 #endif
917 /* add the tb in the target page and protect it if necessary */
918 static inline void tb_alloc_page(TranslationBlock *tb,
919 unsigned int n, unsigned int page_addr)
921 PageDesc *p;
922 TranslationBlock *last_first_tb;
924 tb->page_addr[n] = page_addr;
925 p = page_find(page_addr >> TARGET_PAGE_BITS);
926 tb->page_next[n] = p->first_tb;
927 last_first_tb = p->first_tb;
928 p->first_tb = (TranslationBlock *)((long)tb | n);
929 invalidate_page_bitmap(p);
931 #if defined(TARGET_HAS_SMC) || 1
933 #if defined(CONFIG_USER_ONLY)
934 if (p->flags & PAGE_WRITE) {
935 unsigned long host_start, host_end, addr;
936 int prot;
938 /* force the host page as non writable (writes will have a
939 page fault + mprotect overhead) */
940 host_start = page_addr & qemu_host_page_mask;
941 host_end = host_start + qemu_host_page_size;
942 prot = 0;
943 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
944 prot |= page_get_flags(addr);
945 mprotect((void *)host_start, qemu_host_page_size,
946 (prot & PAGE_BITS) & ~PAGE_WRITE);
947 #ifdef DEBUG_TB_INVALIDATE
948 printf("protecting code page: 0x%08lx\n",
949 host_start);
950 #endif
951 p->flags &= ~PAGE_WRITE;
953 #else
954 /* if some code is already present, then the pages are already
955 protected. So we handle the case where only the first TB is
956 allocated in a physical page */
957 if (!last_first_tb) {
958 target_ulong virt_addr;
960 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
961 tlb_protect_code(cpu_single_env, virt_addr);
963 #endif
965 #endif /* TARGET_HAS_SMC */
968 /* Allocate a new translation block. Flush the translation buffer if
969 too many translation blocks or too much generated code. */
970 TranslationBlock *tb_alloc(target_ulong pc)
972 TranslationBlock *tb;
974 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
975 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
976 return NULL;
977 tb = &tbs[nb_tbs++];
978 tb->pc = pc;
979 tb->cflags = 0;
980 return tb;
983 /* add a new TB and link it to the physical page tables. phys_page2 is
984 (-1) to indicate that only one page contains the TB. */
985 void tb_link_phys(TranslationBlock *tb,
986 target_ulong phys_pc, target_ulong phys_page2)
988 unsigned int h;
989 TranslationBlock **ptb;
991 /* add in the physical hash table */
992 h = tb_phys_hash_func(phys_pc);
993 ptb = &tb_phys_hash[h];
994 tb->phys_hash_next = *ptb;
995 *ptb = tb;
997 /* add in the page list */
998 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
999 if (phys_page2 != -1)
1000 tb_alloc_page(tb, 1, phys_page2);
1001 else
1002 tb->page_addr[1] = -1;
1003 #ifdef DEBUG_TB_CHECK
1004 tb_page_check();
1005 #endif
1008 /* link the tb with the other TBs */
1009 void tb_link(TranslationBlock *tb)
1011 #if !defined(CONFIG_USER_ONLY)
1013 VirtPageDesc *vp;
1014 target_ulong addr;
1016 /* save the code memory mappings (needed to invalidate the code) */
1017 addr = tb->pc & TARGET_PAGE_MASK;
1018 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1019 #ifdef DEBUG_TLB_CHECK
1020 if (vp->valid_tag == virt_valid_tag &&
1021 vp->phys_addr != tb->page_addr[0]) {
1022 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1023 addr, tb->page_addr[0], vp->phys_addr);
1025 #endif
1026 vp->phys_addr = tb->page_addr[0];
1027 if (vp->valid_tag != virt_valid_tag) {
1028 vp->valid_tag = virt_valid_tag;
1029 #if !defined(CONFIG_SOFTMMU)
1030 vp->prot = 0;
1031 #endif
1034 if (tb->page_addr[1] != -1) {
1035 addr += TARGET_PAGE_SIZE;
1036 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1037 #ifdef DEBUG_TLB_CHECK
1038 if (vp->valid_tag == virt_valid_tag &&
1039 vp->phys_addr != tb->page_addr[1]) {
1040 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1041 addr, tb->page_addr[1], vp->phys_addr);
1043 #endif
1044 vp->phys_addr = tb->page_addr[1];
1045 if (vp->valid_tag != virt_valid_tag) {
1046 vp->valid_tag = virt_valid_tag;
1047 #if !defined(CONFIG_SOFTMMU)
1048 vp->prot = 0;
1049 #endif
1053 #endif
1055 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1056 tb->jmp_next[0] = NULL;
1057 tb->jmp_next[1] = NULL;
1058 #ifdef USE_CODE_COPY
1059 tb->cflags &= ~CF_FP_USED;
1060 if (tb->cflags & CF_TB_FP_USED)
1061 tb->cflags |= CF_FP_USED;
1062 #endif
1064 /* init original jump addresses */
1065 if (tb->tb_next_offset[0] != 0xffff)
1066 tb_reset_jump(tb, 0);
1067 if (tb->tb_next_offset[1] != 0xffff)
1068 tb_reset_jump(tb, 1);
1071 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1072 tb[1].tc_ptr. Return NULL if not found */
1073 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1075 int m_min, m_max, m;
1076 unsigned long v;
1077 TranslationBlock *tb;
1079 if (nb_tbs <= 0)
1080 return NULL;
1081 if (tc_ptr < (unsigned long)code_gen_buffer ||
1082 tc_ptr >= (unsigned long)code_gen_ptr)
1083 return NULL;
1084 /* binary search (cf Knuth) */
1085 m_min = 0;
1086 m_max = nb_tbs - 1;
1087 while (m_min <= m_max) {
1088 m = (m_min + m_max) >> 1;
1089 tb = &tbs[m];
1090 v = (unsigned long)tb->tc_ptr;
1091 if (v == tc_ptr)
1092 return tb;
1093 else if (tc_ptr < v) {
1094 m_max = m - 1;
1095 } else {
1096 m_min = m + 1;
1099 return &tbs[m_max];
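/* Illustrative note: tbs[] is filled in generation order and tc_ptr
   only grows between flushes, so the array is sorted by tc_ptr and a
   plain binary search works.  When tc_ptr points into the middle of a
   block, the loop exits with m_max naming the last TB whose start is
   below it, i.e. the block that contains the address. */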
1102 static void tb_reset_jump_recursive(TranslationBlock *tb);
1104 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1106 TranslationBlock *tb1, *tb_next, **ptb;
1107 unsigned int n1;
1109 tb1 = tb->jmp_next[n];
1110 if (tb1 != NULL) {
1111 /* find head of list */
1112 for(;;) {
1113 n1 = (long)tb1 & 3;
1114 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1115 if (n1 == 2)
1116 break;
1117 tb1 = tb1->jmp_next[n1];
1119 /* we are now sure that tb jumps to tb1 */
1120 tb_next = tb1;
1122 /* remove tb from the jmp_first list */
1123 ptb = &tb_next->jmp_first;
1124 for(;;) {
1125 tb1 = *ptb;
1126 n1 = (long)tb1 & 3;
1127 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1128 if (n1 == n && tb1 == tb)
1129 break;
1130 ptb = &tb1->jmp_next[n1];
1132 *ptb = tb->jmp_next[n];
1133 tb->jmp_next[n] = NULL;
1135 /* suppress the jump to next tb in generated code */
1136 tb_reset_jump(tb, n);
1138 /* suppress jumps in the tb on which we could have jumped */
1139 tb_reset_jump_recursive(tb_next);
1143 static void tb_reset_jump_recursive(TranslationBlock *tb)
1145 tb_reset_jump_recursive2(tb, 0);
1146 tb_reset_jump_recursive2(tb, 1);
1149 #if defined(TARGET_HAS_ICE)
1150 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1152 target_ulong phys_addr;
1154 phys_addr = cpu_get_phys_page_debug(env, pc);
1155 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1157 #endif
1159 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1160 breakpoint is reached */
1161 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1163 #if defined(TARGET_HAS_ICE)
1164 int i;
1166 for(i = 0; i < env->nb_breakpoints; i++) {
1167 if (env->breakpoints[i] == pc)
1168 return 0;
1171 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1172 return -1;
1173 env->breakpoints[env->nb_breakpoints++] = pc;
1175 breakpoint_invalidate(env, pc);
1176 return 0;
1177 #else
1178 return -1;
1179 #endif
1182 /* remove a breakpoint */
1183 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1185 #if defined(TARGET_HAS_ICE)
1186 int i;
1187 for(i = 0; i < env->nb_breakpoints; i++) {
1188 if (env->breakpoints[i] == pc)
1189 goto found;
1191 return -1;
1192 found:
1193 env->nb_breakpoints--;
1194 if (i < env->nb_breakpoints)
1195 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1197 breakpoint_invalidate(env, pc);
1198 return 0;
1199 #else
1200 return -1;
1201 #endif
1204 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1205 CPU loop after each instruction */
1206 void cpu_single_step(CPUState *env, int enabled)
1208 #if defined(TARGET_HAS_ICE)
1209 if (env->singlestep_enabled != enabled) {
1210 env->singlestep_enabled = enabled;
1211 /* must flush all the translated code to avoid inconsistencies */
1212 /* XXX: only flush what is necessary */
1213 tb_flush(env);
1215 #endif
1218 /* enable or disable low levels log */
1219 void cpu_set_log(int log_flags)
1221 loglevel = log_flags;
1222 if (loglevel && !logfile) {
1223 logfile = fopen(logfilename, "w");
1224 if (!logfile) {
1225 perror(logfilename);
1226 _exit(1);
1228 #if !defined(CONFIG_SOFTMMU)
1229 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1231 static uint8_t logfile_buf[4096];
1232 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1234 #else
1235 setvbuf(logfile, NULL, _IOLBF, 0);
1236 #endif
1240 void cpu_set_log_filename(const char *filename)
1242 logfilename = strdup(filename);
1245 /* mask must never be zero, except for A20 change call */
1246 void cpu_interrupt(CPUState *env, int mask)
1248 TranslationBlock *tb;
1249 static int interrupt_lock;
1251 env->interrupt_request |= mask;
1252 /* if the cpu is currently executing code, we must unlink it and
1253 all the potentially executing TB */
1254 tb = env->current_tb;
1255 if (tb && !testandset(&interrupt_lock)) {
1256 env->current_tb = NULL;
1257 tb_reset_jump_recursive(tb);
1258 interrupt_lock = 0;
1262 void cpu_reset_interrupt(CPUState *env, int mask)
1264 env->interrupt_request &= ~mask;
1267 CPULogItem cpu_log_items[] = {
1268 { CPU_LOG_TB_OUT_ASM, "out_asm",
1269 "show generated host assembly code for each compiled TB" },
1270 { CPU_LOG_TB_IN_ASM, "in_asm",
1271 "show target assembly code for each compiled TB" },
1272 { CPU_LOG_TB_OP, "op",
1273 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1274 #ifdef TARGET_I386
1275 { CPU_LOG_TB_OP_OPT, "op_opt",
1276 "show micro ops after optimization for each compiled TB" },
1277 #endif
1278 { CPU_LOG_INT, "int",
1279 "show interrupts/exceptions in short format" },
1280 { CPU_LOG_EXEC, "exec",
1281 "show trace before each executed TB (lots of logs)" },
1282 { CPU_LOG_TB_CPU, "cpu",
1283 "show CPU state before bloc translation" },
1284 #ifdef TARGET_I386
1285 { CPU_LOG_PCALL, "pcall",
1286 "show protected mode far calls/returns/exceptions" },
1287 #endif
1288 #ifdef DEBUG_IOPORT
1289 { CPU_LOG_IOPORT, "ioport",
1290 "show all i/o ports accesses" },
1291 #endif
1292 { 0, NULL, NULL },
1295 static int cmp1(const char *s1, int n, const char *s2)
1297 if (strlen(s2) != n)
1298 return 0;
1299 return memcmp(s1, s2, n) == 0;
1302 /* takes a comma separated list of log masks. Return 0 if error. */
1303 int cpu_str_to_log_mask(const char *str)
1305 CPULogItem *item;
1306 int mask;
1307 const char *p, *p1;
1309 p = str;
1310 mask = 0;
1311 for(;;) {
1312 p1 = strchr(p, ',');
1313 if (!p1)
1314 p1 = p + strlen(p);
1315 if(cmp1(p,p1-p,"all")) {
1316 for(item = cpu_log_items; item->mask != 0; item++) {
1317 mask |= item->mask;
1319 } else {
1320 for(item = cpu_log_items; item->mask != 0; item++) {
1321 if (cmp1(p, p1 - p, item->name))
1322 goto found;
1324 return 0;
1326 found:
1327 mask |= item->mask;
1328 if (*p1 != ',')
1329 break;
1330 p = p1 + 1;
1332 return mask;
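/* Usage sketch (illustrative): cpu_str_to_log_mask("in_asm,cpu") looks
   each comma separated name up in cpu_log_items[] and returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU; an unknown name yields 0, and the
   special name "all" selects every mask in the table. */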
1335 void cpu_abort(CPUState *env, const char *fmt, ...)
1337 va_list ap;
1339 va_start(ap, fmt);
1340 fprintf(stderr, "qemu: fatal: ");
1341 vfprintf(stderr, fmt, ap);
1342 fprintf(stderr, "\n");
1343 #ifdef TARGET_I386
1344 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1345 #else
1346 cpu_dump_state(env, stderr, fprintf, 0);
1347 #endif
1348 va_end(ap);
1349 abort();
1352 #if !defined(CONFIG_USER_ONLY)
1354 /* NOTE: if flush_global is true, also flush global entries (not
1355 implemented yet) */
1356 void tlb_flush(CPUState *env, int flush_global)
1358 int i;
1360 #if defined(DEBUG_TLB)
1361 printf("tlb_flush:\n");
1362 #endif
1363 /* must reset current TB so that interrupts cannot modify the
1364 links while we are modifying them */
1365 env->current_tb = NULL;
1367 for(i = 0; i < CPU_TLB_SIZE; i++) {
1368 env->tlb_read[0][i].address = -1;
1369 env->tlb_write[0][i].address = -1;
1370 env->tlb_read[1][i].address = -1;
1371 env->tlb_write[1][i].address = -1;
1374 virt_page_flush();
1375 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1377 #if !defined(CONFIG_SOFTMMU)
1378 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1379 #endif
1380 #ifdef USE_KQEMU
1381 if (env->kqemu_enabled) {
1382 kqemu_flush(env, flush_global);
1384 #endif
1385 tlb_flush_count++;
1388 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1390 if (addr == (tlb_entry->address &
1391 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1392 tlb_entry->address = -1;
1395 void tlb_flush_page(CPUState *env, target_ulong addr)
1397 int i, n;
1398 VirtPageDesc *vp;
1399 PageDesc *p;
1400 TranslationBlock *tb;
1402 #if defined(DEBUG_TLB)
1403 printf("tlb_flush_page: 0x%08x\n", addr);
1404 #endif
1405 /* must reset current TB so that interrupts cannot modify the
1406 links while we are modifying them */
1407 env->current_tb = NULL;
1409 addr &= TARGET_PAGE_MASK;
1410 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1411 tlb_flush_entry(&env->tlb_read[0][i], addr);
1412 tlb_flush_entry(&env->tlb_write[0][i], addr);
1413 tlb_flush_entry(&env->tlb_read[1][i], addr);
1414 tlb_flush_entry(&env->tlb_write[1][i], addr);
1416 /* remove from the virtual pc hash table all the TB at this
1417 virtual address */
1419 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1420 if (vp && vp->valid_tag == virt_valid_tag) {
1421 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1422 if (p) {
1423 /* we remove all the links to the TBs in this virtual page */
1424 tb = p->first_tb;
1425 while (tb != NULL) {
1426 n = (long)tb & 3;
1427 tb = (TranslationBlock *)((long)tb & ~3);
1428 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1429 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1430 tb_invalidate(tb);
1432 tb = tb->page_next[n];
1435 vp->valid_tag = 0;
1438 #if !defined(CONFIG_SOFTMMU)
1439 if (addr < MMAP_AREA_END)
1440 munmap((void *)addr, TARGET_PAGE_SIZE);
1441 #endif
1442 #ifdef USE_KQEMU
1443 if (env->kqemu_enabled) {
1444 kqemu_flush_page(env, addr);
1446 #endif
1449 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1451 if (addr == (tlb_entry->address &
1452 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1453 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1454 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1455 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1459 /* update the TLBs so that writes to code in the virtual page 'addr'
1460 can be detected */
1461 static void tlb_protect_code(CPUState *env, target_ulong addr)
1463 int i;
1465 addr &= TARGET_PAGE_MASK;
1466 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1467 tlb_protect_code1(&env->tlb_write[0][i], addr);
1468 tlb_protect_code1(&env->tlb_write[1][i], addr);
1469 #if !defined(CONFIG_SOFTMMU)
1470 /* NOTE: as we generated the code for this page, it is already at
1471 least readable */
1472 if (addr < MMAP_AREA_END)
1473 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1474 #endif
1477 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1478 unsigned long phys_addr)
1480 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1481 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1482 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1486 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1487 tested self modifying code */
1488 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1490 int i;
1492 phys_addr &= TARGET_PAGE_MASK;
1493 phys_addr += (long)phys_ram_base;
1494 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1495 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1496 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1499 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1500 unsigned long start, unsigned long length)
1502 unsigned long addr;
1503 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1504 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1505 if ((addr - start) < length) {
1506 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1511 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
1512 int dirty_flags)
1514 CPUState *env;
1515 unsigned long length, start1;
1516 int i, mask, len;
1517 uint8_t *p;
1519 start &= TARGET_PAGE_MASK;
1520 end = TARGET_PAGE_ALIGN(end);
1522 length = end - start;
1523 if (length == 0)
1524 return;
1525 mask = ~dirty_flags;
1526 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1527 len = length >> TARGET_PAGE_BITS;
1528 for(i = 0; i < len; i++)
1529 p[i] &= mask;
1531 env = cpu_single_env;
1532 /* we modify the TLB cache so that the dirty bit will be set again
1533 when accessing the range */
1534 start1 = start + (unsigned long)phys_ram_base;
1535 for(i = 0; i < CPU_TLB_SIZE; i++)
1536 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1537 for(i = 0; i < CPU_TLB_SIZE; i++)
1538 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1540 #if !defined(CONFIG_SOFTMMU)
1541 /* XXX: this is expensive */
1543 VirtPageDesc *p;
1544 int j;
1545 target_ulong addr;
1547 for(i = 0; i < L1_SIZE; i++) {
1548 p = l1_virt_map[i];
1549 if (p) {
1550 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1551 for(j = 0; j < L2_SIZE; j++) {
1552 if (p->valid_tag == virt_valid_tag &&
1553 p->phys_addr >= start && p->phys_addr < end &&
1554 (p->prot & PROT_WRITE)) {
1555 if (addr < MMAP_AREA_END) {
1556 mprotect((void *)addr, TARGET_PAGE_SIZE,
1557 p->prot & ~PROT_WRITE);
1560 addr += TARGET_PAGE_SIZE;
1561 p++;
1566 #endif
1569 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1570 unsigned long start)
1572 unsigned long addr;
1573 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1574 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1575 if (addr == start) {
1576 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1581 /* update the TLB corresponding to virtual page vaddr and phys addr
1582 addr so that it is no longer dirty */
1583 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1585 CPUState *env = cpu_single_env;
1586 int i;
1588 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;
1590 addr &= TARGET_PAGE_MASK;
1591 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1592 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1593 tlb_set_dirty1(&env->tlb_write[1][i], addr);
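/* Note on dirty tracking (illustrative): phys_ram_dirty (allocated in
   io_mem_init) keeps one byte per target RAM page, 0xff meaning fully
   dirty.  tlb_set_page() below maps clean RAM pages for writing through
   IO_MEM_NOTDIRTY, so the first write lands in notdirty_mem_write*(),
   which marks the page dirty here and flips the TLB entry back to a
   plain RAM mapping. */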
1596 /* add a new TLB entry. At most one entry for a given virtual address
1597 is permitted. Return 0 if OK or 2 if the page could not be mapped
1598 (can only happen in non SOFTMMU mode for I/O pages or pages
1599 conflicting with the host address space). */
1600 int tlb_set_page(CPUState *env, target_ulong vaddr,
1601 target_phys_addr_t paddr, int prot,
1602 int is_user, int is_softmmu)
1604 PhysPageDesc *p;
1605 unsigned long pd;
1606 TranslationBlock *first_tb;
1607 unsigned int index;
1608 target_ulong address;
1609 unsigned long addend;
1610 int ret;
1612 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1613 first_tb = NULL;
1614 if (!p) {
1615 pd = IO_MEM_UNASSIGNED;
1616 } else {
1617 PageDesc *p1;
1618 pd = p->phys_offset;
1619 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1620 /* NOTE: we also allocate the page at this stage */
1621 p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1622 first_tb = p1->first_tb;
1625 #if defined(DEBUG_TLB)
1626 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1627 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1628 #endif
1630 ret = 0;
1631 #if !defined(CONFIG_SOFTMMU)
1632 if (is_softmmu)
1633 #endif
1635 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1636 /* IO memory case */
1637 address = vaddr | pd;
1638 addend = paddr;
1639 } else {
1640 /* standard memory */
1641 address = vaddr;
1642 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1645 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1646 addend -= vaddr;
1647 if (prot & PAGE_READ) {
1648 env->tlb_read[is_user][index].address = address;
1649 env->tlb_read[is_user][index].addend = addend;
1650 } else {
1651 env->tlb_read[is_user][index].address = -1;
1652 env->tlb_read[is_user][index].addend = -1;
1654 if (prot & PAGE_WRITE) {
1655 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1656 /* ROM: access is ignored (same as unassigned) */
1657 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1658 env->tlb_write[is_user][index].addend = addend;
1659 } else
1660 /* XXX: the PowerPC code seems not ready to handle
1661 self modifying code with DCBI */
1662 #if defined(TARGET_HAS_SMC) || 1
1663 if (first_tb) {
1664 /* if code is present, we use a specific memory
1665 handler. It works only for physical memory access */
1666 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1667 env->tlb_write[is_user][index].addend = addend;
1668 } else
1669 #endif
1670 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1671 !cpu_physical_memory_is_dirty(pd)) {
1672 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1673 env->tlb_write[is_user][index].addend = addend;
1674 } else {
1675 env->tlb_write[is_user][index].address = address;
1676 env->tlb_write[is_user][index].addend = addend;
1678 } else {
1679 env->tlb_write[is_user][index].address = -1;
1680 env->tlb_write[is_user][index].addend = -1;
1683 #if !defined(CONFIG_SOFTMMU)
1684 else {
1685 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1686 /* IO access: no mapping is done as it will be handled by the
1687 soft MMU */
1688 if (!(env->hflags & HF_SOFTMMU_MASK))
1689 ret = 2;
1690 } else {
1691 void *map_addr;
1693 if (vaddr >= MMAP_AREA_END) {
1694 ret = 2;
1695 } else {
1696 if (prot & PROT_WRITE) {
1697 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1698 #if defined(TARGET_HAS_SMC) || 1
1699 first_tb ||
1700 #endif
1701 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1702 !cpu_physical_memory_is_dirty(pd))) {
1703 /* ROM: we do as if code was inside */
1704 /* if code is present, we only map as read only and save the
1705 original mapping */
1706 VirtPageDesc *vp;
1708 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1709 vp->phys_addr = pd;
1710 vp->prot = prot;
1711 vp->valid_tag = virt_valid_tag;
1712 prot &= ~PAGE_WRITE;
1715 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1716 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1717 if (map_addr == MAP_FAILED) {
1718 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1719 paddr, vaddr);
1724 #endif
1725 return ret;
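/* Note on the addend field (illustrative): for RAM pages the code above
   stores addend = phys_ram_base + (pd & TARGET_PAGE_MASK) - vaddr, so
   once the address tag matches, the softmmu fast path recovers the host
   pointer for a guest virtual address with a single add (vaddr +
   addend). */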
1728 /* called from signal handler: invalidate the code and unprotect the
1729 page. Return TRUE if the fault was successfully handled. */
1730 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1732 #if !defined(CONFIG_SOFTMMU)
1733 VirtPageDesc *vp;
1735 #if defined(DEBUG_TLB)
1736 printf("page_unprotect: addr=0x%08x\n", addr);
1737 #endif
1738 addr &= TARGET_PAGE_MASK;
1740 /* if it is not mapped, no need to worry here */
1741 if (addr >= MMAP_AREA_END)
1742 return 0;
1743 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1744 if (!vp)
1745 return 0;
1746 /* NOTE: in this case, validate_tag is _not_ tested as it
1747 validates only the code TLB */
1748 if (vp->valid_tag != virt_valid_tag)
1749 return 0;
1750 if (!(vp->prot & PAGE_WRITE))
1751 return 0;
1752 #if defined(DEBUG_TLB)
1753 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1754 addr, vp->phys_addr, vp->prot);
1755 #endif
1756 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1757 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1758 (unsigned long)addr, vp->prot);
1759 /* set the dirty bit */
1760 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1761 /* flush the code inside */
1762 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1763 return 1;
1764 #else
1765 return 0;
1766 #endif
1769 #else
1771 void tlb_flush(CPUState *env, int flush_global)
1775 void tlb_flush_page(CPUState *env, target_ulong addr)
1779 int tlb_set_page(CPUState *env, target_ulong vaddr,
1780 target_phys_addr_t paddr, int prot,
1781 int is_user, int is_softmmu)
1783 return 0;
1786 /* dump memory mappings */
1787 void page_dump(FILE *f)
1789 unsigned long start, end;
1790 int i, j, prot, prot1;
1791 PageDesc *p;
1793 fprintf(f, "%-8s %-8s %-8s %s\n",
1794 "start", "end", "size", "prot");
1795 start = -1;
1796 end = -1;
1797 prot = 0;
1798 for(i = 0; i <= L1_SIZE; i++) {
1799 if (i < L1_SIZE)
1800 p = l1_map[i];
1801 else
1802 p = NULL;
1803 for(j = 0;j < L2_SIZE; j++) {
1804 if (!p)
1805 prot1 = 0;
1806 else
1807 prot1 = p[j].flags;
1808 if (prot1 != prot) {
1809 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1810 if (start != -1) {
1811 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1812 start, end, end - start,
1813 prot & PAGE_READ ? 'r' : '-',
1814 prot & PAGE_WRITE ? 'w' : '-',
1815 prot & PAGE_EXEC ? 'x' : '-');
1817 if (prot1 != 0)
1818 start = end;
1819 else
1820 start = -1;
1821 prot = prot1;
1823 if (!p)
1824 break;
1829 int page_get_flags(unsigned long address)
1831 PageDesc *p;
1833 p = page_find(address >> TARGET_PAGE_BITS);
1834 if (!p)
1835 return 0;
1836 return p->flags;
1839 /* modify the flags of a page and invalidate the code if
1840 necessary. The flag PAGE_WRITE_ORG is set automatically
1841 depending on PAGE_WRITE */
1842 void page_set_flags(unsigned long start, unsigned long end, int flags)
1844 PageDesc *p;
1845 unsigned long addr;
1847 start = start & TARGET_PAGE_MASK;
1848 end = TARGET_PAGE_ALIGN(end);
1849 if (flags & PAGE_WRITE)
1850 flags |= PAGE_WRITE_ORG;
1851 spin_lock(&tb_lock);
1852 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1853 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1854 /* if the write protection is set, then we invalidate the code
1855 inside */
1856 if (!(p->flags & PAGE_WRITE) &&
1857 (flags & PAGE_WRITE) &&
1858 p->first_tb) {
1859 tb_invalidate_phys_page(addr, 0, NULL);
1861 p->flags = flags;
1863 spin_unlock(&tb_lock);
1866 /* called from signal handler: invalidate the code and unprotect the
1867 page. Return TRUE if the fault was successfully handled. */
1868 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1870 unsigned int page_index, prot, pindex;
1871 PageDesc *p, *p1;
1872 unsigned long host_start, host_end, addr;
1874 host_start = address & qemu_host_page_mask;
1875 page_index = host_start >> TARGET_PAGE_BITS;
1876 p1 = page_find(page_index);
1877 if (!p1)
1878 return 0;
1879 host_end = host_start + qemu_host_page_size;
1880 p = p1;
1881 prot = 0;
1882 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1883 prot |= p->flags;
1884 p++;
1886 /* if the page was really writable, then we change its
1887 protection back to writable */
1888 if (prot & PAGE_WRITE_ORG) {
1889 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1890 if (!(p1[pindex].flags & PAGE_WRITE)) {
1891 mprotect((void *)host_start, qemu_host_page_size,
1892 (prot & PAGE_BITS) | PAGE_WRITE);
1893 p1[pindex].flags |= PAGE_WRITE;
1894 /* and since the content will be modified, we must invalidate
1895 the corresponding translated code. */
1896 tb_invalidate_phys_page(address, pc, puc);
1897 #ifdef DEBUG_TB_CHECK
1898 tb_invalidate_check(address);
1899 #endif
1900 return 1;
1903 return 0;
1906 /* call this function when system calls directly modify a memory area */
1907 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1909 unsigned long start, end, addr;
1911 start = (unsigned long)data;
1912 end = start + data_size;
1913 start &= TARGET_PAGE_MASK;
1914 end = TARGET_PAGE_ALIGN(end);
1915 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1916 page_unprotect(addr, 0, NULL);
1920 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1923 #endif /* defined(CONFIG_USER_ONLY) */
1925 /* register physical memory. 'size' must be a multiple of the target
1926 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1927 io memory page */
1928 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1929 unsigned long size,
1930 unsigned long phys_offset)
1932 unsigned long addr, end_addr;
1933 PhysPageDesc *p;
1935 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1936 end_addr = start_addr + size;
1937 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1938 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
1939 p->phys_offset = phys_offset;
1940 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1941 phys_offset += TARGET_PAGE_SIZE;
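/* Usage sketch (illustrative, hypothetical names): a machine model
   would typically register RAM that starts at offset 0 of
   phys_ram_base as
     cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
   and map a device by passing the token returned by
   cpu_register_io_memory() as phys_offset, so that the low bits of
   phys_offset select the I/O handlers for that range. */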
1945 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1947 return 0;
1950 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1954 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1955 unassigned_mem_readb,
1956 unassigned_mem_readb,
1957 unassigned_mem_readb,
1960 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1961 unassigned_mem_writeb,
1962 unassigned_mem_writeb,
1963 unassigned_mem_writeb,
1966 /* self modifying code support in soft mmu mode : writing to a page
1967 containing code comes to these functions */
1969 static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1971 unsigned long phys_addr;
1973 phys_addr = addr - (unsigned long)phys_ram_base;
1974 #if !defined(CONFIG_USER_ONLY)
1975 tb_invalidate_phys_page_fast(phys_addr, 1);
1976 #endif
1977 stb_p((uint8_t *)(long)addr, val);
1978 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
1981 static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1983 unsigned long phys_addr;
1985 phys_addr = addr - (unsigned long)phys_ram_base;
1986 #if !defined(CONFIG_USER_ONLY)
1987 tb_invalidate_phys_page_fast(phys_addr, 2);
1988 #endif
1989 stw_p((uint8_t *)(long)addr, val);
1990 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
1993 static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1995 unsigned long phys_addr;
1997 phys_addr = addr - (unsigned long)phys_ram_base;
1998 #if !defined(CONFIG_USER_ONLY)
1999 tb_invalidate_phys_page_fast(phys_addr, 4);
2000 #endif
2001 stl_p((uint8_t *)(long)addr, val);
2002 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
2005 static CPUReadMemoryFunc *code_mem_read[3] = {
2006 NULL, /* never used */
2007 NULL, /* never used */
2008 NULL, /* never used */
2011 static CPUWriteMemoryFunc *code_mem_write[3] = {
2012 code_mem_writeb,
2013 code_mem_writew,
2014 code_mem_writel,
2017 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2019 stb_p((uint8_t *)(long)addr, val);
2020 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2023 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2025 stw_p((uint8_t *)(long)addr, val);
2026 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2029 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2031 stl_p((uint8_t *)(long)addr, val);
2032 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2035 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2036 notdirty_mem_writeb,
2037 notdirty_mem_writew,
2038 notdirty_mem_writel,
2041 static void io_mem_init(void)
2043 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
2044 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2045 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
2046 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
2047 io_mem_nb = 5;
2049 /* alloc dirty bits array */
2050 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);

/* mem_read and mem_write are arrays of three functions giving the byte
   (index 0), word (index 1) and dword (index 2) access routines. All
   three functions must be supplied. If io_index is non-zero, the
   corresponding I/O zone is modified; if it is zero, a new I/O zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(); -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
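
/* Illustrative sketch (not part of the original code, compiled out): typical
   use of cpu_register_io_memory() by a device model. The mydev_* names and
   the 0x40000000 base address are hypothetical; the returned value is then
   passed as the phys_offset argument of cpu_register_physical_memory() to
   map the I/O zone into the guest physical address space, as the comment
   above describes. */
#if 0
typedef struct MyDevState {
    uint32_t reg0;
} MyDevState;

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    /* a single 32 bit register in this sketch */
    return s->reg0;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->reg0 = val;
}

/* index 0 = byte, 1 = word, 2 = dword; all three must be supplied */
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl,
    mydev_readl,
    mydev_readl,
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel,
    mydev_writel,
    mydev_writel,
};

static void mydev_init(MyDevState *s)
{
    int iomemtype;

    /* io_index == 0 requests a newly allocated I/O zone */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(0x40000000, TARGET_PAGE_SIZE, iomemtype);
}
#endif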

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* never used */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The RAM page is not marked dirty and
   the code inside it is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
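
/* Illustrative sketch (not part of the original code, compiled out): the
   intended use of stl_phys_notdirty(). When an emulated MMU updates the
   accessed/dirty bits of a page table entry during a TLB fill, it writes
   the PTE back with stl_phys_notdirty() so that the write does not itself
   mark the page dirty or trigger code invalidation. pte_addr, pte and the
   bit positions below are hypothetical, target-specific placeholders. */
#if 0
static void update_pte_flags(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    pte |= (1 << 5);               /* accessed bit (placeholder) */
    if (is_write)
        pte |= (1 << 6);           /* dirty bit (placeholder) */
    /* write back without touching the page's dirty state */
    stl_phys_notdirty(pte_addr, pte);
}
#endif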

/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
    }
}

#endif
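
/* Illustrative sketch (not part of the original code, compiled out): how
   device emulation code typically uses the accessors above. A DMA-style
   copy goes through cpu_physical_memory_rw(), which handles the RAM and
   MMIO cases and keeps the dirty bits and translated code consistent;
   single aligned words can be accessed with ldl_phys()/stl_phys(). The
   dma_to_guest() helper and its arguments are hypothetical. */
#if 0
static void dma_to_guest(target_phys_addr_t dma_addr,
                         const uint8_t *src, int size)
{
    /* is_write = 1: copy from the host buffer into guest memory */
    cpu_physical_memory_rw(dma_addr, (uint8_t *)src, size, 1);

    /* a single aligned word read, e.g. to check what was stored */
    (void)ldl_phys(dma_addr);
}
#endif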

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
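
/* Illustrative sketch (not part of the original code, compiled out):
   cpu_memory_rw_debug() is the entry point used by debugger-style code
   (e.g. a gdb stub or the monitor) to access guest *virtual* addresses;
   each page is translated with cpu_get_phys_page_debug() and then handed
   to cpu_physical_memory_rw(). read_guest_u32() is a hypothetical helper. */
#if 0
static int read_guest_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    /* is_write = 0: read from guest memory */
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1; /* page not mapped */
    *out = ldl_p(buf);
    return 0;
}
#endif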

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
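
/* Illustrative sketch (not part of the original code, compiled out):
   dump_exec_info() takes a printf-like callback so the statistics can go
   either to a plain stdio stream or to the monitor. With stdio the call is
   simply the one below; dump_exec_info_to_stderr() is a hypothetical
   wrapper. */
#if 0
static void dump_exec_info_to_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif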

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif