Eliminate some uses of T2
[qemu/malc.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #endif
42 //#define DEBUG_TB_INVALIDATE
43 //#define DEBUG_FLUSH
44 //#define DEBUG_TLB
45 //#define DEBUG_UNASSIGNED
47 /* make various TB consistency checks */
48 //#define DEBUG_TB_CHECK
49 //#define DEBUG_TLB_CHECK
51 //#define DEBUG_IOPORT
52 //#define DEBUG_SUBPAGE
54 #if !defined(CONFIG_USER_ONLY)
55 /* TB consistency checks only implemented for usermode emulation. */
56 #undef DEBUG_TB_CHECK
57 #endif
59 /* threshold to flush the translated code buffer */
60 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
62 #define SMC_BITMAP_USE_THRESHOLD 10
64 #define MMAP_AREA_START 0x00000000
65 #define MMAP_AREA_END 0xa8000000
67 #if defined(TARGET_SPARC64)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 41
69 #elif defined(TARGET_SPARC)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 36
71 #elif defined(TARGET_ALPHA)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 #define TARGET_VIRT_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_PPC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #else
77 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78 #define TARGET_PHYS_ADDR_SPACE_BITS 32
79 #endif
81 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
82 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
83 int nb_tbs;
84 /* any access to the tbs or the page table must use this lock */
85 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
88 uint8_t *code_gen_ptr;
90 int phys_ram_size;
91 int phys_ram_fd;
92 uint8_t *phys_ram_base;
93 uint8_t *phys_ram_dirty;
94 static ram_addr_t phys_ram_alloc_offset = 0;
96 CPUState *first_cpu;
97 /* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
99 CPUState *cpu_single_env;
101 typedef struct PageDesc {
102 /* list of TBs intersecting this ram page */
103 TranslationBlock *first_tb;
104 /* in order to optimize self modifying code handling, we count the number
105 of code write accesses to a given page; past a threshold we switch to a bitmap */
106 unsigned int code_write_count;
107 uint8_t *code_bitmap;
108 #if defined(CONFIG_USER_ONLY)
109 unsigned long flags;
110 #endif
111 } PageDesc;
113 typedef struct PhysPageDesc {
114 /* offset in host memory of the page + io_index in the low 12 bits */
115 uint32_t phys_offset;
116 } PhysPageDesc;
118 #define L2_BITS 10
119 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
120 /* XXX: this is a temporary hack for alpha target.
121 * In the future, this is to be replaced by a multi-level table
122 * to actually be able to handle the complete 64-bit address space. */
124 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
125 #else
126 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
127 #endif
129 #define L1_SIZE (1 << L1_BITS)
130 #define L2_SIZE (1 << L2_BITS)
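/* Illustrative worked example (not part of the original file): with
   TARGET_PAGE_BITS == 12 and L2_BITS == 10, the page index of the 32-bit
   virtual address 0x12345678 is 0x12345678 >> 12 = 0x12345; the L1 slot is
   0x12345 >> 10 = 0x48 and the L2 entry is 0x12345 & (L2_SIZE - 1) = 0x345,
   so the PageDesc for that page is l1_map[0x48][0x345]. */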
132 static void io_mem_init(void);
134 unsigned long qemu_real_host_page_size;
135 unsigned long qemu_host_page_bits;
136 unsigned long qemu_host_page_size;
137 unsigned long qemu_host_page_mask;
139 /* XXX: for system emulation, it could just be an array */
140 static PageDesc *l1_map[L1_SIZE];
141 PhysPageDesc **l1_phys_map;
143 /* io memory support */
144 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
145 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
146 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
147 static int io_mem_nb;
148 #if defined(CONFIG_SOFTMMU)
149 static int io_mem_watch;
150 #endif
152 /* log support */
153 char *logfilename = "/tmp/qemu.log";
154 FILE *logfile;
155 int loglevel;
156 static int log_append = 0;
158 /* statistics */
159 static int tlb_flush_count;
160 static int tb_flush_count;
161 static int tb_phys_invalidate_count;
163 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
164 typedef struct subpage_t {
165 target_phys_addr_t base;
166 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
167 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
168 void *opaque[TARGET_PAGE_SIZE][2][4];
169 } subpage_t;
171 static void page_init(void)
173 /* NOTE: we can always assume that qemu_host_page_size >=
174 TARGET_PAGE_SIZE */
175 #ifdef _WIN32
177 SYSTEM_INFO system_info;
178 DWORD old_protect;
180 GetSystemInfo(&system_info);
181 qemu_real_host_page_size = system_info.dwPageSize;
183 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
184 PAGE_EXECUTE_READWRITE, &old_protect);
186 #else
187 qemu_real_host_page_size = getpagesize();
189 unsigned long start, end;
191 start = (unsigned long)code_gen_buffer;
192 start &= ~(qemu_real_host_page_size - 1);
194 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
195 end += qemu_real_host_page_size - 1;
196 end &= ~(qemu_real_host_page_size - 1);
198 mprotect((void *)start, end - start,
199 PROT_READ | PROT_WRITE | PROT_EXEC);
201 #endif
203 if (qemu_host_page_size == 0)
204 qemu_host_page_size = qemu_real_host_page_size;
205 if (qemu_host_page_size < TARGET_PAGE_SIZE)
206 qemu_host_page_size = TARGET_PAGE_SIZE;
207 qemu_host_page_bits = 0;
208 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
209 qemu_host_page_bits++;
210 qemu_host_page_mask = ~(qemu_host_page_size - 1);
211 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
212 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
214 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
216 long long startaddr, endaddr;
217 FILE *f;
218 int n;
220 f = fopen("/proc/self/maps", "r");
221 if (f) {
222 do {
223 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
224 if (n == 2) {
225 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
226 TARGET_PAGE_ALIGN(endaddr),
227 PAGE_RESERVED);
229 } while (!feof(f));
230 fclose(f);
233 #endif
236 static inline PageDesc *page_find_alloc(unsigned int index)
238 PageDesc **lp, *p;
240 lp = &l1_map[index >> L2_BITS];
241 p = *lp;
242 if (!p) {
243 /* allocate if not found */
244 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
245 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
246 *lp = p;
248 return p + (index & (L2_SIZE - 1));
251 static inline PageDesc *page_find(unsigned int index)
253 PageDesc *p;
255 p = l1_map[index >> L2_BITS];
256 if (!p)
257 return 0;
258 return p + (index & (L2_SIZE - 1));
261 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
263 void **lp, **p;
264 PhysPageDesc *pd;
266 p = (void **)l1_phys_map;
267 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
269 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
270 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
271 #endif
272 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
273 p = *lp;
274 if (!p) {
275 /* allocate if not found */
276 if (!alloc)
277 return NULL;
278 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
279 memset(p, 0, sizeof(void *) * L1_SIZE);
280 *lp = p;
282 #endif
283 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
284 pd = *lp;
285 if (!pd) {
286 int i;
287 /* allocate if not found */
288 if (!alloc)
289 return NULL;
290 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
291 *lp = pd;
292 for (i = 0; i < L2_SIZE; i++)
293 pd[i].phys_offset = IO_MEM_UNASSIGNED;
295 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
298 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
300 return phys_page_find_alloc(index, 0);
303 #if !defined(CONFIG_USER_ONLY)
304 static void tlb_protect_code(ram_addr_t ram_addr);
305 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
306 target_ulong vaddr);
307 #endif
309 void cpu_exec_init(CPUState *env)
311 CPUState **penv;
312 int cpu_index;
314 if (!code_gen_ptr) {
315 cpu_gen_init();
316 code_gen_ptr = code_gen_buffer;
317 page_init();
318 io_mem_init();
320 env->next_cpu = NULL;
321 penv = &first_cpu;
322 cpu_index = 0;
323 while (*penv != NULL) {
324 penv = (CPUState **)&(*penv)->next_cpu;
325 cpu_index++;
327 env->cpu_index = cpu_index;
328 env->nb_watchpoints = 0;
329 *penv = env;
332 static inline void invalidate_page_bitmap(PageDesc *p)
334 if (p->code_bitmap) {
335 qemu_free(p->code_bitmap);
336 p->code_bitmap = NULL;
338 p->code_write_count = 0;
341 /* set to NULL all the 'first_tb' fields in all PageDescs */
342 static void page_flush_tb(void)
344 int i, j;
345 PageDesc *p;
347 for(i = 0; i < L1_SIZE; i++) {
348 p = l1_map[i];
349 if (p) {
350 for(j = 0; j < L2_SIZE; j++) {
351 p->first_tb = NULL;
352 invalidate_page_bitmap(p);
353 p++;
359 /* flush all the translation blocks */
360 /* XXX: tb_flush is currently not thread safe */
361 void tb_flush(CPUState *env1)
363 CPUState *env;
364 #if defined(DEBUG_FLUSH)
365 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
366 (unsigned long)(code_gen_ptr - code_gen_buffer),
367 nb_tbs, nb_tbs > 0 ?
368 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
369 #endif
370 nb_tbs = 0;
372 for(env = first_cpu; env != NULL; env = env->next_cpu) {
373 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
376 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
377 page_flush_tb();
379 code_gen_ptr = code_gen_buffer;
380 /* XXX: flush processor icache at this point if cache flush is
381 expensive */
382 tb_flush_count++;
385 #ifdef DEBUG_TB_CHECK
387 static void tb_invalidate_check(target_ulong address)
389 TranslationBlock *tb;
390 int i;
391 address &= TARGET_PAGE_MASK;
392 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
393 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
394 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
395 address >= tb->pc + tb->size)) {
396 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
397 address, (long)tb->pc, tb->size);
403 /* verify that all the pages have correct rights for code */
404 static void tb_page_check(void)
406 TranslationBlock *tb;
407 int i, flags1, flags2;
409 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
410 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
411 flags1 = page_get_flags(tb->pc);
412 flags2 = page_get_flags(tb->pc + tb->size - 1);
413 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
414 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
415 (long)tb->pc, tb->size, flags1, flags2);
421 void tb_jmp_check(TranslationBlock *tb)
423 TranslationBlock *tb1;
424 unsigned int n1;
426 /* suppress any remaining jumps to this TB */
427 tb1 = tb->jmp_first;
428 for(;;) {
429 n1 = (long)tb1 & 3;
430 tb1 = (TranslationBlock *)((long)tb1 & ~3);
431 if (n1 == 2)
432 break;
433 tb1 = tb1->jmp_next[n1];
435 /* check end of list */
436 if (tb1 != tb) {
437 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
441 #endif
443 /* invalidate one TB */
444 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
445 int next_offset)
447 TranslationBlock *tb1;
448 for(;;) {
449 tb1 = *ptb;
450 if (tb1 == tb) {
451 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
452 break;
454 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
458 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
460 TranslationBlock *tb1;
461 unsigned int n1;
463 for(;;) {
464 tb1 = *ptb;
465 n1 = (long)tb1 & 3;
466 tb1 = (TranslationBlock *)((long)tb1 & ~3);
467 if (tb1 == tb) {
468 *ptb = tb1->page_next[n1];
469 break;
471 ptb = &tb1->page_next[n1];
475 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
477 TranslationBlock *tb1, **ptb;
478 unsigned int n1;
480 ptb = &tb->jmp_next[n];
481 tb1 = *ptb;
482 if (tb1) {
483 /* find tb(n) in circular list */
484 for(;;) {
485 tb1 = *ptb;
486 n1 = (long)tb1 & 3;
487 tb1 = (TranslationBlock *)((long)tb1 & ~3);
488 if (n1 == n && tb1 == tb)
489 break;
490 if (n1 == 2) {
491 ptb = &tb1->jmp_first;
492 } else {
493 ptb = &tb1->jmp_next[n1];
496 /* now we can suppress tb(n) from the list */
497 *ptb = tb->jmp_next[n];
499 tb->jmp_next[n] = NULL;
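/* Illustrative sketch (not part of the original file): the jump lists walked
   above keep a 2-bit tag in the low bits of each TranslationBlock pointer.
   Tag 0 or 1 selects which jmp_next[] slot of the pointed-to TB continues the
   list, and tag 2 marks the list head (the owning TB).  Packing and unpacking
   look like this: */
#if 0
static inline TranslationBlock *example_tb_tag(TranslationBlock *tb, int n)
{
    return (TranslationBlock *)((long)tb | n);       /* store tag in low bits */
}

static inline TranslationBlock *example_tb_untag(TranslationBlock *tagged, int *n)
{
    *n = (long)tagged & 3;                           /* recover the tag */
    return (TranslationBlock *)((long)tagged & ~3);  /* recover the pointer */
}
#endif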
503 /* reset the jump entry 'n' of a TB so that it is not chained to
504 another TB */
505 static inline void tb_reset_jump(TranslationBlock *tb, int n)
507 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
510 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
512 CPUState *env;
513 PageDesc *p;
514 unsigned int h, n1;
515 target_ulong phys_pc;
516 TranslationBlock *tb1, *tb2;
518 /* remove the TB from the hash list */
519 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
520 h = tb_phys_hash_func(phys_pc);
521 tb_remove(&tb_phys_hash[h], tb,
522 offsetof(TranslationBlock, phys_hash_next));
524 /* remove the TB from the page list */
525 if (tb->page_addr[0] != page_addr) {
526 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
527 tb_page_remove(&p->first_tb, tb);
528 invalidate_page_bitmap(p);
530 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
531 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
532 tb_page_remove(&p->first_tb, tb);
533 invalidate_page_bitmap(p);
536 tb_invalidated_flag = 1;
538 /* remove the TB from the hash list */
539 h = tb_jmp_cache_hash_func(tb->pc);
540 for(env = first_cpu; env != NULL; env = env->next_cpu) {
541 if (env->tb_jmp_cache[h] == tb)
542 env->tb_jmp_cache[h] = NULL;
545 /* suppress this TB from the two jump lists */
546 tb_jmp_remove(tb, 0);
547 tb_jmp_remove(tb, 1);
549 /* suppress any remaining jumps to this TB */
550 tb1 = tb->jmp_first;
551 for(;;) {
552 n1 = (long)tb1 & 3;
553 if (n1 == 2)
554 break;
555 tb1 = (TranslationBlock *)((long)tb1 & ~3);
556 tb2 = tb1->jmp_next[n1];
557 tb_reset_jump(tb1, n1);
558 tb1->jmp_next[n1] = NULL;
559 tb1 = tb2;
561 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
563 tb_phys_invalidate_count++;
566 static inline void set_bits(uint8_t *tab, int start, int len)
568 int end, mask, end1;
570 end = start + len;
571 tab += start >> 3;
572 mask = 0xff << (start & 7);
573 if ((start & ~7) == (end & ~7)) {
574 if (start < end) {
575 mask &= ~(0xff << (end & 7));
576 *tab |= mask;
578 } else {
579 *tab++ |= mask;
580 start = (start + 8) & ~7;
581 end1 = end & ~7;
582 while (start < end1) {
583 *tab++ = 0xff;
584 start += 8;
586 if (start < end) {
587 mask = ~(0xff << (end & 7));
588 *tab |= mask;
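/* Illustrative worked example (not part of the original file): set_bits(tab, 10, 5)
   marks bits 10..14.  It advances tab by 10 >> 3 = 1 byte, builds the leading
   mask 0xff << (10 & 7) = 0xfc, and since start and end (15) fall in the same
   byte it trims that mask with the low byte of ~(0xff << (15 & 7)) = 0x7f,
   leaving 0x7c, i.e. bits 2..6 of tab[1]. */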
593 static void build_page_bitmap(PageDesc *p)
595 int n, tb_start, tb_end;
596 TranslationBlock *tb;
598 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
599 if (!p->code_bitmap)
600 return;
601 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
603 tb = p->first_tb;
604 while (tb != NULL) {
605 n = (long)tb & 3;
606 tb = (TranslationBlock *)((long)tb & ~3);
607 /* NOTE: this is subtle as a TB may span two physical pages */
608 if (n == 0) {
609 /* NOTE: tb_end may be after the end of the page, but
610 it is not a problem */
611 tb_start = tb->pc & ~TARGET_PAGE_MASK;
612 tb_end = tb_start + tb->size;
613 if (tb_end > TARGET_PAGE_SIZE)
614 tb_end = TARGET_PAGE_SIZE;
615 } else {
616 tb_start = 0;
617 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
619 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
620 tb = tb->page_next[n];
624 #ifdef TARGET_HAS_PRECISE_SMC
626 static void tb_gen_code(CPUState *env,
627 target_ulong pc, target_ulong cs_base, int flags,
628 int cflags)
630 TranslationBlock *tb;
631 uint8_t *tc_ptr;
632 target_ulong phys_pc, phys_page2, virt_page2;
633 int code_gen_size;
635 phys_pc = get_phys_addr_code(env, pc);
636 tb = tb_alloc(pc);
637 if (!tb) {
638 /* flush must be done */
639 tb_flush(env);
640 /* cannot fail at this point */
641 tb = tb_alloc(pc);
643 tc_ptr = code_gen_ptr;
644 tb->tc_ptr = tc_ptr;
645 tb->cs_base = cs_base;
646 tb->flags = flags;
647 tb->cflags = cflags;
648 cpu_gen_code(env, tb, &code_gen_size);
649 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
651 /* check next page if needed */
652 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
653 phys_page2 = -1;
654 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
655 phys_page2 = get_phys_addr_code(env, virt_page2);
657 tb_link_phys(tb, phys_pc, phys_page2);
659 #endif
661 /* invalidate all TBs which intersect with the target physical page
662 starting in range [start;end[. NOTE: start and end must refer to
663 the same physical page. 'is_cpu_write_access' should be true if called
664 from a real cpu write access: the virtual CPU will exit the current
665 TB if code is modified inside this TB. */
666 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
667 int is_cpu_write_access)
669 int n, current_tb_modified, current_tb_not_found, current_flags;
670 CPUState *env = cpu_single_env;
671 PageDesc *p;
672 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
673 target_ulong tb_start, tb_end;
674 target_ulong current_pc, current_cs_base;
676 p = page_find(start >> TARGET_PAGE_BITS);
677 if (!p)
678 return;
679 if (!p->code_bitmap &&
680 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
681 is_cpu_write_access) {
682 /* build code bitmap */
683 build_page_bitmap(p);
686 /* we remove all the TBs in the range [start, end[ */
687 /* XXX: see if in some cases it could be faster to invalidate all the code */
688 current_tb_not_found = is_cpu_write_access;
689 current_tb_modified = 0;
690 current_tb = NULL; /* avoid warning */
691 current_pc = 0; /* avoid warning */
692 current_cs_base = 0; /* avoid warning */
693 current_flags = 0; /* avoid warning */
694 tb = p->first_tb;
695 while (tb != NULL) {
696 n = (long)tb & 3;
697 tb = (TranslationBlock *)((long)tb & ~3);
698 tb_next = tb->page_next[n];
699 /* NOTE: this is subtle as a TB may span two physical pages */
700 if (n == 0) {
701 /* NOTE: tb_end may be after the end of the page, but
702 it is not a problem */
703 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
704 tb_end = tb_start + tb->size;
705 } else {
706 tb_start = tb->page_addr[1];
707 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
709 if (!(tb_end <= start || tb_start >= end)) {
710 #ifdef TARGET_HAS_PRECISE_SMC
711 if (current_tb_not_found) {
712 current_tb_not_found = 0;
713 current_tb = NULL;
714 if (env->mem_write_pc) {
715 /* now we have a real cpu fault */
716 current_tb = tb_find_pc(env->mem_write_pc);
719 if (current_tb == tb &&
720 !(current_tb->cflags & CF_SINGLE_INSN)) {
721 /* If we are modifying the current TB, we must stop
722 its execution. We could be more precise by checking
723 that the modification is after the current PC, but it
724 would require a specialized function to partially
725 restore the CPU state */
727 current_tb_modified = 1;
728 cpu_restore_state(current_tb, env,
729 env->mem_write_pc, NULL);
730 #if defined(TARGET_I386)
731 current_flags = env->hflags;
732 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
733 current_cs_base = (target_ulong)env->segs[R_CS].base;
734 current_pc = current_cs_base + env->eip;
735 #else
736 #error unsupported CPU
737 #endif
739 #endif /* TARGET_HAS_PRECISE_SMC */
740 /* we need to do that to handle the case where a signal
741 occurs while doing tb_phys_invalidate() */
742 saved_tb = NULL;
743 if (env) {
744 saved_tb = env->current_tb;
745 env->current_tb = NULL;
747 tb_phys_invalidate(tb, -1);
748 if (env) {
749 env->current_tb = saved_tb;
750 if (env->interrupt_request && env->current_tb)
751 cpu_interrupt(env, env->interrupt_request);
754 tb = tb_next;
756 #if !defined(CONFIG_USER_ONLY)
757 /* if no code remaining, no need to continue to use slow writes */
758 if (!p->first_tb) {
759 invalidate_page_bitmap(p);
760 if (is_cpu_write_access) {
761 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
764 #endif
765 #ifdef TARGET_HAS_PRECISE_SMC
766 if (current_tb_modified) {
767 /* we generate a block containing just the instruction
768 modifying the memory. It will ensure that it cannot modify
769 itself */
770 env->current_tb = NULL;
771 tb_gen_code(env, current_pc, current_cs_base, current_flags,
772 CF_SINGLE_INSN);
773 cpu_resume_from_signal(env, NULL);
775 #endif
778 /* len must be <= 8 and start must be a multiple of len */
779 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
781 PageDesc *p;
782 int offset, b;
783 #if 0
784 if (1) {
785 if (loglevel) {
786 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
787 cpu_single_env->mem_write_vaddr, len,
788 cpu_single_env->eip,
789 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
792 #endif
793 p = page_find(start >> TARGET_PAGE_BITS);
794 if (!p)
795 return;
796 if (p->code_bitmap) {
797 offset = start & ~TARGET_PAGE_MASK;
798 b = p->code_bitmap[offset >> 3] >> (offset & 7);
799 if (b & ((1 << len) - 1))
800 goto do_invalidate;
801 } else {
802 do_invalidate:
803 tb_invalidate_phys_page_range(start, start + len, 1);
807 #if !defined(CONFIG_SOFTMMU)
808 static void tb_invalidate_phys_page(target_ulong addr,
809 unsigned long pc, void *puc)
811 int n, current_flags, current_tb_modified;
812 target_ulong current_pc, current_cs_base;
813 PageDesc *p;
814 TranslationBlock *tb, *current_tb;
815 #ifdef TARGET_HAS_PRECISE_SMC
816 CPUState *env = cpu_single_env;
817 #endif
819 addr &= TARGET_PAGE_MASK;
820 p = page_find(addr >> TARGET_PAGE_BITS);
821 if (!p)
822 return;
823 tb = p->first_tb;
824 current_tb_modified = 0;
825 current_tb = NULL;
826 current_pc = 0; /* avoid warning */
827 current_cs_base = 0; /* avoid warning */
828 current_flags = 0; /* avoid warning */
829 #ifdef TARGET_HAS_PRECISE_SMC
830 if (tb && pc != 0) {
831 current_tb = tb_find_pc(pc);
833 #endif
834 while (tb != NULL) {
835 n = (long)tb & 3;
836 tb = (TranslationBlock *)((long)tb & ~3);
837 #ifdef TARGET_HAS_PRECISE_SMC
838 if (current_tb == tb &&
839 !(current_tb->cflags & CF_SINGLE_INSN)) {
840 /* If we are modifying the current TB, we must stop
841 its execution. We could be more precise by checking
842 that the modification is after the current PC, but it
843 would require a specialized function to partially
844 restore the CPU state */
846 current_tb_modified = 1;
847 cpu_restore_state(current_tb, env, pc, puc);
848 #if defined(TARGET_I386)
849 current_flags = env->hflags;
850 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
851 current_cs_base = (target_ulong)env->segs[R_CS].base;
852 current_pc = current_cs_base + env->eip;
853 #else
854 #error unsupported CPU
855 #endif
857 #endif /* TARGET_HAS_PRECISE_SMC */
858 tb_phys_invalidate(tb, addr);
859 tb = tb->page_next[n];
861 p->first_tb = NULL;
862 #ifdef TARGET_HAS_PRECISE_SMC
863 if (current_tb_modified) {
864 /* we generate a block containing just the instruction
865 modifying the memory. It will ensure that it cannot modify
866 itself */
867 env->current_tb = NULL;
868 tb_gen_code(env, current_pc, current_cs_base, current_flags,
869 CF_SINGLE_INSN);
870 cpu_resume_from_signal(env, puc);
872 #endif
874 #endif
876 /* add the tb in the target page and protect it if necessary */
877 static inline void tb_alloc_page(TranslationBlock *tb,
878 unsigned int n, target_ulong page_addr)
880 PageDesc *p;
881 TranslationBlock *last_first_tb;
883 tb->page_addr[n] = page_addr;
884 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
885 tb->page_next[n] = p->first_tb;
886 last_first_tb = p->first_tb;
887 p->first_tb = (TranslationBlock *)((long)tb | n);
888 invalidate_page_bitmap(p);
890 #if defined(TARGET_HAS_SMC) || 1
892 #if defined(CONFIG_USER_ONLY)
893 if (p->flags & PAGE_WRITE) {
894 target_ulong addr;
895 PageDesc *p2;
896 int prot;
898 /* force the host page as non writable (writes will have a
899 page fault + mprotect overhead) */
900 page_addr &= qemu_host_page_mask;
901 prot = 0;
902 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
903 addr += TARGET_PAGE_SIZE) {
905 p2 = page_find (addr >> TARGET_PAGE_BITS);
906 if (!p2)
907 continue;
908 prot |= p2->flags;
909 p2->flags &= ~PAGE_WRITE;
910 page_get_flags(addr);
912 mprotect(g2h(page_addr), qemu_host_page_size,
913 (prot & PAGE_BITS) & ~PAGE_WRITE);
914 #ifdef DEBUG_TB_INVALIDATE
915 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
916 page_addr);
917 #endif
919 #else
920 /* if some code is already present, then the pages are already
921 protected. So we handle the case where only the first TB is
922 allocated in a physical page */
923 if (!last_first_tb) {
924 tlb_protect_code(page_addr);
926 #endif
928 #endif /* TARGET_HAS_SMC */
931 /* Allocate a new translation block. Flush the translation buffer if
932 too many translation blocks or too much generated code. */
933 TranslationBlock *tb_alloc(target_ulong pc)
935 TranslationBlock *tb;
937 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
938 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
939 return NULL;
940 tb = &tbs[nb_tbs++];
941 tb->pc = pc;
942 tb->cflags = 0;
943 return tb;
946 /* add a new TB and link it to the physical page tables. phys_page2 is
947 (-1) to indicate that only one page contains the TB. */
948 void tb_link_phys(TranslationBlock *tb,
949 target_ulong phys_pc, target_ulong phys_page2)
951 unsigned int h;
952 TranslationBlock **ptb;
954 /* add in the physical hash table */
955 h = tb_phys_hash_func(phys_pc);
956 ptb = &tb_phys_hash[h];
957 tb->phys_hash_next = *ptb;
958 *ptb = tb;
960 /* add in the page list */
961 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
962 if (phys_page2 != -1)
963 tb_alloc_page(tb, 1, phys_page2);
964 else
965 tb->page_addr[1] = -1;
967 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
968 tb->jmp_next[0] = NULL;
969 tb->jmp_next[1] = NULL;
971 /* init original jump addresses */
972 if (tb->tb_next_offset[0] != 0xffff)
973 tb_reset_jump(tb, 0);
974 if (tb->tb_next_offset[1] != 0xffff)
975 tb_reset_jump(tb, 1);
977 #ifdef DEBUG_TB_CHECK
978 tb_page_check();
979 #endif
982 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
983 tb[1].tc_ptr. Return NULL if not found */
984 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
986 int m_min, m_max, m;
987 unsigned long v;
988 TranslationBlock *tb;
990 if (nb_tbs <= 0)
991 return NULL;
992 if (tc_ptr < (unsigned long)code_gen_buffer ||
993 tc_ptr >= (unsigned long)code_gen_ptr)
994 return NULL;
995 /* binary search (cf Knuth) */
996 m_min = 0;
997 m_max = nb_tbs - 1;
998 while (m_min <= m_max) {
999 m = (m_min + m_max) >> 1;
1000 tb = &tbs[m];
1001 v = (unsigned long)tb->tc_ptr;
1002 if (v == tc_ptr)
1003 return tb;
1004 else if (tc_ptr < v) {
1005 m_max = m - 1;
1006 } else {
1007 m_min = m + 1;
1010 return &tbs[m_max];
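/* Illustrative sketch (not part of the original file): tb_find_pc() is the
   building block for precise state recovery.  Given a host PC taken from a
   signal handler, a caller can map it back to the TB whose generated code
   contains it and rebuild the guest CPU state, roughly: */
#if 0
static void example_recover_state(CPUState *env, unsigned long host_pc, void *puc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* host_pc lies inside the code generated for 'tb' */
        cpu_restore_state(tb, env, host_pc, puc);
    }
}
#endif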
1013 static void tb_reset_jump_recursive(TranslationBlock *tb);
1015 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1017 TranslationBlock *tb1, *tb_next, **ptb;
1018 unsigned int n1;
1020 tb1 = tb->jmp_next[n];
1021 if (tb1 != NULL) {
1022 /* find head of list */
1023 for(;;) {
1024 n1 = (long)tb1 & 3;
1025 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1026 if (n1 == 2)
1027 break;
1028 tb1 = tb1->jmp_next[n1];
1030 /* we are now sure that tb jumps to tb1 */
1031 tb_next = tb1;
1033 /* remove tb from the jmp_first list */
1034 ptb = &tb_next->jmp_first;
1035 for(;;) {
1036 tb1 = *ptb;
1037 n1 = (long)tb1 & 3;
1038 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1039 if (n1 == n && tb1 == tb)
1040 break;
1041 ptb = &tb1->jmp_next[n1];
1043 *ptb = tb->jmp_next[n];
1044 tb->jmp_next[n] = NULL;
1046 /* suppress the jump to next tb in generated code */
1047 tb_reset_jump(tb, n);
1049 /* suppress jumps in the tb on which we could have jumped */
1050 tb_reset_jump_recursive(tb_next);
1054 static void tb_reset_jump_recursive(TranslationBlock *tb)
1056 tb_reset_jump_recursive2(tb, 0);
1057 tb_reset_jump_recursive2(tb, 1);
1060 #if defined(TARGET_HAS_ICE)
1061 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1063 target_phys_addr_t addr;
1064 target_ulong pd;
1065 ram_addr_t ram_addr;
1066 PhysPageDesc *p;
1068 addr = cpu_get_phys_page_debug(env, pc);
1069 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1070 if (!p) {
1071 pd = IO_MEM_UNASSIGNED;
1072 } else {
1073 pd = p->phys_offset;
1075 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1076 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1078 #endif
1080 /* Add a watchpoint. */
1081 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1083 int i;
1085 for (i = 0; i < env->nb_watchpoints; i++) {
1086 if (addr == env->watchpoint[i].vaddr)
1087 return 0;
1089 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1090 return -1;
1092 i = env->nb_watchpoints++;
1093 env->watchpoint[i].vaddr = addr;
1094 tlb_flush_page(env, addr);
1095 /* FIXME: This flush is needed because of the hack to make memory ops
1096 terminate the TB. It can be removed once the proper IO trap and
1097 re-execute bits are in. */
1098 tb_flush(env);
1099 return i;
1102 /* Remove a watchpoint. */
1103 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1105 int i;
1107 for (i = 0; i < env->nb_watchpoints; i++) {
1108 if (addr == env->watchpoint[i].vaddr) {
1109 env->nb_watchpoints--;
1110 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1111 tlb_flush_page(env, addr);
1112 return 0;
1115 return -1;
1118 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1119 breakpoint is reached */
1120 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1122 #if defined(TARGET_HAS_ICE)
1123 int i;
1125 for(i = 0; i < env->nb_breakpoints; i++) {
1126 if (env->breakpoints[i] == pc)
1127 return 0;
1130 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1131 return -1;
1132 env->breakpoints[env->nb_breakpoints++] = pc;
1134 breakpoint_invalidate(env, pc);
1135 return 0;
1136 #else
1137 return -1;
1138 #endif
1141 /* remove a breakpoint */
1142 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1144 #if defined(TARGET_HAS_ICE)
1145 int i;
1146 for(i = 0; i < env->nb_breakpoints; i++) {
1147 if (env->breakpoints[i] == pc)
1148 goto found;
1150 return -1;
1151 found:
1152 env->nb_breakpoints--;
1153 if (i < env->nb_breakpoints)
1154 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1156 breakpoint_invalidate(env, pc);
1157 return 0;
1158 #else
1159 return -1;
1160 #endif
1163 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1164 CPU loop after each instruction */
1165 void cpu_single_step(CPUState *env, int enabled)
1167 #if defined(TARGET_HAS_ICE)
1168 if (env->singlestep_enabled != enabled) {
1169 env->singlestep_enabled = enabled;
1170 /* must flush all the translated code to avoid inconsistencies */
1171 /* XXX: only flush what is necessary */
1172 tb_flush(env);
1174 #endif
1177 /* enable or disable low levels log */
1178 void cpu_set_log(int log_flags)
1180 loglevel = log_flags;
1181 if (loglevel && !logfile) {
1182 logfile = fopen(logfilename, log_append ? "a" : "w");
1183 if (!logfile) {
1184 perror(logfilename);
1185 _exit(1);
1187 #if !defined(CONFIG_SOFTMMU)
1188 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1190 static uint8_t logfile_buf[4096];
1191 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1193 #else
1194 setvbuf(logfile, NULL, _IOLBF, 0);
1195 #endif
1196 log_append = 1;
1198 if (!loglevel && logfile) {
1199 fclose(logfile);
1200 logfile = NULL;
1204 void cpu_set_log_filename(const char *filename)
1206 logfilename = strdup(filename);
1207 if (logfile) {
1208 fclose(logfile);
1209 logfile = NULL;
1211 cpu_set_log(loglevel);
1214 /* mask must never be zero, except for A20 change call */
1215 void cpu_interrupt(CPUState *env, int mask)
1217 TranslationBlock *tb;
1218 static int interrupt_lock;
1220 env->interrupt_request |= mask;
1221 /* if the cpu is currently executing code, we must unlink it and
1222 all the potentially executing TB */
1223 tb = env->current_tb;
1224 if (tb && !testandset(&interrupt_lock)) {
1225 env->current_tb = NULL;
1226 tb_reset_jump_recursive(tb);
1227 interrupt_lock = 0;
1231 void cpu_reset_interrupt(CPUState *env, int mask)
1233 env->interrupt_request &= ~mask;
1236 CPULogItem cpu_log_items[] = {
1237 { CPU_LOG_TB_OUT_ASM, "out_asm",
1238 "show generated host assembly code for each compiled TB" },
1239 { CPU_LOG_TB_IN_ASM, "in_asm",
1240 "show target assembly code for each compiled TB" },
1241 { CPU_LOG_TB_OP, "op",
1242 "show micro ops for each compiled TB" },
1243 { CPU_LOG_TB_OP_OPT, "op_opt",
1244 "show micro ops "
1245 #ifdef TARGET_I386
1246 "before eflags optimization and "
1247 #endif
1248 "after liveness analysis" },
1249 { CPU_LOG_INT, "int",
1250 "show interrupts/exceptions in short format" },
1251 { CPU_LOG_EXEC, "exec",
1252 "show trace before each executed TB (lots of logs)" },
1253 { CPU_LOG_TB_CPU, "cpu",
1254 "show CPU state before block translation" },
1255 #ifdef TARGET_I386
1256 { CPU_LOG_PCALL, "pcall",
1257 "show protected mode far calls/returns/exceptions" },
1258 #endif
1259 #ifdef DEBUG_IOPORT
1260 { CPU_LOG_IOPORT, "ioport",
1261 "show all i/o ports accesses" },
1262 #endif
1263 { 0, NULL, NULL },
1266 static int cmp1(const char *s1, int n, const char *s2)
1268 if (strlen(s2) != n)
1269 return 0;
1270 return memcmp(s1, s2, n) == 0;
1273 /* takes a comma separated list of log masks. Return 0 if error. */
1274 int cpu_str_to_log_mask(const char *str)
1276 CPULogItem *item;
1277 int mask;
1278 const char *p, *p1;
1280 p = str;
1281 mask = 0;
1282 for(;;) {
1283 p1 = strchr(p, ',');
1284 if (!p1)
1285 p1 = p + strlen(p);
1286 if(cmp1(p,p1-p,"all")) {
1287 for(item = cpu_log_items; item->mask != 0; item++) {
1288 mask |= item->mask;
1290 } else {
1291 for(item = cpu_log_items; item->mask != 0; item++) {
1292 if (cmp1(p, p1 - p, item->name))
1293 goto found;
1295 return 0;
1297 found:
1298 mask |= item->mask;
1299 if (*p1 != ',')
1300 break;
1301 p = p1 + 1;
1303 return mask;
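/* Illustrative sketch (not part of the original file): typical use of the
   parser above when handling a "-d" style option string. */
#if 0
static void example_set_log(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);    /* e.g. "in_asm,op,exec" */
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
#endif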
1306 void cpu_abort(CPUState *env, const char *fmt, ...)
1308 va_list ap;
1309 va_list ap2;
1311 va_start(ap, fmt);
1312 va_copy(ap2, ap);
1313 fprintf(stderr, "qemu: fatal: ");
1314 vfprintf(stderr, fmt, ap);
1315 fprintf(stderr, "\n");
1316 #ifdef TARGET_I386
1317 if(env->intercept & INTERCEPT_SVM_MASK) {
1318 /* most probably the virtual machine should not
1319 be shut down but rather caught by the VMM */
1320 vmexit(SVM_EXIT_SHUTDOWN, 0);
1322 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1323 #else
1324 cpu_dump_state(env, stderr, fprintf, 0);
1325 #endif
1326 if (logfile) {
1327 fprintf(logfile, "qemu: fatal: ");
1328 vfprintf(logfile, fmt, ap2);
1329 fprintf(logfile, "\n");
1330 #ifdef TARGET_I386
1331 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1332 #else
1333 cpu_dump_state(env, logfile, fprintf, 0);
1334 #endif
1335 fflush(logfile);
1336 fclose(logfile);
1338 va_end(ap2);
1339 va_end(ap);
1340 abort();
1343 CPUState *cpu_copy(CPUState *env)
1345 CPUState *new_env = cpu_init(env->cpu_model_str);
1346 /* preserve chaining and index */
1347 CPUState *next_cpu = new_env->next_cpu;
1348 int cpu_index = new_env->cpu_index;
1349 memcpy(new_env, env, sizeof(CPUState));
1350 new_env->next_cpu = next_cpu;
1351 new_env->cpu_index = cpu_index;
1352 return new_env;
1355 #if !defined(CONFIG_USER_ONLY)
1357 /* NOTE: if flush_global is true, also flush global entries (not
1358 implemented yet) */
1359 void tlb_flush(CPUState *env, int flush_global)
1361 int i;
1363 #if defined(DEBUG_TLB)
1364 printf("tlb_flush:\n");
1365 #endif
1366 /* must reset current TB so that interrupts cannot modify the
1367 links while we are modifying them */
1368 env->current_tb = NULL;
1370 for(i = 0; i < CPU_TLB_SIZE; i++) {
1371 env->tlb_table[0][i].addr_read = -1;
1372 env->tlb_table[0][i].addr_write = -1;
1373 env->tlb_table[0][i].addr_code = -1;
1374 env->tlb_table[1][i].addr_read = -1;
1375 env->tlb_table[1][i].addr_write = -1;
1376 env->tlb_table[1][i].addr_code = -1;
1377 #if (NB_MMU_MODES >= 3)
1378 env->tlb_table[2][i].addr_read = -1;
1379 env->tlb_table[2][i].addr_write = -1;
1380 env->tlb_table[2][i].addr_code = -1;
1381 #if (NB_MMU_MODES == 4)
1382 env->tlb_table[3][i].addr_read = -1;
1383 env->tlb_table[3][i].addr_write = -1;
1384 env->tlb_table[3][i].addr_code = -1;
1385 #endif
1386 #endif
1389 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1391 #if !defined(CONFIG_SOFTMMU)
1392 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1393 #endif
1394 #ifdef USE_KQEMU
1395 if (env->kqemu_enabled) {
1396 kqemu_flush(env, flush_global);
1398 #endif
1399 tlb_flush_count++;
1402 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1404 if (addr == (tlb_entry->addr_read &
1405 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1406 addr == (tlb_entry->addr_write &
1407 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1408 addr == (tlb_entry->addr_code &
1409 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1410 tlb_entry->addr_read = -1;
1411 tlb_entry->addr_write = -1;
1412 tlb_entry->addr_code = -1;
1416 void tlb_flush_page(CPUState *env, target_ulong addr)
1418 int i;
1419 TranslationBlock *tb;
1421 #if defined(DEBUG_TLB)
1422 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1423 #endif
1424 /* must reset current TB so that interrupts cannot modify the
1425 links while we are modifying them */
1426 env->current_tb = NULL;
1428 addr &= TARGET_PAGE_MASK;
1429 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1430 tlb_flush_entry(&env->tlb_table[0][i], addr);
1431 tlb_flush_entry(&env->tlb_table[1][i], addr);
1432 #if (NB_MMU_MODES >= 3)
1433 tlb_flush_entry(&env->tlb_table[2][i], addr);
1434 #if (NB_MMU_MODES == 4)
1435 tlb_flush_entry(&env->tlb_table[3][i], addr);
1436 #endif
1437 #endif
1439 /* Discard jump cache entries for any tb which might potentially
1440 overlap the flushed page. */
1441 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1442 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1444 i = tb_jmp_cache_hash_page(addr);
1445 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1447 #if !defined(CONFIG_SOFTMMU)
1448 if (addr < MMAP_AREA_END)
1449 munmap((void *)addr, TARGET_PAGE_SIZE);
1450 #endif
1451 #ifdef USE_KQEMU
1452 if (env->kqemu_enabled) {
1453 kqemu_flush_page(env, addr);
1455 #endif
1458 /* update the TLBs so that writes to code in the virtual page 'addr'
1459 can be detected */
1460 static void tlb_protect_code(ram_addr_t ram_addr)
1462 cpu_physical_memory_reset_dirty(ram_addr,
1463 ram_addr + TARGET_PAGE_SIZE,
1464 CODE_DIRTY_FLAG);
1467 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1468 tested for self modifying code */
1469 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1470 target_ulong vaddr)
1472 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
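/* Illustrative sketch (not part of the original file): phys_ram_dirty keeps one
   flag byte per target page.  tlb_protect_code() clears CODE_DIRTY_FLAG so that
   writes to the page take the slow notdirty path and self-modifying code is
   detected; tlb_unprotect_code_phys() sets it again once no translated code
   remains.  A page only counts as fully dirty when every flag bit is set: */
#if 0
static inline int example_page_fully_dirty(ram_addr_t ram_addr)
{
    return phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif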
1475 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1476 unsigned long start, unsigned long length)
1478 unsigned long addr;
1479 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1480 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1481 if ((addr - start) < length) {
1482 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1487 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1488 int dirty_flags)
1490 CPUState *env;
1491 unsigned long length, start1;
1492 int i, mask, len;
1493 uint8_t *p;
1495 start &= TARGET_PAGE_MASK;
1496 end = TARGET_PAGE_ALIGN(end);
1498 length = end - start;
1499 if (length == 0)
1500 return;
1501 len = length >> TARGET_PAGE_BITS;
1502 #ifdef USE_KQEMU
1503 /* XXX: should not depend on cpu context */
1504 env = first_cpu;
1505 if (env->kqemu_enabled) {
1506 ram_addr_t addr;
1507 addr = start;
1508 for(i = 0; i < len; i++) {
1509 kqemu_set_notdirty(env, addr);
1510 addr += TARGET_PAGE_SIZE;
1513 #endif
1514 mask = ~dirty_flags;
1515 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1516 for(i = 0; i < len; i++)
1517 p[i] &= mask;
1519 /* we modify the TLB cache so that the dirty bit will be set again
1520 when accessing the range */
1521 start1 = start + (unsigned long)phys_ram_base;
1522 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1523 for(i = 0; i < CPU_TLB_SIZE; i++)
1524 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1525 for(i = 0; i < CPU_TLB_SIZE; i++)
1526 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1527 #if (NB_MMU_MODES >= 3)
1528 for(i = 0; i < CPU_TLB_SIZE; i++)
1529 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1530 #if (NB_MMU_MODES == 4)
1531 for(i = 0; i < CPU_TLB_SIZE; i++)
1532 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1533 #endif
1534 #endif
1537 #if !defined(CONFIG_SOFTMMU)
1538 /* XXX: this is expensive */
1540 VirtPageDesc *p;
1541 int j;
1542 target_ulong addr;
1544 for(i = 0; i < L1_SIZE; i++) {
1545 p = l1_virt_map[i];
1546 if (p) {
1547 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1548 for(j = 0; j < L2_SIZE; j++) {
1549 if (p->valid_tag == virt_valid_tag &&
1550 p->phys_addr >= start && p->phys_addr < end &&
1551 (p->prot & PROT_WRITE)) {
1552 if (addr < MMAP_AREA_END) {
1553 mprotect((void *)addr, TARGET_PAGE_SIZE,
1554 p->prot & ~PROT_WRITE);
1557 addr += TARGET_PAGE_SIZE;
1558 p++;
1563 #endif
1566 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1568 ram_addr_t ram_addr;
1570 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1571 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1572 tlb_entry->addend - (unsigned long)phys_ram_base;
1573 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1574 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1579 /* update the TLB according to the current state of the dirty bits */
1580 void cpu_tlb_update_dirty(CPUState *env)
1582 int i;
1583 for(i = 0; i < CPU_TLB_SIZE; i++)
1584 tlb_update_dirty(&env->tlb_table[0][i]);
1585 for(i = 0; i < CPU_TLB_SIZE; i++)
1586 tlb_update_dirty(&env->tlb_table[1][i]);
1587 #if (NB_MMU_MODES >= 3)
1588 for(i = 0; i < CPU_TLB_SIZE; i++)
1589 tlb_update_dirty(&env->tlb_table[2][i]);
1590 #if (NB_MMU_MODES == 4)
1591 for(i = 0; i < CPU_TLB_SIZE; i++)
1592 tlb_update_dirty(&env->tlb_table[3][i]);
1593 #endif
1594 #endif
1597 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1598 unsigned long start)
1600 unsigned long addr;
1601 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1602 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1603 if (addr == start) {
1604 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1609 /* update the TLB corresponding to virtual page vaddr and phys addr
1610 addr so that it is no longer dirty */
1611 static inline void tlb_set_dirty(CPUState *env,
1612 unsigned long addr, target_ulong vaddr)
1614 int i;
1616 addr &= TARGET_PAGE_MASK;
1617 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1618 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1619 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1620 #if (NB_MMU_MODES >= 3)
1621 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1622 #if (NB_MMU_MODES == 4)
1623 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1624 #endif
1625 #endif
1628 /* add a new TLB entry. At most one entry for a given virtual address
1629 is permitted. Return 0 if OK or 2 if the page could not be mapped
1630 (can only happen in non SOFTMMU mode for I/O pages or pages
1631 conflicting with the host address space). */
1632 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1633 target_phys_addr_t paddr, int prot,
1634 int mmu_idx, int is_softmmu)
1636 PhysPageDesc *p;
1637 unsigned long pd;
1638 unsigned int index;
1639 target_ulong address;
1640 target_phys_addr_t addend;
1641 int ret;
1642 CPUTLBEntry *te;
1643 int i;
1645 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1646 if (!p) {
1647 pd = IO_MEM_UNASSIGNED;
1648 } else {
1649 pd = p->phys_offset;
1651 #if defined(DEBUG_TLB)
1652 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1653 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1654 #endif
1656 ret = 0;
1657 #if !defined(CONFIG_SOFTMMU)
1658 if (is_softmmu)
1659 #endif
1661 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1662 /* IO memory case */
1663 address = vaddr | pd;
1664 addend = paddr;
1665 } else {
1666 /* standard memory */
1667 address = vaddr;
1668 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1671 /* Make accesses to pages with watchpoints go via the
1672 watchpoint trap routines. */
1673 for (i = 0; i < env->nb_watchpoints; i++) {
1674 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1675 if (address & ~TARGET_PAGE_MASK) {
1676 env->watchpoint[i].addend = 0;
1677 address = vaddr | io_mem_watch;
1678 } else {
1679 env->watchpoint[i].addend = pd - paddr +
1680 (unsigned long) phys_ram_base;
1681 /* TODO: Figure out how to make read watchpoints coexist
1682 with code. */
1683 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1688 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1689 addend -= vaddr;
1690 te = &env->tlb_table[mmu_idx][index];
1691 te->addend = addend;
1692 if (prot & PAGE_READ) {
1693 te->addr_read = address;
1694 } else {
1695 te->addr_read = -1;
1697 if (prot & PAGE_EXEC) {
1698 te->addr_code = address;
1699 } else {
1700 te->addr_code = -1;
1702 if (prot & PAGE_WRITE) {
1703 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1704 (pd & IO_MEM_ROMD)) {
1705 /* write access calls the I/O callback */
1706 te->addr_write = vaddr |
1707 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1708 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1709 !cpu_physical_memory_is_dirty(pd)) {
1710 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1711 } else {
1712 te->addr_write = address;
1714 } else {
1715 te->addr_write = -1;
1718 #if !defined(CONFIG_SOFTMMU)
1719 else {
1720 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1721 /* IO access: no mapping is done as it will be handled by the
1722 soft MMU */
1723 if (!(env->hflags & HF_SOFTMMU_MASK))
1724 ret = 2;
1725 } else {
1726 void *map_addr;
1728 if (vaddr >= MMAP_AREA_END) {
1729 ret = 2;
1730 } else {
1731 if (prot & PROT_WRITE) {
1732 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1733 #if defined(TARGET_HAS_SMC) || 1
1734 first_tb ||
1735 #endif
1736 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1737 !cpu_physical_memory_is_dirty(pd))) {
1738 /* ROM: we do as if code was inside */
1739 /* if code is present, we only map as read only and save the
1740 original mapping */
1741 VirtPageDesc *vp;
1743 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1744 vp->phys_addr = pd;
1745 vp->prot = prot;
1746 vp->valid_tag = virt_valid_tag;
1747 prot &= ~PAGE_WRITE;
1750 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1751 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1752 if (map_addr == MAP_FAILED) {
1753 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1754 paddr, vaddr);
1759 #endif
1760 return ret;
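/* Illustrative sketch (not part of the original file, and simplified with
   respect to the real softmmu templates): the fast path built on the entries
   filled in above compares the page-aligned guest address against
   addr_read/addr_write/addr_code and, on a RAM hit, reaches the host byte as
   guest address + addend, since addend was set above to
   phys_ram_base + (pd & TARGET_PAGE_MASK) - vaddr. */
#if 0
static inline int example_fast_ldub(CPUState *env, target_ulong vaddr, int mmu_idx,
                                    uint8_t *valp)
{
    int idx = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][idx];
    if (te->addr_read == (vaddr & TARGET_PAGE_MASK)) {
        *valp = *(uint8_t *)(long)(vaddr + te->addend);  /* direct RAM access */
        return 1;
    }
    return 0;  /* miss or I/O page: the slow path (tlb_fill / io_mem_read) handles it */
}
#endif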
1763 /* called from signal handler: invalidate the code and unprotect the
1764 page. Return TRUE if the fault was successfully handled. */
1765 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1767 #if !defined(CONFIG_SOFTMMU)
1768 VirtPageDesc *vp;
1770 #if defined(DEBUG_TLB)
1771 printf("page_unprotect: addr=0x%08x\n", addr);
1772 #endif
1773 addr &= TARGET_PAGE_MASK;
1775 /* if it is not mapped, no need to worry here */
1776 if (addr >= MMAP_AREA_END)
1777 return 0;
1778 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1779 if (!vp)
1780 return 0;
1781 /* NOTE: in this case, validate_tag is _not_ tested as it
1782 validates only the code TLB */
1783 if (vp->valid_tag != virt_valid_tag)
1784 return 0;
1785 if (!(vp->prot & PAGE_WRITE))
1786 return 0;
1787 #if defined(DEBUG_TLB)
1788 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1789 addr, vp->phys_addr, vp->prot);
1790 #endif
1791 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1792 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1793 (unsigned long)addr, vp->prot);
1794 /* set the dirty bit */
1795 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1796 /* flush the code inside */
1797 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1798 return 1;
1799 #else
1800 return 0;
1801 #endif
1804 #else
1806 void tlb_flush(CPUState *env, int flush_global)
1810 void tlb_flush_page(CPUState *env, target_ulong addr)
1814 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1815 target_phys_addr_t paddr, int prot,
1816 int mmu_idx, int is_softmmu)
1818 return 0;
1821 /* dump memory mappings */
1822 void page_dump(FILE *f)
1824 unsigned long start, end;
1825 int i, j, prot, prot1;
1826 PageDesc *p;
1828 fprintf(f, "%-8s %-8s %-8s %s\n",
1829 "start", "end", "size", "prot");
1830 start = -1;
1831 end = -1;
1832 prot = 0;
1833 for(i = 0; i <= L1_SIZE; i++) {
1834 if (i < L1_SIZE)
1835 p = l1_map[i];
1836 else
1837 p = NULL;
1838 for(j = 0;j < L2_SIZE; j++) {
1839 if (!p)
1840 prot1 = 0;
1841 else
1842 prot1 = p[j].flags;
1843 if (prot1 != prot) {
1844 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1845 if (start != -1) {
1846 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1847 start, end, end - start,
1848 prot & PAGE_READ ? 'r' : '-',
1849 prot & PAGE_WRITE ? 'w' : '-',
1850 prot & PAGE_EXEC ? 'x' : '-');
1852 if (prot1 != 0)
1853 start = end;
1854 else
1855 start = -1;
1856 prot = prot1;
1858 if (!p)
1859 break;
1864 int page_get_flags(target_ulong address)
1866 PageDesc *p;
1868 p = page_find(address >> TARGET_PAGE_BITS);
1869 if (!p)
1870 return 0;
1871 return p->flags;
1874 /* modify the flags of a page and invalidate the code if
1875 necessary. The flag PAGE_WRITE_ORG is set automatically
1876 depending on PAGE_WRITE */
1877 void page_set_flags(target_ulong start, target_ulong end, int flags)
1879 PageDesc *p;
1880 target_ulong addr;
1882 start = start & TARGET_PAGE_MASK;
1883 end = TARGET_PAGE_ALIGN(end);
1884 if (flags & PAGE_WRITE)
1885 flags |= PAGE_WRITE_ORG;
1886 spin_lock(&tb_lock);
1887 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1888 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1889 /* if the write protection is set, then we invalidate the code
1890 inside */
1891 if (!(p->flags & PAGE_WRITE) &&
1892 (flags & PAGE_WRITE) &&
1893 p->first_tb) {
1894 tb_invalidate_phys_page(addr, 0, NULL);
1896 p->flags = flags;
1898 spin_unlock(&tb_lock);
1901 int page_check_range(target_ulong start, target_ulong len, int flags)
1903 PageDesc *p;
1904 target_ulong end;
1905 target_ulong addr;
1907 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1908 start = start & TARGET_PAGE_MASK;
1910 if( end < start )
1911 /* we've wrapped around */
1912 return -1;
1913 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1914 p = page_find(addr >> TARGET_PAGE_BITS);
1915 if( !p )
1916 return -1;
1917 if( !(p->flags & PAGE_VALID) )
1918 return -1;
1920 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1921 return -1;
1922 if (flags & PAGE_WRITE) {
1923 if (!(p->flags & PAGE_WRITE_ORG))
1924 return -1;
1925 /* unprotect the page if it was put read-only because it
1926 contains translated code */
1927 if (!(p->flags & PAGE_WRITE)) {
1928 if (!page_unprotect(addr, 0, NULL))
1929 return -1;
1931 return 0;
1934 return 0;
1937 /* called from signal handler: invalidate the code and unprotect the
1938 page. Return TRUE if the fault was successfully handled. */
1939 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1941 unsigned int page_index, prot, pindex;
1942 PageDesc *p, *p1;
1943 target_ulong host_start, host_end, addr;
1945 host_start = address & qemu_host_page_mask;
1946 page_index = host_start >> TARGET_PAGE_BITS;
1947 p1 = page_find(page_index);
1948 if (!p1)
1949 return 0;
1950 host_end = host_start + qemu_host_page_size;
1951 p = p1;
1952 prot = 0;
1953 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1954 prot |= p->flags;
1955 p++;
1957 /* if the page was really writable, then we change its
1958 protection back to writable */
1959 if (prot & PAGE_WRITE_ORG) {
1960 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1961 if (!(p1[pindex].flags & PAGE_WRITE)) {
1962 mprotect((void *)g2h(host_start), qemu_host_page_size,
1963 (prot & PAGE_BITS) | PAGE_WRITE);
1964 p1[pindex].flags |= PAGE_WRITE;
1965 /* and since the content will be modified, we must invalidate
1966 the corresponding translated code. */
1967 tb_invalidate_phys_page(address, pc, puc);
1968 #ifdef DEBUG_TB_CHECK
1969 tb_invalidate_check(address);
1970 #endif
1971 return 1;
1974 return 0;
1977 static inline void tlb_set_dirty(CPUState *env,
1978 unsigned long addr, target_ulong vaddr)
1981 #endif /* defined(CONFIG_USER_ONLY) */
1983 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1984 int memory);
1985 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1986 int orig_memory);
1987 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1988 need_subpage) \
1989 do { \
1990 if (addr > start_addr) \
1991 start_addr2 = 0; \
1992 else { \
1993 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1994 if (start_addr2 > 0) \
1995 need_subpage = 1; \
1998 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
1999 end_addr2 = TARGET_PAGE_SIZE - 1; \
2000 else { \
2001 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2002 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2003 need_subpage = 1; \
2005 } while (0)
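/* Illustrative worked example (not part of the original file): CHECK_SUBPAGE
   decides whether a registered region covers the target page 'addr' only
   partially.  Registering orig_size = 0x100 bytes at start_addr = 0x10000080
   with 4 KB pages gives, for the page addr = 0x10000000, start_addr2 = 0x80
   and end_addr2 = 0x17f, so need_subpage is set and the page is split into
   per-range handlers by subpage_register(). */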
2007 /* register physical memory. 'size' must be a multiple of the target
2008 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2009 io memory page */
2010 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2011 unsigned long size,
2012 unsigned long phys_offset)
2014 target_phys_addr_t addr, end_addr;
2015 PhysPageDesc *p;
2016 CPUState *env;
2017 unsigned long orig_size = size;
2018 void *subpage;
2020 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2021 end_addr = start_addr + (target_phys_addr_t)size;
2022 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2023 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2024 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2025 unsigned long orig_memory = p->phys_offset;
2026 target_phys_addr_t start_addr2, end_addr2;
2027 int need_subpage = 0;
2029 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2030 need_subpage);
2031 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2032 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2033 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2034 &p->phys_offset, orig_memory);
2035 } else {
2036 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2037 >> IO_MEM_SHIFT];
2039 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2040 } else {
2041 p->phys_offset = phys_offset;
2042 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2043 (phys_offset & IO_MEM_ROMD))
2044 phys_offset += TARGET_PAGE_SIZE;
2046 } else {
2047 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2048 p->phys_offset = phys_offset;
2049 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2050 (phys_offset & IO_MEM_ROMD))
2051 phys_offset += TARGET_PAGE_SIZE;
2052 else {
2053 target_phys_addr_t start_addr2, end_addr2;
2054 int need_subpage = 0;
2056 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2057 end_addr2, need_subpage);
2059 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2060 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2061 &p->phys_offset, IO_MEM_UNASSIGNED);
2062 subpage_register(subpage, start_addr2, end_addr2,
2063 phys_offset);
2069 /* since each CPU stores ram addresses in its TLB cache, we must
2070 reset the modified entries */
2071 /* XXX: slow ! */
2072 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2073 tlb_flush(env, 1);
2077 /* XXX: temporary until new memory mapping API */
2078 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2080 PhysPageDesc *p;
2082 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2083 if (!p)
2084 return IO_MEM_UNASSIGNED;
2085 return p->phys_offset;
2088 /* XXX: better than nothing */
2089 ram_addr_t qemu_ram_alloc(unsigned int size)
2091 ram_addr_t addr;
2092 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2093 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2094 size, phys_ram_size);
2095 abort();
2097 addr = phys_ram_alloc_offset;
2098 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2099 return addr;
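/* Sketch of typical board-level usage (hypothetical sizes and addresses,
   not part of this file): guest memory is carved out of the preallocated
   phys_ram_base area with qemu_ram_alloc() and then mapped into the guest
   physical address space with cpu_register_physical_memory(). */
#if 0
static void example_board_init(void)
{
    ram_addr_t ram_offset, rom_offset;

    /* 64 MB of RAM at guest physical address 0 */
    ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    /* a 64 KB ROM near the top of the address space */
    rom_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory(0xfff00000, 0x10000,
                                 rom_offset | IO_MEM_ROM);
}
#endif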
2102 void qemu_ram_free(ram_addr_t addr)
2106 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2108 #ifdef DEBUG_UNASSIGNED
2109 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2110 #endif
2111 #ifdef TARGET_SPARC
2112 do_unassigned_access(addr, 0, 0, 0);
2113 #elif defined(TARGET_CRIS)
2114 do_unassigned_access(addr, 0, 0, 0);
2115 #endif
2116 return 0;
2119 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2121 #ifdef DEBUG_UNASSIGNED
2122 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2123 #endif
2124 #ifdef TARGET_SPARC
2125 do_unassigned_access(addr, 1, 0, 0);
2126 #elif defined(TARGET_CRIS)
2127 do_unassigned_access(addr, 1, 0, 0);
2128 #endif
2131 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2132 unassigned_mem_readb,
2133 unassigned_mem_readb,
2134 unassigned_mem_readb,
2137 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2138 unassigned_mem_writeb,
2139 unassigned_mem_writeb,
2140 unassigned_mem_writeb,
2143 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2145 unsigned long ram_addr;
2146 int dirty_flags;
2147 ram_addr = addr - (unsigned long)phys_ram_base;
2148 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2149 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2150 #if !defined(CONFIG_USER_ONLY)
2151 tb_invalidate_phys_page_fast(ram_addr, 1);
2152 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2153 #endif
2155 stb_p((uint8_t *)(long)addr, val);
2156 #ifdef USE_KQEMU
2157 if (cpu_single_env->kqemu_enabled &&
2158 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2159 kqemu_modify_page(cpu_single_env, ram_addr);
2160 #endif
2161 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2162 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2163 /* we remove the notdirty callback only if the code has been
2164 flushed */
2165 if (dirty_flags == 0xff)
2166 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2169 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2171 unsigned long ram_addr;
2172 int dirty_flags;
2173 ram_addr = addr - (unsigned long)phys_ram_base;
2174 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2175 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2176 #if !defined(CONFIG_USER_ONLY)
2177 tb_invalidate_phys_page_fast(ram_addr, 2);
2178 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2179 #endif
2181 stw_p((uint8_t *)(long)addr, val);
2182 #ifdef USE_KQEMU
2183 if (cpu_single_env->kqemu_enabled &&
2184 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2185 kqemu_modify_page(cpu_single_env, ram_addr);
2186 #endif
2187 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2188 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2189 /* we remove the notdirty callback only if the code has been
2190 flushed */
2191 if (dirty_flags == 0xff)
2192 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2195 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2197 unsigned long ram_addr;
2198 int dirty_flags;
2199 ram_addr = addr - (unsigned long)phys_ram_base;
2200 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2201 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2202 #if !defined(CONFIG_USER_ONLY)
2203 tb_invalidate_phys_page_fast(ram_addr, 4);
2204 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2205 #endif
2207 stl_p((uint8_t *)(long)addr, val);
2208 #ifdef USE_KQEMU
2209 if (cpu_single_env->kqemu_enabled &&
2210 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2211 kqemu_modify_page(cpu_single_env, ram_addr);
2212 #endif
2213 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2214 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2215 /* we remove the notdirty callback only if the code has been
2216 flushed */
2217 if (dirty_flags == 0xff)
2218 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2221 static CPUReadMemoryFunc *error_mem_read[3] = {
2222 NULL, /* never used */
2223 NULL, /* never used */
2224 NULL, /* never used */
2227 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2228 notdirty_mem_writeb,
2229 notdirty_mem_writew,
2230 notdirty_mem_writel,
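/* The mechanism above, in outline: pages from which code has been translated
   have CODE_DIRTY_FLAG cleared, so their TLB write entries are redirected to
   IO_MEM_NOTDIRTY and every guest store traps into the handlers above. Each
   handler invalidates the TBs overlapping the written bytes, performs the
   store and sets the page's dirty bits; once the code on the page has been
   flushed (dirty_flags == 0xff) the notdirty callback is removed with
   tlb_set_dirty() and later stores go straight to RAM again. */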
2233 #if defined(CONFIG_SOFTMMU)
2234 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2235 so these check for a hit then pass through to the normal out-of-line
2236 phys routines. */
2237 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2239 return ldub_phys(addr);
2242 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2244 return lduw_phys(addr);
2247 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2249 return ldl_phys(addr);
2252 /* Generate a debug exception if a watchpoint has been hit.
2253 Returns the real physical address of the access. addr will be a host
2254 address in case of a RAM location. */
2255 static target_ulong check_watchpoint(target_phys_addr_t addr)
2257 CPUState *env = cpu_single_env;
2258 target_ulong watch;
2259 target_ulong retaddr;
2260 int i;
2262 retaddr = addr;
2263 for (i = 0; i < env->nb_watchpoints; i++) {
2264 watch = env->watchpoint[i].vaddr;
2265 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2266 retaddr = addr - env->watchpoint[i].addend;
2267 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2268 cpu_single_env->watchpoint_hit = i + 1;
2269 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2270 break;
2274 return retaddr;
2277 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2278 uint32_t val)
2280 addr = check_watchpoint(addr);
2281 stb_phys(addr, val);
2284 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2285 uint32_t val)
2287 addr = check_watchpoint(addr);
2288 stw_phys(addr, val);
2291 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2292 uint32_t val)
2294 addr = check_watchpoint(addr);
2295 stl_phys(addr, val);
2298 static CPUReadMemoryFunc *watch_mem_read[3] = {
2299 watch_mem_readb,
2300 watch_mem_readw,
2301 watch_mem_readl,
2304 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2305 watch_mem_writeb,
2306 watch_mem_writew,
2307 watch_mem_writel,
2309 #endif
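/* How the handlers above come into play, in outline: when a watchpoint is
   armed on a page, the TLB entries for that page are filled with the
   io_mem_watch region (registered in io_mem_init() below) instead of a
   direct RAM/MMIO mapping, so every load and store on the page is routed
   through watch_mem_*. check_watchpoint() raises CPU_INTERRUPT_DEBUG on a
   hit and the access then completes through the ordinary ld*_phys/st*_phys
   helpers. */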
2311 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2312 unsigned int len)
2314 uint32_t ret;
2315 unsigned int idx;
2317 idx = SUBPAGE_IDX(addr - mmio->base);
2318 #if defined(DEBUG_SUBPAGE)
2319 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2320 mmio, len, addr, idx);
2321 #endif
2322 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2324 return ret;
2327 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2328 uint32_t value, unsigned int len)
2330 unsigned int idx;
2332 idx = SUBPAGE_IDX(addr - mmio->base);
2333 #if defined(DEBUG_SUBPAGE)
2334 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2335 mmio, len, addr, idx, value);
2336 #endif
2337 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2340 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2342 #if defined(DEBUG_SUBPAGE)
2343 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2344 #endif
2346 return subpage_readlen(opaque, addr, 0);
2349 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2350 uint32_t value)
2352 #if defined(DEBUG_SUBPAGE)
2353 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2354 #endif
2355 subpage_writelen(opaque, addr, value, 0);
2358 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2360 #if defined(DEBUG_SUBPAGE)
2361 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2362 #endif
2364 return subpage_readlen(opaque, addr, 1);
2367 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2368 uint32_t value)
2370 #if defined(DEBUG_SUBPAGE)
2371 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2372 #endif
2373 subpage_writelen(opaque, addr, value, 1);
2376 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2378 #if defined(DEBUG_SUBPAGE)
2379 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2380 #endif
2382 return subpage_readlen(opaque, addr, 2);
2385 static void subpage_writel (void *opaque,
2386 target_phys_addr_t addr, uint32_t value)
2388 #if defined(DEBUG_SUBPAGE)
2389 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2390 #endif
2391 subpage_writelen(opaque, addr, value, 2);
2394 static CPUReadMemoryFunc *subpage_read[] = {
2395 &subpage_readb,
2396 &subpage_readw,
2397 &subpage_readl,
2400 static CPUWriteMemoryFunc *subpage_write[] = {
2401 &subpage_writeb,
2402 &subpage_writew,
2403 &subpage_writel,
2406 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2407 int memory)
2409 int idx, eidx;
2410 unsigned int i;
2412 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2413 return -1;
2414 idx = SUBPAGE_IDX(start);
2415 eidx = SUBPAGE_IDX(end);
2416 #if defined(DEBUG_SUBPAGE)
2417 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2418 mmio, start, end, idx, eidx, memory);
2419 #endif
2420 memory >>= IO_MEM_SHIFT;
2421 for (; idx <= eidx; idx++) {
2422 for (i = 0; i < 4; i++) {
2423 if (io_mem_read[memory][i]) {
2424 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2425 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2427 if (io_mem_write[memory][i]) {
2428 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2429 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2434 return 0;
2437 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2438 int orig_memory)
2440 subpage_t *mmio;
2441 int subpage_memory;
2443 mmio = qemu_mallocz(sizeof(subpage_t));
2444 if (mmio != NULL) {
2445 mmio->base = base;
2446 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2447 #if defined(DEBUG_SUBPAGE)
2448 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2449 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2450 #endif
2451 *phys = subpage_memory | IO_MEM_SUBPAGE;
2452 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2455 return mmio;
2458 static void io_mem_init(void)
2460 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2461 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2462 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2463 io_mem_nb = 5;
2465 #if defined(CONFIG_SOFTMMU)
2466 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2467 watch_mem_write, NULL);
2468 #endif
2469 /* alloc dirty bits array */
2470 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2471 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2474 /* mem_read and mem_write are arrays of functions containing the
2475    functions used to access bytes (index 0), words (index 1) and dwords
2476    (index 2). Functions can be omitted with a NULL function pointer. The
2477    registered functions may be modified dynamically later.
2478    If io_index is positive, the corresponding io zone is
2479    modified. If it is zero (or negative), a new io zone is allocated. The
2480    return value can be used with cpu_register_physical_memory(). (-1) is
2481    returned on error. */
2482 int cpu_register_io_memory(int io_index,
2483 CPUReadMemoryFunc **mem_read,
2484 CPUWriteMemoryFunc **mem_write,
2485 void *opaque)
2487 int i, subwidth = 0;
2489 if (io_index <= 0) {
2490 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2491 return -1;
2492 io_index = io_mem_nb++;
2493 } else {
2494 if (io_index >= IO_MEM_NB_ENTRIES)
2495 return -1;
2498 for(i = 0; i < 3; i++) {
2499 if (!mem_read[i] || !mem_write[i])
2500 subwidth = IO_MEM_SUBWIDTH;
2501 io_mem_read[io_index][i] = mem_read[i];
2502 io_mem_write[io_index][i] = mem_write[i];
2504 io_mem_opaque[io_index] = opaque;
2505 return (io_index << IO_MEM_SHIFT) | subwidth;
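/* Sketch of how a device model typically uses the registration API above
   (hypothetical device, registers and base address; not part of this file).
   Leaving the byte/word slots NULL marks the region as sub-width
   (IO_MEM_SUBWIDTH), which makes cpu_register_physical_memory() route it
   through the subpage machinery. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;                /* hypothetical device state */
    return s->regs[(addr & 0xfc) >> 2];
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[(addr & 0xfc) >> 2] = val;
}

static CPUReadMemoryFunc *mydev_read[3] = {
    NULL, NULL, mydev_readl,               /* 32 bit accesses only */
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_map(MyDevState *s, target_phys_addr_t base)
{
    int iomem = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(base, 0x1000, iomem);
}
#endif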
2508 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2510 return io_mem_write[io_index >> IO_MEM_SHIFT];
2513 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2515 return io_mem_read[io_index >> IO_MEM_SHIFT];
2518 /* physical memory access (slow version, mainly for debug) */
2519 #if defined(CONFIG_USER_ONLY)
2520 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2521 int len, int is_write)
2523 int l, flags;
2524 target_ulong page;
2525 void * p;
2527 while (len > 0) {
2528 page = addr & TARGET_PAGE_MASK;
2529 l = (page + TARGET_PAGE_SIZE) - addr;
2530 if (l > len)
2531 l = len;
2532 flags = page_get_flags(page);
2533 if (!(flags & PAGE_VALID))
2534 return;
2535 if (is_write) {
2536 if (!(flags & PAGE_WRITE))
2537 return;
2538 /* XXX: this code should not depend on lock_user */
2539 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2540 /* FIXME - should this return an error rather than just fail? */
2541 return;
2542 memcpy(p, buf, len);
2543 unlock_user(p, addr, len);
2544 } else {
2545 if (!(flags & PAGE_READ))
2546 return;
2547 /* XXX: this code should not depend on lock_user */
2548 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2549 /* FIXME - should this return an error rather than just fail? */
2550 return;
2551 memcpy(buf, p, len);
2552 unlock_user(p, addr, 0);
2554 len -= l;
2555 buf += l;
2556 addr += l;
2560 #else
2561 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2562 int len, int is_write)
2564 int l, io_index;
2565 uint8_t *ptr;
2566 uint32_t val;
2567 target_phys_addr_t page;
2568 unsigned long pd;
2569 PhysPageDesc *p;
2571 while (len > 0) {
2572 page = addr & TARGET_PAGE_MASK;
2573 l = (page + TARGET_PAGE_SIZE) - addr;
2574 if (l > len)
2575 l = len;
2576 p = phys_page_find(page >> TARGET_PAGE_BITS);
2577 if (!p) {
2578 pd = IO_MEM_UNASSIGNED;
2579 } else {
2580 pd = p->phys_offset;
2583 if (is_write) {
2584 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2585 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2586 /* XXX: could force cpu_single_env to NULL to avoid
2587 potential bugs */
2588 if (l >= 4 && ((addr & 3) == 0)) {
2589 /* 32 bit write access */
2590 val = ldl_p(buf);
2591 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2592 l = 4;
2593 } else if (l >= 2 && ((addr & 1) == 0)) {
2594 /* 16 bit write access */
2595 val = lduw_p(buf);
2596 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2597 l = 2;
2598 } else {
2599 /* 8 bit write access */
2600 val = ldub_p(buf);
2601 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2602 l = 1;
2604 } else {
2605 unsigned long addr1;
2606 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2607 /* RAM case */
2608 ptr = phys_ram_base + addr1;
2609 memcpy(ptr, buf, l);
2610 if (!cpu_physical_memory_is_dirty(addr1)) {
2611 /* invalidate code */
2612 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2613 /* set dirty bit */
2614 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2615 (0xff & ~CODE_DIRTY_FLAG);
2618 } else {
2619 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2620 !(pd & IO_MEM_ROMD)) {
2621 /* I/O case */
2622 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2623 if (l >= 4 && ((addr & 3) == 0)) {
2624 /* 32 bit read access */
2625 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2626 stl_p(buf, val);
2627 l = 4;
2628 } else if (l >= 2 && ((addr & 1) == 0)) {
2629 /* 16 bit read access */
2630 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2631 stw_p(buf, val);
2632 l = 2;
2633 } else {
2634 /* 8 bit read access */
2635 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2636 stb_p(buf, val);
2637 l = 1;
2639 } else {
2640 /* RAM case */
2641 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2642 (addr & ~TARGET_PAGE_MASK);
2643 memcpy(buf, ptr, l);
2646 len -= l;
2647 buf += l;
2648 addr += l;
2652 /* used for ROM loading: can write to RAM and ROM */
2653 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2654 const uint8_t *buf, int len)
2656 int l;
2657 uint8_t *ptr;
2658 target_phys_addr_t page;
2659 unsigned long pd;
2660 PhysPageDesc *p;
2662 while (len > 0) {
2663 page = addr & TARGET_PAGE_MASK;
2664 l = (page + TARGET_PAGE_SIZE) - addr;
2665 if (l > len)
2666 l = len;
2667 p = phys_page_find(page >> TARGET_PAGE_BITS);
2668 if (!p) {
2669 pd = IO_MEM_UNASSIGNED;
2670 } else {
2671 pd = p->phys_offset;
2674 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2675 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2676 !(pd & IO_MEM_ROMD)) {
2677 /* do nothing */
2678 } else {
2679 unsigned long addr1;
2680 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2681 /* ROM/RAM case */
2682 ptr = phys_ram_base + addr1;
2683 memcpy(ptr, buf, l);
2685 len -= l;
2686 buf += l;
2687 addr += l;
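/* Example of the intended use (hypothetical symbols bios_data/bios_size):
   firmware images are copied into an already registered ROM region, e.g.
       cpu_physical_memory_write_rom(0xfff00000, bios_data, bios_size);
   A plain cpu_physical_memory_write() to the same range would be discarded,
   since ROM pages are backed by unassigned_mem_write. */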
2692 /* warning: addr must be aligned */
2693 uint32_t ldl_phys(target_phys_addr_t addr)
2695 int io_index;
2696 uint8_t *ptr;
2697 uint32_t val;
2698 unsigned long pd;
2699 PhysPageDesc *p;
2701 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2702 if (!p) {
2703 pd = IO_MEM_UNASSIGNED;
2704 } else {
2705 pd = p->phys_offset;
2708 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2709 !(pd & IO_MEM_ROMD)) {
2710 /* I/O case */
2711 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2712 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2713 } else {
2714 /* RAM case */
2715 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2716 (addr & ~TARGET_PAGE_MASK);
2717 val = ldl_p(ptr);
2719 return val;
2722 /* warning: addr must be aligned */
2723 uint64_t ldq_phys(target_phys_addr_t addr)
2725 int io_index;
2726 uint8_t *ptr;
2727 uint64_t val;
2728 unsigned long pd;
2729 PhysPageDesc *p;
2731 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2732 if (!p) {
2733 pd = IO_MEM_UNASSIGNED;
2734 } else {
2735 pd = p->phys_offset;
2738 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2739 !(pd & IO_MEM_ROMD)) {
2740 /* I/O case */
2741 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2742 #ifdef TARGET_WORDS_BIGENDIAN
2743 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2744 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2745 #else
2746 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2747 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2748 #endif
2749 } else {
2750 /* RAM case */
2751 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2752 (addr & ~TARGET_PAGE_MASK);
2753 val = ldq_p(ptr);
2755 return val;
2758 /* XXX: optimize */
2759 uint32_t ldub_phys(target_phys_addr_t addr)
2761 uint8_t val;
2762 cpu_physical_memory_read(addr, &val, 1);
2763 return val;
2766 /* XXX: optimize */
2767 uint32_t lduw_phys(target_phys_addr_t addr)
2769 uint16_t val;
2770 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2771 return tswap16(val);
2774 /* warning: addr must be aligned. The ram page is not marked as dirty
2775    and the code inside is not invalidated. This is useful when the dirty
2776    bits are used to track modified PTEs. */
2777 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2779 int io_index;
2780 uint8_t *ptr;
2781 unsigned long pd;
2782 PhysPageDesc *p;
2784 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2785 if (!p) {
2786 pd = IO_MEM_UNASSIGNED;
2787 } else {
2788 pd = p->phys_offset;
2791 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2792 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2793 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2794 } else {
2795 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2796 (addr & ~TARGET_PAGE_MASK);
2797 stl_p(ptr, val);
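/* Typical caller, for illustration only: a software page-table walk that
   sets the accessed/dirty bits of a guest PTE writes the PTE back with
   stl_phys_notdirty(), so the update neither invalidates translated code
   nor marks the page dirty. Roughly (names taken from the i386 MMU code,
   shown here only as an example):
       pte |= PG_ACCESSED_MASK;
       stl_phys_notdirty(pte_addr, pte);
 */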
2801 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2803 int io_index;
2804 uint8_t *ptr;
2805 unsigned long pd;
2806 PhysPageDesc *p;
2808 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2809 if (!p) {
2810 pd = IO_MEM_UNASSIGNED;
2811 } else {
2812 pd = p->phys_offset;
2815 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2816 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2817 #ifdef TARGET_WORDS_BIGENDIAN
2818 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2819 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2820 #else
2821 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2823 #endif
2824 } else {
2825 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2826 (addr & ~TARGET_PAGE_MASK);
2827 stq_p(ptr, val);
2831 /* warning: addr must be aligned */
2832 void stl_phys(target_phys_addr_t addr, uint32_t val)
2834 int io_index;
2835 uint8_t *ptr;
2836 unsigned long pd;
2837 PhysPageDesc *p;
2839 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2840 if (!p) {
2841 pd = IO_MEM_UNASSIGNED;
2842 } else {
2843 pd = p->phys_offset;
2846 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2847 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2848 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2849 } else {
2850 unsigned long addr1;
2851 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2852 /* RAM case */
2853 ptr = phys_ram_base + addr1;
2854 stl_p(ptr, val);
2855 if (!cpu_physical_memory_is_dirty(addr1)) {
2856 /* invalidate code */
2857 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2858 /* set dirty bit */
2859 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2860 (0xff & ~CODE_DIRTY_FLAG);
2865 /* XXX: optimize */
2866 void stb_phys(target_phys_addr_t addr, uint32_t val)
2868 uint8_t v = val;
2869 cpu_physical_memory_write(addr, &v, 1);
2872 /* XXX: optimize */
2873 void stw_phys(target_phys_addr_t addr, uint32_t val)
2875 uint16_t v = tswap16(val);
2876 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2879 /* XXX: optimize */
2880 void stq_phys(target_phys_addr_t addr, uint64_t val)
2882 val = tswap64(val);
2883 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2886 #endif
2888 /* virtual memory access for debug */
2889 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2890 uint8_t *buf, int len, int is_write)
2892 int l;
2893 target_phys_addr_t phys_addr;
2894 target_ulong page;
2896 while (len > 0) {
2897 page = addr & TARGET_PAGE_MASK;
2898 phys_addr = cpu_get_phys_page_debug(env, page);
2899 /* if no physical page mapped, return an error */
2900 if (phys_addr == -1)
2901 return -1;
2902 l = (page + TARGET_PAGE_SIZE) - addr;
2903 if (l > len)
2904 l = len;
2905 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2906 buf, l, is_write);
2907 len -= l;
2908 buf += l;
2909 addr += l;
2911 return 0;
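/* Sketch of a typical caller (hypothetical buffer and address): the gdb stub
   and the monitor use this helper to read or write guest virtual memory
   through the guest page tables. */
#if 0
static void example_read_guest_vaddr(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) == 0) {
        /* buf[] now holds the four bytes located at vaddr */
    }
}
#endif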
2914 void dump_exec_info(FILE *f,
2915 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2917 int i, target_code_size, max_target_code_size;
2918 int direct_jmp_count, direct_jmp2_count, cross_page;
2919 TranslationBlock *tb;
2921 target_code_size = 0;
2922 max_target_code_size = 0;
2923 cross_page = 0;
2924 direct_jmp_count = 0;
2925 direct_jmp2_count = 0;
2926 for(i = 0; i < nb_tbs; i++) {
2927 tb = &tbs[i];
2928 target_code_size += tb->size;
2929 if (tb->size > max_target_code_size)
2930 max_target_code_size = tb->size;
2931 if (tb->page_addr[1] != -1)
2932 cross_page++;
2933 if (tb->tb_next_offset[0] != 0xffff) {
2934 direct_jmp_count++;
2935 if (tb->tb_next_offset[1] != 0xffff) {
2936 direct_jmp2_count++;
2940 /* XXX: avoid using doubles? */
2941 cpu_fprintf(f, "Translation buffer state:\n");
2942 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2943 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2944 nb_tbs ? target_code_size / nb_tbs : 0,
2945 max_target_code_size);
2946 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2947 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2948 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2949 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2950 cross_page,
2951 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2952 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2953 direct_jmp_count,
2954 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2955 direct_jmp2_count,
2956 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2957 cpu_fprintf(f, "\nStatistics:\n");
2958 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2959 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2960 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2961 #ifdef CONFIG_PROFILER
2963 int64_t tot;
2964 tot = dyngen_interm_time + dyngen_code_time;
2965 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2966 tot, tot / 2.4e9);
2967 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2968 dyngen_tb_count,
2969 dyngen_tb_count1 - dyngen_tb_count,
2970 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
2971 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2972 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
2973 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
2974 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
2975 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2976 dyngen_tb_count ?
2977 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
2978 cpu_fprintf(f, "cycles/op %0.1f\n",
2979 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
2980 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2981 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
2982 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2983 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
2984 if (tot == 0)
2985 tot = 1;
2986 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2987 (double)dyngen_interm_time / tot * 100.0);
2988 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2989 (double)dyngen_code_time / tot * 100.0);
2990 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2991 dyngen_restore_count);
2992 cpu_fprintf(f, " avg cycles %0.1f\n",
2993 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
2995 extern void dump_op_count(void);
2996 dump_op_count();
2999 #endif
3002 #if !defined(CONFIG_USER_ONLY)
3004 #define MMUSUFFIX _cmmu
3005 #define GETPC() NULL
3006 #define env cpu_single_env
3007 #define SOFTMMU_CODE_ACCESS
3009 #define SHIFT 0
3010 #include "softmmu_template.h"
3012 #define SHIFT 1
3013 #include "softmmu_template.h"
3015 #define SHIFT 2
3016 #include "softmmu_template.h"
3018 #define SHIFT 3
3019 #include "softmmu_template.h"
3021 #undef env
3023 #endif