CRIS MMU Updates
[qemu/qemu-JZ.git] / exec.c
blob eb3c8abf5dd28e3ab7d7ddcbb090a3719e29a43f
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #if defined(CONFIG_USER_ONLY)
40 #include <qemu.h>
41 #endif
43 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_FLUSH
45 //#define DEBUG_TLB
46 //#define DEBUG_UNASSIGNED
48 /* make various TB consistency checks */
49 //#define DEBUG_TB_CHECK
50 //#define DEBUG_TLB_CHECK
52 //#define DEBUG_IOPORT
53 //#define DEBUG_SUBPAGE
55 #if !defined(CONFIG_USER_ONLY)
56 /* TB consistency checks only implemented for usermode emulation. */
57 #undef DEBUG_TB_CHECK
58 #endif
60 /* threshold to flush the translated code buffer */
61 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #define MMAP_AREA_START 0x00000000
66 #define MMAP_AREA_END 0xa8000000
68 #if defined(TARGET_SPARC64)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 41
70 #elif defined(TARGET_SPARC)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 36
72 #elif defined(TARGET_ALPHA)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #define TARGET_VIRT_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_PPC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 36
81 #else
82 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
83 #define TARGET_PHYS_ADDR_SPACE_BITS 32
84 #endif
86 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
87 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88 int nb_tbs;
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
92 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
93 uint8_t *code_gen_ptr;
95 ram_addr_t phys_ram_size;
96 int phys_ram_fd;
97 uint8_t *phys_ram_base;
98 uint8_t *phys_ram_dirty;
99 static ram_addr_t phys_ram_alloc_offset = 0;
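/* phys_ram_dirty holds one byte of dirty flags per target page of guest RAM
   (including CODE_DIRTY_FLAG, which tracks whether translated code for the
   page is still valid); phys_ram_alloc_offset is the bump pointer used by
   qemu_ram_alloc() to hand out RAM offsets from phys_ram_base. */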
101 CPUState *first_cpu;
102 /* current CPU in the current thread. It is only valid inside
103 cpu_exec() */
104 CPUState *cpu_single_env;
106 typedef struct PageDesc {
107 /* list of TBs intersecting this ram page */
108 TranslationBlock *first_tb;
109 /* in order to optimize self modifying code, we count the number
110 of lookups we do to a given page to use a bitmap */
111 unsigned int code_write_count;
112 uint8_t *code_bitmap;
113 #if defined(CONFIG_USER_ONLY)
114 unsigned long flags;
115 #endif
116 } PageDesc;
118 typedef struct PhysPageDesc {
119 /* offset in host memory of the page + io_index in the low 12 bits */
120 ram_addr_t phys_offset;
121 } PhysPageDesc;
123 #define L2_BITS 10
124 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
125 /* XXX: this is a temporary hack for alpha target.
126 * In the future, this is to be replaced by a multi-level table
127 * to actually be able to handle the complete 64-bit address space.
128 */
129 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
130 #else
131 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
132 #endif
134 #define L1_SIZE (1 << L1_BITS)
135 #define L2_SIZE (1 << L2_BITS)
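/* The page tables use the top bits of a page index to select an entry in
   l1_map (or l1_phys_map) and the low L2_BITS to select a PageDesc (or
   PhysPageDesc) inside a second-level array that is allocated on demand;
   phys_page_find_alloc() adds one more directory level when the physical
   address space is wider than 32 bits. */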
137 static void io_mem_init(void);
139 unsigned long qemu_real_host_page_size;
140 unsigned long qemu_host_page_bits;
141 unsigned long qemu_host_page_size;
142 unsigned long qemu_host_page_mask;
144 /* XXX: for system emulation, it could just be an array */
145 static PageDesc *l1_map[L1_SIZE];
146 PhysPageDesc **l1_phys_map;
148 /* io memory support */
149 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
150 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
151 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
152 static int io_mem_nb;
153 #if defined(CONFIG_SOFTMMU)
154 static int io_mem_watch;
155 #endif
157 /* log support */
158 char *logfilename = "/tmp/qemu.log";
159 FILE *logfile;
160 int loglevel;
161 static int log_append = 0;
163 /* statistics */
164 static int tlb_flush_count;
165 static int tb_flush_count;
166 static int tb_phys_invalidate_count;
168 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
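/* A subpage_t subdivides a single target page so that several I/O regions
   can live inside it: the mem_read/mem_write/opaque tables are indexed by
   the byte offset within the page (SUBPAGE_IDX) and by the access size. */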
169 typedef struct subpage_t {
170 target_phys_addr_t base;
171 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
172 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
173 void *opaque[TARGET_PAGE_SIZE][2][4];
174 } subpage_t;
176 static void page_init(void)
178 /* NOTE: we can always suppose that qemu_host_page_size >=
179 TARGET_PAGE_SIZE */
180 #ifdef _WIN32
182 SYSTEM_INFO system_info;
183 DWORD old_protect;
185 GetSystemInfo(&system_info);
186 qemu_real_host_page_size = system_info.dwPageSize;
188 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
189 PAGE_EXECUTE_READWRITE, &old_protect);
191 #else
192 qemu_real_host_page_size = getpagesize();
194 unsigned long start, end;
196 start = (unsigned long)code_gen_buffer;
197 start &= ~(qemu_real_host_page_size - 1);
199 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
200 end += qemu_real_host_page_size - 1;
201 end &= ~(qemu_real_host_page_size - 1);
203 mprotect((void *)start, end - start,
204 PROT_READ | PROT_WRITE | PROT_EXEC);
206 #endif
208 if (qemu_host_page_size == 0)
209 qemu_host_page_size = qemu_real_host_page_size;
210 if (qemu_host_page_size < TARGET_PAGE_SIZE)
211 qemu_host_page_size = TARGET_PAGE_SIZE;
212 qemu_host_page_bits = 0;
213 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
214 qemu_host_page_bits++;
215 qemu_host_page_mask = ~(qemu_host_page_size - 1);
216 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
217 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
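/* In user-mode emulation, mark every region the host has already mapped
   (as listed in /proc/self/maps) as PAGE_RESERVED so that guest mappings
   are not placed on top of them. */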
219 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
221 long long startaddr, endaddr;
222 FILE *f;
223 int n;
225 f = fopen("/proc/self/maps", "r");
226 if (f) {
227 do {
228 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
229 if (n == 2) {
230 startaddr = MIN(startaddr,
231 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
232 endaddr = MIN(endaddr,
233 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
234 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
235 TARGET_PAGE_ALIGN(endaddr),
236 PAGE_RESERVED);
238 } while (!feof(f));
239 fclose(f);
242 #endif
245 static inline PageDesc *page_find_alloc(target_ulong index)
247 PageDesc **lp, *p;
249 lp = &l1_map[index >> L2_BITS];
250 p = *lp;
251 if (!p) {
252 /* allocate if not found */
253 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
254 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
255 *lp = p;
257 return p + (index & (L2_SIZE - 1));
260 static inline PageDesc *page_find(target_ulong index)
262 PageDesc *p;
264 p = l1_map[index >> L2_BITS];
265 if (!p)
266 return 0;
267 return p + (index & (L2_SIZE - 1));
270 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
272 void **lp, **p;
273 PhysPageDesc *pd;
275 p = (void **)l1_phys_map;
276 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
278 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
279 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
280 #endif
281 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
282 p = *lp;
283 if (!p) {
284 /* allocate if not found */
285 if (!alloc)
286 return NULL;
287 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
288 memset(p, 0, sizeof(void *) * L1_SIZE);
289 *lp = p;
291 #endif
292 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
293 pd = *lp;
294 if (!pd) {
295 int i;
296 /* allocate if not found */
297 if (!alloc)
298 return NULL;
299 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
300 *lp = pd;
301 for (i = 0; i < L2_SIZE; i++)
302 pd[i].phys_offset = IO_MEM_UNASSIGNED;
304 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
307 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
309 return phys_page_find_alloc(index, 0);
312 #if !defined(CONFIG_USER_ONLY)
313 static void tlb_protect_code(ram_addr_t ram_addr);
314 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
315 target_ulong vaddr);
316 #endif
318 void cpu_exec_init(CPUState *env)
320 CPUState **penv;
321 int cpu_index;
323 if (!code_gen_ptr) {
324 cpu_gen_init();
325 code_gen_ptr = code_gen_buffer;
326 page_init();
327 io_mem_init();
329 env->next_cpu = NULL;
330 penv = &first_cpu;
331 cpu_index = 0;
332 while (*penv != NULL) {
333 penv = (CPUState **)&(*penv)->next_cpu;
334 cpu_index++;
336 env->cpu_index = cpu_index;
337 env->nb_watchpoints = 0;
338 *penv = env;
341 static inline void invalidate_page_bitmap(PageDesc *p)
343 if (p->code_bitmap) {
344 qemu_free(p->code_bitmap);
345 p->code_bitmap = NULL;
347 p->code_write_count = 0;
350 /* set to NULL all the 'first_tb' fields in all PageDescs */
351 static void page_flush_tb(void)
353 int i, j;
354 PageDesc *p;
356 for(i = 0; i < L1_SIZE; i++) {
357 p = l1_map[i];
358 if (p) {
359 for(j = 0; j < L2_SIZE; j++) {
360 p->first_tb = NULL;
361 invalidate_page_bitmap(p);
362 p++;
368 /* flush all the translation blocks */
369 /* XXX: tb_flush is currently not thread safe */
370 void tb_flush(CPUState *env1)
372 CPUState *env;
373 #if defined(DEBUG_FLUSH)
374 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
375 (unsigned long)(code_gen_ptr - code_gen_buffer),
376 nb_tbs, nb_tbs > 0 ?
377 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
378 #endif
379 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
380 cpu_abort(env1, "Internal error: code buffer overflow\n");
382 nb_tbs = 0;
384 for(env = first_cpu; env != NULL; env = env->next_cpu) {
385 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
388 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
389 page_flush_tb();
391 code_gen_ptr = code_gen_buffer;
392 /* XXX: flush processor icache at this point if cache flush is
393 expensive */
394 tb_flush_count++;
397 #ifdef DEBUG_TB_CHECK
399 static void tb_invalidate_check(target_ulong address)
401 TranslationBlock *tb;
402 int i;
403 address &= TARGET_PAGE_MASK;
404 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
405 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
406 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
407 address >= tb->pc + tb->size)) {
408 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
409 address, (long)tb->pc, tb->size);
415 /* verify that all the pages have correct rights for code */
416 static void tb_page_check(void)
418 TranslationBlock *tb;
419 int i, flags1, flags2;
421 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
422 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
423 flags1 = page_get_flags(tb->pc);
424 flags2 = page_get_flags(tb->pc + tb->size - 1);
425 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
426 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
427 (long)tb->pc, tb->size, flags1, flags2);
433 void tb_jmp_check(TranslationBlock *tb)
435 TranslationBlock *tb1;
436 unsigned int n1;
438 /* suppress any remaining jumps to this TB */
439 tb1 = tb->jmp_first;
440 for(;;) {
441 n1 = (long)tb1 & 3;
442 tb1 = (TranslationBlock *)((long)tb1 & ~3);
443 if (n1 == 2)
444 break;
445 tb1 = tb1->jmp_next[n1];
447 /* check end of list */
448 if (tb1 != tb) {
449 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
453 #endif
455 /* invalidate one TB */
456 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
457 int next_offset)
459 TranslationBlock *tb1;
460 for(;;) {
461 tb1 = *ptb;
462 if (tb1 == tb) {
463 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
464 break;
466 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
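/* TBs are chained per page through tb->page_next[]; the low two bits of
   each pointer in that list encode which of the TB's (up to) two pages the
   link belongs to, so they must be masked off before dereferencing. */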
470 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
472 TranslationBlock *tb1;
473 unsigned int n1;
475 for(;;) {
476 tb1 = *ptb;
477 n1 = (long)tb1 & 3;
478 tb1 = (TranslationBlock *)((long)tb1 & ~3);
479 if (tb1 == tb) {
480 *ptb = tb1->page_next[n1];
481 break;
483 ptb = &tb1->page_next[n1];
487 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
489 TranslationBlock *tb1, **ptb;
490 unsigned int n1;
492 ptb = &tb->jmp_next[n];
493 tb1 = *ptb;
494 if (tb1) {
495 /* find tb(n) in circular list */
496 for(;;) {
497 tb1 = *ptb;
498 n1 = (long)tb1 & 3;
499 tb1 = (TranslationBlock *)((long)tb1 & ~3);
500 if (n1 == n && tb1 == tb)
501 break;
502 if (n1 == 2) {
503 ptb = &tb1->jmp_first;
504 } else {
505 ptb = &tb1->jmp_next[n1];
508 /* now we can suppress tb(n) from the list */
509 *ptb = tb->jmp_next[n];
511 tb->jmp_next[n] = NULL;
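/* In the circular jmp_first list the low two bits of a pointer give the
   jump slot (0 or 1) inside the referring TB; the value 2 tags the list
   head (the TB itself) and marks the end of a traversal. */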
515 /* reset the jump entry 'n' of a TB so that it is not chained to
516 another TB */
517 static inline void tb_reset_jump(TranslationBlock *tb, int n)
519 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
522 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
524 CPUState *env;
525 PageDesc *p;
526 unsigned int h, n1;
527 target_phys_addr_t phys_pc;
528 TranslationBlock *tb1, *tb2;
530 /* remove the TB from the hash list */
531 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
532 h = tb_phys_hash_func(phys_pc);
533 tb_remove(&tb_phys_hash[h], tb,
534 offsetof(TranslationBlock, phys_hash_next));
536 /* remove the TB from the page list */
537 if (tb->page_addr[0] != page_addr) {
538 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
539 tb_page_remove(&p->first_tb, tb);
540 invalidate_page_bitmap(p);
542 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
543 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
544 tb_page_remove(&p->first_tb, tb);
545 invalidate_page_bitmap(p);
548 tb_invalidated_flag = 1;
550 /* remove the TB from the hash list */
551 h = tb_jmp_cache_hash_func(tb->pc);
552 for(env = first_cpu; env != NULL; env = env->next_cpu) {
553 if (env->tb_jmp_cache[h] == tb)
554 env->tb_jmp_cache[h] = NULL;
557 /* suppress this TB from the two jump lists */
558 tb_jmp_remove(tb, 0);
559 tb_jmp_remove(tb, 1);
561 /* suppress any remaining jumps to this TB */
562 tb1 = tb->jmp_first;
563 for(;;) {
564 n1 = (long)tb1 & 3;
565 if (n1 == 2)
566 break;
567 tb1 = (TranslationBlock *)((long)tb1 & ~3);
568 tb2 = tb1->jmp_next[n1];
569 tb_reset_jump(tb1, n1);
570 tb1->jmp_next[n1] = NULL;
571 tb1 = tb2;
573 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
575 tb_phys_invalidate_count++;
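/* set bits [start, start + len) in the bit array 'tab'; used by
   build_page_bitmap() to mark which bytes of a page are covered by
   translated code */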
578 static inline void set_bits(uint8_t *tab, int start, int len)
580 int end, mask, end1;
582 end = start + len;
583 tab += start >> 3;
584 mask = 0xff << (start & 7);
585 if ((start & ~7) == (end & ~7)) {
586 if (start < end) {
587 mask &= ~(0xff << (end & 7));
588 *tab |= mask;
590 } else {
591 *tab++ |= mask;
592 start = (start + 8) & ~7;
593 end1 = end & ~7;
594 while (start < end1) {
595 *tab++ = 0xff;
596 start += 8;
598 if (start < end) {
599 mask = ~(0xff << (end & 7));
600 *tab |= mask;
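/* allocate and fill p->code_bitmap with one bit per byte of the page that
   is covered by a TB, so that tb_invalidate_phys_page_fast() can skip
   writes which do not touch any translated code */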
605 static void build_page_bitmap(PageDesc *p)
607 int n, tb_start, tb_end;
608 TranslationBlock *tb;
610 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
611 if (!p->code_bitmap)
612 return;
613 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
615 tb = p->first_tb;
616 while (tb != NULL) {
617 n = (long)tb & 3;
618 tb = (TranslationBlock *)((long)tb & ~3);
619 /* NOTE: this is subtle as a TB may span two physical pages */
620 if (n == 0) {
621 /* NOTE: tb_end may be after the end of the page, but
622 it is not a problem */
623 tb_start = tb->pc & ~TARGET_PAGE_MASK;
624 tb_end = tb_start + tb->size;
625 if (tb_end > TARGET_PAGE_SIZE)
626 tb_end = TARGET_PAGE_SIZE;
627 } else {
628 tb_start = 0;
629 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
631 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
632 tb = tb->page_next[n];
636 #ifdef TARGET_HAS_PRECISE_SMC
638 static void tb_gen_code(CPUState *env,
639 target_ulong pc, target_ulong cs_base, int flags,
640 int cflags)
642 TranslationBlock *tb;
643 uint8_t *tc_ptr;
644 target_ulong phys_pc, phys_page2, virt_page2;
645 int code_gen_size;
647 phys_pc = get_phys_addr_code(env, pc);
648 tb = tb_alloc(pc);
649 if (!tb) {
650 /* flush must be done */
651 tb_flush(env);
652 /* cannot fail at this point */
653 tb = tb_alloc(pc);
655 tc_ptr = code_gen_ptr;
656 tb->tc_ptr = tc_ptr;
657 tb->cs_base = cs_base;
658 tb->flags = flags;
659 tb->cflags = cflags;
660 cpu_gen_code(env, tb, &code_gen_size);
661 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
663 /* check next page if needed */
664 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
665 phys_page2 = -1;
666 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
667 phys_page2 = get_phys_addr_code(env, virt_page2);
669 tb_link_phys(tb, phys_pc, phys_page2);
671 #endif
673 /* invalidate all TBs which intersect with the target physical page
674 starting in range [start;end[. NOTE: start and end must refer to
675 the same physical page. 'is_cpu_write_access' should be true if called
676 from a real cpu write access: the virtual CPU will exit the current
677 TB if code is modified inside this TB. */
678 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
679 int is_cpu_write_access)
681 int n, current_tb_modified, current_tb_not_found, current_flags;
682 CPUState *env = cpu_single_env;
683 PageDesc *p;
684 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
685 target_ulong tb_start, tb_end;
686 target_ulong current_pc, current_cs_base;
688 p = page_find(start >> TARGET_PAGE_BITS);
689 if (!p)
690 return;
691 if (!p->code_bitmap &&
692 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
693 is_cpu_write_access) {
694 /* build code bitmap */
695 build_page_bitmap(p);
698 /* we remove all the TBs in the range [start, end[ */
699 /* XXX: see if in some cases it could be faster to invalidate all the code */
700 current_tb_not_found = is_cpu_write_access;
701 current_tb_modified = 0;
702 current_tb = NULL; /* avoid warning */
703 current_pc = 0; /* avoid warning */
704 current_cs_base = 0; /* avoid warning */
705 current_flags = 0; /* avoid warning */
706 tb = p->first_tb;
707 while (tb != NULL) {
708 n = (long)tb & 3;
709 tb = (TranslationBlock *)((long)tb & ~3);
710 tb_next = tb->page_next[n];
711 /* NOTE: this is subtle as a TB may span two physical pages */
712 if (n == 0) {
713 /* NOTE: tb_end may be after the end of the page, but
714 it is not a problem */
715 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
716 tb_end = tb_start + tb->size;
717 } else {
718 tb_start = tb->page_addr[1];
719 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
721 if (!(tb_end <= start || tb_start >= end)) {
722 #ifdef TARGET_HAS_PRECISE_SMC
723 if (current_tb_not_found) {
724 current_tb_not_found = 0;
725 current_tb = NULL;
726 if (env->mem_write_pc) {
727 /* now we have a real cpu fault */
728 current_tb = tb_find_pc(env->mem_write_pc);
731 if (current_tb == tb &&
732 !(current_tb->cflags & CF_SINGLE_INSN)) {
733 /* If we are modifying the current TB, we must stop
734 its execution. We could be more precise by checking
735 that the modification is after the current PC, but it
736 would require a specialized function to partially
737 restore the CPU state */
739 current_tb_modified = 1;
740 cpu_restore_state(current_tb, env,
741 env->mem_write_pc, NULL);
742 #if defined(TARGET_I386)
743 current_flags = env->hflags;
744 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
745 current_cs_base = (target_ulong)env->segs[R_CS].base;
746 current_pc = current_cs_base + env->eip;
747 #else
748 #error unsupported CPU
749 #endif
751 #endif /* TARGET_HAS_PRECISE_SMC */
752 /* we need to do that to handle the case where a signal
753 occurs while doing tb_phys_invalidate() */
754 saved_tb = NULL;
755 if (env) {
756 saved_tb = env->current_tb;
757 env->current_tb = NULL;
759 tb_phys_invalidate(tb, -1);
760 if (env) {
761 env->current_tb = saved_tb;
762 if (env->interrupt_request && env->current_tb)
763 cpu_interrupt(env, env->interrupt_request);
766 tb = tb_next;
768 #if !defined(CONFIG_USER_ONLY)
769 /* if no code remaining, no need to continue to use slow writes */
770 if (!p->first_tb) {
771 invalidate_page_bitmap(p);
772 if (is_cpu_write_access) {
773 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
776 #endif
777 #ifdef TARGET_HAS_PRECISE_SMC
778 if (current_tb_modified) {
779 /* we generate a block containing just the instruction
780 modifying the memory. It will ensure that it cannot modify
781 itself */
782 env->current_tb = NULL;
783 tb_gen_code(env, current_pc, current_cs_base, current_flags,
784 CF_SINGLE_INSN);
785 cpu_resume_from_signal(env, NULL);
787 #endif
790 /* len must be <= 8 and start must be a multiple of len */
791 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
793 PageDesc *p;
794 int offset, b;
795 #if 0
796 if (1) {
797 if (loglevel) {
798 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
799 cpu_single_env->mem_write_vaddr, len,
800 cpu_single_env->eip,
801 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
804 #endif
805 p = page_find(start >> TARGET_PAGE_BITS);
806 if (!p)
807 return;
808 if (p->code_bitmap) {
809 offset = start & ~TARGET_PAGE_MASK;
810 b = p->code_bitmap[offset >> 3] >> (offset & 7);
811 if (b & ((1 << len) - 1))
812 goto do_invalidate;
813 } else {
814 do_invalidate:
815 tb_invalidate_phys_page_range(start, start + len, 1);
819 #if !defined(CONFIG_SOFTMMU)
820 static void tb_invalidate_phys_page(target_phys_addr_t addr,
821 unsigned long pc, void *puc)
823 int n, current_flags, current_tb_modified;
824 target_ulong current_pc, current_cs_base;
825 PageDesc *p;
826 TranslationBlock *tb, *current_tb;
827 #ifdef TARGET_HAS_PRECISE_SMC
828 CPUState *env = cpu_single_env;
829 #endif
831 addr &= TARGET_PAGE_MASK;
832 p = page_find(addr >> TARGET_PAGE_BITS);
833 if (!p)
834 return;
835 tb = p->first_tb;
836 current_tb_modified = 0;
837 current_tb = NULL;
838 current_pc = 0; /* avoid warning */
839 current_cs_base = 0; /* avoid warning */
840 current_flags = 0; /* avoid warning */
841 #ifdef TARGET_HAS_PRECISE_SMC
842 if (tb && pc != 0) {
843 current_tb = tb_find_pc(pc);
845 #endif
846 while (tb != NULL) {
847 n = (long)tb & 3;
848 tb = (TranslationBlock *)((long)tb & ~3);
849 #ifdef TARGET_HAS_PRECISE_SMC
850 if (current_tb == tb &&
851 !(current_tb->cflags & CF_SINGLE_INSN)) {
852 /* If we are modifying the current TB, we must stop
853 its execution. We could be more precise by checking
854 that the modification is after the current PC, but it
855 would require a specialized function to partially
856 restore the CPU state */
858 current_tb_modified = 1;
859 cpu_restore_state(current_tb, env, pc, puc);
860 #if defined(TARGET_I386)
861 current_flags = env->hflags;
862 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
863 current_cs_base = (target_ulong)env->segs[R_CS].base;
864 current_pc = current_cs_base + env->eip;
865 #else
866 #error unsupported CPU
867 #endif
869 #endif /* TARGET_HAS_PRECISE_SMC */
870 tb_phys_invalidate(tb, addr);
871 tb = tb->page_next[n];
873 p->first_tb = NULL;
874 #ifdef TARGET_HAS_PRECISE_SMC
875 if (current_tb_modified) {
876 /* we generate a block containing just the instruction
877 modifying the memory. It will ensure that it cannot modify
878 itself */
879 env->current_tb = NULL;
880 tb_gen_code(env, current_pc, current_cs_base, current_flags,
881 CF_SINGLE_INSN);
882 cpu_resume_from_signal(env, puc);
884 #endif
886 #endif
888 /* add the tb in the target page and protect it if necessary */
889 static inline void tb_alloc_page(TranslationBlock *tb,
890 unsigned int n, target_ulong page_addr)
892 PageDesc *p;
893 TranslationBlock *last_first_tb;
895 tb->page_addr[n] = page_addr;
896 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
897 tb->page_next[n] = p->first_tb;
898 last_first_tb = p->first_tb;
899 p->first_tb = (TranslationBlock *)((long)tb | n);
900 invalidate_page_bitmap(p);
902 #if defined(TARGET_HAS_SMC) || 1
904 #if defined(CONFIG_USER_ONLY)
905 if (p->flags & PAGE_WRITE) {
906 target_ulong addr;
907 PageDesc *p2;
908 int prot;
910 /* force the host page as non writable (writes will have a
911 page fault + mprotect overhead) */
912 page_addr &= qemu_host_page_mask;
913 prot = 0;
914 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
915 addr += TARGET_PAGE_SIZE) {
917 p2 = page_find (addr >> TARGET_PAGE_BITS);
918 if (!p2)
919 continue;
920 prot |= p2->flags;
921 p2->flags &= ~PAGE_WRITE;
922 page_get_flags(addr);
924 mprotect(g2h(page_addr), qemu_host_page_size,
925 (prot & PAGE_BITS) & ~PAGE_WRITE);
926 #ifdef DEBUG_TB_INVALIDATE
927 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
928 page_addr);
929 #endif
931 #else
932 /* if some code is already present, then the pages are already
933 protected. So we handle the case where only the first TB is
934 allocated in a physical page */
935 if (!last_first_tb) {
936 tlb_protect_code(page_addr);
938 #endif
940 #endif /* TARGET_HAS_SMC */
943 /* Allocate a new translation block. Flush the translation buffer if
944 too many translation blocks or too much generated code. */
945 TranslationBlock *tb_alloc(target_ulong pc)
947 TranslationBlock *tb;
949 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
950 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
951 return NULL;
952 tb = &tbs[nb_tbs++];
953 tb->pc = pc;
954 tb->cflags = 0;
955 return tb;
958 /* add a new TB and link it to the physical page tables. phys_page2 is
959 (-1) to indicate that only one page contains the TB. */
960 void tb_link_phys(TranslationBlock *tb,
961 target_ulong phys_pc, target_ulong phys_page2)
963 unsigned int h;
964 TranslationBlock **ptb;
966 /* add in the physical hash table */
967 h = tb_phys_hash_func(phys_pc);
968 ptb = &tb_phys_hash[h];
969 tb->phys_hash_next = *ptb;
970 *ptb = tb;
972 /* add in the page list */
973 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
974 if (phys_page2 != -1)
975 tb_alloc_page(tb, 1, phys_page2);
976 else
977 tb->page_addr[1] = -1;
979 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
980 tb->jmp_next[0] = NULL;
981 tb->jmp_next[1] = NULL;
983 /* init original jump addresses */
984 if (tb->tb_next_offset[0] != 0xffff)
985 tb_reset_jump(tb, 0);
986 if (tb->tb_next_offset[1] != 0xffff)
987 tb_reset_jump(tb, 1);
989 #ifdef DEBUG_TB_CHECK
990 tb_page_check();
991 #endif
994 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
995 tb[1].tc_ptr. Return NULL if not found */
996 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
998 int m_min, m_max, m;
999 unsigned long v;
1000 TranslationBlock *tb;
1002 if (nb_tbs <= 0)
1003 return NULL;
1004 if (tc_ptr < (unsigned long)code_gen_buffer ||
1005 tc_ptr >= (unsigned long)code_gen_ptr)
1006 return NULL;
1007 /* binary search (cf Knuth) */
1008 m_min = 0;
1009 m_max = nb_tbs - 1;
1010 while (m_min <= m_max) {
1011 m = (m_min + m_max) >> 1;
1012 tb = &tbs[m];
1013 v = (unsigned long)tb->tc_ptr;
1014 if (v == tc_ptr)
1015 return tb;
1016 else if (tc_ptr < v) {
1017 m_max = m - 1;
1018 } else {
1019 m_min = m + 1;
1022 return &tbs[m_max];
1025 static void tb_reset_jump_recursive(TranslationBlock *tb);
1027 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1029 TranslationBlock *tb1, *tb_next, **ptb;
1030 unsigned int n1;
1032 tb1 = tb->jmp_next[n];
1033 if (tb1 != NULL) {
1034 /* find head of list */
1035 for(;;) {
1036 n1 = (long)tb1 & 3;
1037 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1038 if (n1 == 2)
1039 break;
1040 tb1 = tb1->jmp_next[n1];
1042 /* we are now sure that tb jumps to tb1 */
1043 tb_next = tb1;
1045 /* remove tb from the jmp_first list */
1046 ptb = &tb_next->jmp_first;
1047 for(;;) {
1048 tb1 = *ptb;
1049 n1 = (long)tb1 & 3;
1050 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1051 if (n1 == n && tb1 == tb)
1052 break;
1053 ptb = &tb1->jmp_next[n1];
1055 *ptb = tb->jmp_next[n];
1056 tb->jmp_next[n] = NULL;
1058 /* suppress the jump to next tb in generated code */
1059 tb_reset_jump(tb, n);
1061 /* suppress jumps in the tb we could have jumped to */
1062 tb_reset_jump_recursive(tb_next);
1066 static void tb_reset_jump_recursive(TranslationBlock *tb)
1068 tb_reset_jump_recursive2(tb, 0);
1069 tb_reset_jump_recursive2(tb, 1);
1072 #if defined(TARGET_HAS_ICE)
1073 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1075 target_phys_addr_t addr;
1076 target_ulong pd;
1077 ram_addr_t ram_addr;
1078 PhysPageDesc *p;
1080 addr = cpu_get_phys_page_debug(env, pc);
1081 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1082 if (!p) {
1083 pd = IO_MEM_UNASSIGNED;
1084 } else {
1085 pd = p->phys_offset;
1087 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1088 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1090 #endif
1092 /* Add a watchpoint. */
1093 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1095 int i;
1097 for (i = 0; i < env->nb_watchpoints; i++) {
1098 if (addr == env->watchpoint[i].vaddr)
1099 return 0;
1101 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1102 return -1;
1104 i = env->nb_watchpoints++;
1105 env->watchpoint[i].vaddr = addr;
1106 tlb_flush_page(env, addr);
1107 /* FIXME: This flush is needed because of the hack to make memory ops
1108 terminate the TB. It can be removed once the proper IO trap and
1109 re-execute bits are in. */
1110 tb_flush(env);
1111 return i;
1114 /* Remove a watchpoint. */
1115 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1117 int i;
1119 for (i = 0; i < env->nb_watchpoints; i++) {
1120 if (addr == env->watchpoint[i].vaddr) {
1121 env->nb_watchpoints--;
1122 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1123 tlb_flush_page(env, addr);
1124 return 0;
1127 return -1;
1130 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1131 breakpoint is reached */
1132 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1134 #if defined(TARGET_HAS_ICE)
1135 int i;
1137 for(i = 0; i < env->nb_breakpoints; i++) {
1138 if (env->breakpoints[i] == pc)
1139 return 0;
1142 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1143 return -1;
1144 env->breakpoints[env->nb_breakpoints++] = pc;
1146 breakpoint_invalidate(env, pc);
1147 return 0;
1148 #else
1149 return -1;
1150 #endif
1153 /* remove a breakpoint */
1154 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1156 #if defined(TARGET_HAS_ICE)
1157 int i;
1158 for(i = 0; i < env->nb_breakpoints; i++) {
1159 if (env->breakpoints[i] == pc)
1160 goto found;
1162 return -1;
1163 found:
1164 env->nb_breakpoints--;
1165 if (i < env->nb_breakpoints)
1166 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1168 breakpoint_invalidate(env, pc);
1169 return 0;
1170 #else
1171 return -1;
1172 #endif
1175 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1176 CPU loop after each instruction */
1177 void cpu_single_step(CPUState *env, int enabled)
1179 #if defined(TARGET_HAS_ICE)
1180 if (env->singlestep_enabled != enabled) {
1181 env->singlestep_enabled = enabled;
1182 /* must flush all the translated code to avoid inconsistencies */
1183 /* XXX: only flush what is necessary */
1184 tb_flush(env);
1186 #endif
1189 /* enable or disable low levels log */
1190 void cpu_set_log(int log_flags)
1192 loglevel = log_flags;
1193 if (loglevel && !logfile) {
1194 logfile = fopen(logfilename, log_append ? "a" : "w");
1195 if (!logfile) {
1196 perror(logfilename);
1197 _exit(1);
1199 #if !defined(CONFIG_SOFTMMU)
1200 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1202 static uint8_t logfile_buf[4096];
1203 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1205 #else
1206 setvbuf(logfile, NULL, _IOLBF, 0);
1207 #endif
1208 log_append = 1;
1210 if (!loglevel && logfile) {
1211 fclose(logfile);
1212 logfile = NULL;
1216 void cpu_set_log_filename(const char *filename)
1218 logfilename = strdup(filename);
1219 if (logfile) {
1220 fclose(logfile);
1221 logfile = NULL;
1223 cpu_set_log(loglevel);
1226 /* mask must never be zero, except for A20 change call */
1227 void cpu_interrupt(CPUState *env, int mask)
1229 TranslationBlock *tb;
1230 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1232 env->interrupt_request |= mask;
1233 /* if the cpu is currently executing code, we must unlink it and
1234 all the potentially executing TB */
1235 tb = env->current_tb;
1236 if (tb && !testandset(&interrupt_lock)) {
1237 env->current_tb = NULL;
1238 tb_reset_jump_recursive(tb);
1239 resetlock(&interrupt_lock);
1243 void cpu_reset_interrupt(CPUState *env, int mask)
1245 env->interrupt_request &= ~mask;
1248 CPULogItem cpu_log_items[] = {
1249 { CPU_LOG_TB_OUT_ASM, "out_asm",
1250 "show generated host assembly code for each compiled TB" },
1251 { CPU_LOG_TB_IN_ASM, "in_asm",
1252 "show target assembly code for each compiled TB" },
1253 { CPU_LOG_TB_OP, "op",
1254 "show micro ops for each compiled TB" },
1255 { CPU_LOG_TB_OP_OPT, "op_opt",
1256 "show micro ops "
1257 #ifdef TARGET_I386
1258 "before eflags optimization and "
1259 #endif
1260 "after liveness analysis" },
1261 { CPU_LOG_INT, "int",
1262 "show interrupts/exceptions in short format" },
1263 { CPU_LOG_EXEC, "exec",
1264 "show trace before each executed TB (lots of logs)" },
1265 { CPU_LOG_TB_CPU, "cpu",
1266 "show CPU state before block translation" },
1267 #ifdef TARGET_I386
1268 { CPU_LOG_PCALL, "pcall",
1269 "show protected mode far calls/returns/exceptions" },
1270 #endif
1271 #ifdef DEBUG_IOPORT
1272 { CPU_LOG_IOPORT, "ioport",
1273 "show all i/o ports accesses" },
1274 #endif
1275 { 0, NULL, NULL },
1278 static int cmp1(const char *s1, int n, const char *s2)
1280 if (strlen(s2) != n)
1281 return 0;
1282 return memcmp(s1, s2, n) == 0;
1285 /* takes a comma separated list of log masks. Return 0 if error. */
1286 int cpu_str_to_log_mask(const char *str)
1288 CPULogItem *item;
1289 int mask;
1290 const char *p, *p1;
1292 p = str;
1293 mask = 0;
1294 for(;;) {
1295 p1 = strchr(p, ',');
1296 if (!p1)
1297 p1 = p + strlen(p);
1298 if(cmp1(p,p1-p,"all")) {
1299 for(item = cpu_log_items; item->mask != 0; item++) {
1300 mask |= item->mask;
1302 } else {
1303 for(item = cpu_log_items; item->mask != 0; item++) {
1304 if (cmp1(p, p1 - p, item->name))
1305 goto found;
1307 return 0;
1309 found:
1310 mask |= item->mask;
1311 if (*p1 != ',')
1312 break;
1313 p = p1 + 1;
1315 return mask;
1318 void cpu_abort(CPUState *env, const char *fmt, ...)
1320 va_list ap;
1321 va_list ap2;
1323 va_start(ap, fmt);
1324 va_copy(ap2, ap);
1325 fprintf(stderr, "qemu: fatal: ");
1326 vfprintf(stderr, fmt, ap);
1327 fprintf(stderr, "\n");
1328 #ifdef TARGET_I386
1329 if(env->intercept & INTERCEPT_SVM_MASK) {
1330 /* most probably the virtual machine should not
1331 be shut down but rather caught by the VMM */
1332 vmexit(SVM_EXIT_SHUTDOWN, 0);
1334 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1335 #else
1336 cpu_dump_state(env, stderr, fprintf, 0);
1337 #endif
1338 if (logfile) {
1339 fprintf(logfile, "qemu: fatal: ");
1340 vfprintf(logfile, fmt, ap2);
1341 fprintf(logfile, "\n");
1342 #ifdef TARGET_I386
1343 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1344 #else
1345 cpu_dump_state(env, logfile, fprintf, 0);
1346 #endif
1347 fflush(logfile);
1348 fclose(logfile);
1350 va_end(ap2);
1351 va_end(ap);
1352 abort();
1355 CPUState *cpu_copy(CPUState *env)
1357 CPUState *new_env = cpu_init(env->cpu_model_str);
1358 /* preserve chaining and index */
1359 CPUState *next_cpu = new_env->next_cpu;
1360 int cpu_index = new_env->cpu_index;
1361 memcpy(new_env, env, sizeof(CPUState));
1362 new_env->next_cpu = next_cpu;
1363 new_env->cpu_index = cpu_index;
1364 return new_env;
1367 #if !defined(CONFIG_USER_ONLY)
1369 /* NOTE: if flush_global is true, also flush global entries (not
1370 implemented yet) */
1371 void tlb_flush(CPUState *env, int flush_global)
1373 int i;
1375 #if defined(DEBUG_TLB)
1376 printf("tlb_flush:\n");
1377 #endif
1378 /* must reset current TB so that interrupts cannot modify the
1379 links while we are modifying them */
1380 env->current_tb = NULL;
1382 for(i = 0; i < CPU_TLB_SIZE; i++) {
1383 env->tlb_table[0][i].addr_read = -1;
1384 env->tlb_table[0][i].addr_write = -1;
1385 env->tlb_table[0][i].addr_code = -1;
1386 env->tlb_table[1][i].addr_read = -1;
1387 env->tlb_table[1][i].addr_write = -1;
1388 env->tlb_table[1][i].addr_code = -1;
1389 #if (NB_MMU_MODES >= 3)
1390 env->tlb_table[2][i].addr_read = -1;
1391 env->tlb_table[2][i].addr_write = -1;
1392 env->tlb_table[2][i].addr_code = -1;
1393 #if (NB_MMU_MODES == 4)
1394 env->tlb_table[3][i].addr_read = -1;
1395 env->tlb_table[3][i].addr_write = -1;
1396 env->tlb_table[3][i].addr_code = -1;
1397 #endif
1398 #endif
1401 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1403 #if !defined(CONFIG_SOFTMMU)
1404 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1405 #endif
1406 #ifdef USE_KQEMU
1407 if (env->kqemu_enabled) {
1408 kqemu_flush(env, flush_global);
1410 #endif
1411 tlb_flush_count++;
1414 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1416 if (addr == (tlb_entry->addr_read &
1417 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1418 addr == (tlb_entry->addr_write &
1419 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1420 addr == (tlb_entry->addr_code &
1421 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1422 tlb_entry->addr_read = -1;
1423 tlb_entry->addr_write = -1;
1424 tlb_entry->addr_code = -1;
1428 void tlb_flush_page(CPUState *env, target_ulong addr)
1430 int i;
1431 TranslationBlock *tb;
1433 #if defined(DEBUG_TLB)
1434 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1435 #endif
1436 /* must reset current TB so that interrupts cannot modify the
1437 links while we are modifying them */
1438 env->current_tb = NULL;
1440 addr &= TARGET_PAGE_MASK;
1441 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1442 tlb_flush_entry(&env->tlb_table[0][i], addr);
1443 tlb_flush_entry(&env->tlb_table[1][i], addr);
1444 #if (NB_MMU_MODES >= 3)
1445 tlb_flush_entry(&env->tlb_table[2][i], addr);
1446 #if (NB_MMU_MODES == 4)
1447 tlb_flush_entry(&env->tlb_table[3][i], addr);
1448 #endif
1449 #endif
1451 /* Discard jump cache entries for any tb which might potentially
1452 overlap the flushed page. */
1453 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1454 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1456 i = tb_jmp_cache_hash_page(addr);
1457 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1459 #if !defined(CONFIG_SOFTMMU)
1460 if (addr < MMAP_AREA_END)
1461 munmap((void *)addr, TARGET_PAGE_SIZE);
1462 #endif
1463 #ifdef USE_KQEMU
1464 if (env->kqemu_enabled) {
1465 kqemu_flush_page(env, addr);
1467 #endif
1470 /* update the TLBs so that writes to code in the virtual page 'addr'
1471 can be detected */
1472 static void tlb_protect_code(ram_addr_t ram_addr)
1474 cpu_physical_memory_reset_dirty(ram_addr,
1475 ram_addr + TARGET_PAGE_SIZE,
1476 CODE_DIRTY_FLAG);
1479 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1480 tested for self modifying code */
1481 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1482 target_ulong vaddr)
1484 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1487 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1488 unsigned long start, unsigned long length)
1490 unsigned long addr;
1491 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1492 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1493 if ((addr - start) < length) {
1494 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1499 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1500 int dirty_flags)
1502 CPUState *env;
1503 unsigned long length, start1;
1504 int i, mask, len;
1505 uint8_t *p;
1507 start &= TARGET_PAGE_MASK;
1508 end = TARGET_PAGE_ALIGN(end);
1510 length = end - start;
1511 if (length == 0)
1512 return;
1513 len = length >> TARGET_PAGE_BITS;
1514 #ifdef USE_KQEMU
1515 /* XXX: should not depend on cpu context */
1516 env = first_cpu;
1517 if (env->kqemu_enabled) {
1518 ram_addr_t addr;
1519 addr = start;
1520 for(i = 0; i < len; i++) {
1521 kqemu_set_notdirty(env, addr);
1522 addr += TARGET_PAGE_SIZE;
1525 #endif
1526 mask = ~dirty_flags;
1527 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1528 for(i = 0; i < len; i++)
1529 p[i] &= mask;
1531 /* we modify the TLB cache so that the dirty bit will be set again
1532 when accessing the range */
1533 start1 = start + (unsigned long)phys_ram_base;
1534 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1535 for(i = 0; i < CPU_TLB_SIZE; i++)
1536 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1537 for(i = 0; i < CPU_TLB_SIZE; i++)
1538 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1539 #if (NB_MMU_MODES >= 3)
1540 for(i = 0; i < CPU_TLB_SIZE; i++)
1541 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1542 #if (NB_MMU_MODES == 4)
1543 for(i = 0; i < CPU_TLB_SIZE; i++)
1544 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1545 #endif
1546 #endif
1549 #if !defined(CONFIG_SOFTMMU)
1550 /* XXX: this is expensive */
1552 VirtPageDesc *p;
1553 int j;
1554 target_ulong addr;
1556 for(i = 0; i < L1_SIZE; i++) {
1557 p = l1_virt_map[i];
1558 if (p) {
1559 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1560 for(j = 0; j < L2_SIZE; j++) {
1561 if (p->valid_tag == virt_valid_tag &&
1562 p->phys_addr >= start && p->phys_addr < end &&
1563 (p->prot & PROT_WRITE)) {
1564 if (addr < MMAP_AREA_END) {
1565 mprotect((void *)addr, TARGET_PAGE_SIZE,
1566 p->prot & ~PROT_WRITE);
1569 addr += TARGET_PAGE_SIZE;
1570 p++;
1575 #endif
1578 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1580 ram_addr_t ram_addr;
1582 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1583 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1584 tlb_entry->addend - (unsigned long)phys_ram_base;
1585 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1586 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1591 /* update the TLB according to the current state of the dirty bits */
1592 void cpu_tlb_update_dirty(CPUState *env)
1594 int i;
1595 for(i = 0; i < CPU_TLB_SIZE; i++)
1596 tlb_update_dirty(&env->tlb_table[0][i]);
1597 for(i = 0; i < CPU_TLB_SIZE; i++)
1598 tlb_update_dirty(&env->tlb_table[1][i]);
1599 #if (NB_MMU_MODES >= 3)
1600 for(i = 0; i < CPU_TLB_SIZE; i++)
1601 tlb_update_dirty(&env->tlb_table[2][i]);
1602 #if (NB_MMU_MODES == 4)
1603 for(i = 0; i < CPU_TLB_SIZE; i++)
1604 tlb_update_dirty(&env->tlb_table[3][i]);
1605 #endif
1606 #endif
1609 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1610 unsigned long start)
1612 unsigned long addr;
1613 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1614 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1615 if (addr == start) {
1616 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1621 /* update the TLB corresponding to virtual page vaddr and phys addr
1622 addr so that it is no longer dirty */
1623 static inline void tlb_set_dirty(CPUState *env,
1624 unsigned long addr, target_ulong vaddr)
1626 int i;
1628 addr &= TARGET_PAGE_MASK;
1629 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1630 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1631 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1632 #if (NB_MMU_MODES >= 3)
1633 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1634 #if (NB_MMU_MODES == 4)
1635 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1636 #endif
1637 #endif
1640 /* add a new TLB entry. At most one entry for a given virtual address
1641 is permitted. Return 0 if OK or 2 if the page could not be mapped
1642 (can only happen in non SOFTMMU mode for I/O pages or pages
1643 conflicting with the host address space). */
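/* For I/O pages the low bits of the TLB entry keep the io_index taken from
   'pd', which lets the softmmu slow path dispatch to the right handler; for
   RAM pages the 'addend' field turns a guest virtual address directly into
   a host pointer into phys_ram_base. */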
1644 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1645 target_phys_addr_t paddr, int prot,
1646 int mmu_idx, int is_softmmu)
1648 PhysPageDesc *p;
1649 unsigned long pd;
1650 unsigned int index;
1651 target_ulong address;
1652 target_phys_addr_t addend;
1653 int ret;
1654 CPUTLBEntry *te;
1655 int i;
1657 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1658 if (!p) {
1659 pd = IO_MEM_UNASSIGNED;
1660 } else {
1661 pd = p->phys_offset;
1663 #if defined(DEBUG_TLB)
1664 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1665 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1666 #endif
1668 ret = 0;
1669 #if !defined(CONFIG_SOFTMMU)
1670 if (is_softmmu)
1671 #endif
1673 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1674 /* IO memory case */
1675 address = vaddr | pd;
1676 addend = paddr;
1677 } else {
1678 /* standard memory */
1679 address = vaddr;
1680 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1683 /* Make accesses to pages with watchpoints go via the
1684 watchpoint trap routines. */
1685 for (i = 0; i < env->nb_watchpoints; i++) {
1686 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1687 if (address & ~TARGET_PAGE_MASK) {
1688 env->watchpoint[i].addend = 0;
1689 address = vaddr | io_mem_watch;
1690 } else {
1691 env->watchpoint[i].addend = pd - paddr +
1692 (unsigned long) phys_ram_base;
1693 /* TODO: Figure out how to make read watchpoints coexist
1694 with code. */
1695 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1700 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1701 addend -= vaddr;
1702 te = &env->tlb_table[mmu_idx][index];
1703 te->addend = addend;
1704 if (prot & PAGE_READ) {
1705 te->addr_read = address;
1706 } else {
1707 te->addr_read = -1;
1709 if (prot & PAGE_EXEC) {
1710 te->addr_code = address;
1711 } else {
1712 te->addr_code = -1;
1714 if (prot & PAGE_WRITE) {
1715 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1716 (pd & IO_MEM_ROMD)) {
1717 /* write access calls the I/O callback */
1718 te->addr_write = vaddr |
1719 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1720 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1721 !cpu_physical_memory_is_dirty(pd)) {
1722 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1723 } else {
1724 te->addr_write = address;
1726 } else {
1727 te->addr_write = -1;
1730 #if !defined(CONFIG_SOFTMMU)
1731 else {
1732 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1733 /* IO access: no mapping is done as it will be handled by the
1734 soft MMU */
1735 if (!(env->hflags & HF_SOFTMMU_MASK))
1736 ret = 2;
1737 } else {
1738 void *map_addr;
1740 if (vaddr >= MMAP_AREA_END) {
1741 ret = 2;
1742 } else {
1743 if (prot & PROT_WRITE) {
1744 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1745 #if defined(TARGET_HAS_SMC) || 1
1746 first_tb ||
1747 #endif
1748 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1749 !cpu_physical_memory_is_dirty(pd))) {
1750 /* ROM: we act as if code were present */
1751 /* if code is present, we only map as read only and save the
1752 original mapping */
1753 VirtPageDesc *vp;
1755 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1756 vp->phys_addr = pd;
1757 vp->prot = prot;
1758 vp->valid_tag = virt_valid_tag;
1759 prot &= ~PAGE_WRITE;
1762 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1763 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1764 if (map_addr == MAP_FAILED) {
1765 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1766 paddr, vaddr);
1771 #endif
1772 return ret;
1775 /* called from signal handler: invalidate the code and unprotect the
1776 page. Return TRUE if the fault was successfully handled. */
1777 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1779 #if !defined(CONFIG_SOFTMMU)
1780 VirtPageDesc *vp;
1782 #if defined(DEBUG_TLB)
1783 printf("page_unprotect: addr=0x%08x\n", addr);
1784 #endif
1785 addr &= TARGET_PAGE_MASK;
1787 /* if it is not mapped, no need to worry here */
1788 if (addr >= MMAP_AREA_END)
1789 return 0;
1790 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1791 if (!vp)
1792 return 0;
1793 /* NOTE: in this case, valid_tag is _not_ tested as it
1794 validates only the code TLB */
1795 if (vp->valid_tag != virt_valid_tag)
1796 return 0;
1797 if (!(vp->prot & PAGE_WRITE))
1798 return 0;
1799 #if defined(DEBUG_TLB)
1800 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1801 addr, vp->phys_addr, vp->prot);
1802 #endif
1803 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1804 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1805 (unsigned long)addr, vp->prot);
1806 /* set the dirty bit */
1807 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1808 /* flush the code inside */
1809 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1810 return 1;
1811 #else
1812 return 0;
1813 #endif
1816 #else
1818 void tlb_flush(CPUState *env, int flush_global)
1822 void tlb_flush_page(CPUState *env, target_ulong addr)
1826 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1827 target_phys_addr_t paddr, int prot,
1828 int mmu_idx, int is_softmmu)
1830 return 0;
1833 /* dump memory mappings */
1834 void page_dump(FILE *f)
1836 unsigned long start, end;
1837 int i, j, prot, prot1;
1838 PageDesc *p;
1840 fprintf(f, "%-8s %-8s %-8s %s\n",
1841 "start", "end", "size", "prot");
1842 start = -1;
1843 end = -1;
1844 prot = 0;
1845 for(i = 0; i <= L1_SIZE; i++) {
1846 if (i < L1_SIZE)
1847 p = l1_map[i];
1848 else
1849 p = NULL;
1850 for(j = 0;j < L2_SIZE; j++) {
1851 if (!p)
1852 prot1 = 0;
1853 else
1854 prot1 = p[j].flags;
1855 if (prot1 != prot) {
1856 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1857 if (start != -1) {
1858 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1859 start, end, end - start,
1860 prot & PAGE_READ ? 'r' : '-',
1861 prot & PAGE_WRITE ? 'w' : '-',
1862 prot & PAGE_EXEC ? 'x' : '-');
1864 if (prot1 != 0)
1865 start = end;
1866 else
1867 start = -1;
1868 prot = prot1;
1870 if (!p)
1871 break;
1876 int page_get_flags(target_ulong address)
1878 PageDesc *p;
1880 p = page_find(address >> TARGET_PAGE_BITS);
1881 if (!p)
1882 return 0;
1883 return p->flags;
1886 /* modify the flags of a page and invalidate the code if
1887 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1888 depending on PAGE_WRITE */
1889 void page_set_flags(target_ulong start, target_ulong end, int flags)
1891 PageDesc *p;
1892 target_ulong addr;
1894 start = start & TARGET_PAGE_MASK;
1895 end = TARGET_PAGE_ALIGN(end);
1896 if (flags & PAGE_WRITE)
1897 flags |= PAGE_WRITE_ORG;
1898 spin_lock(&tb_lock);
1899 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1900 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1901 /* if the write protection is set, then we invalidate the code
1902 inside */
1903 if (!(p->flags & PAGE_WRITE) &&
1904 (flags & PAGE_WRITE) &&
1905 p->first_tb) {
1906 tb_invalidate_phys_page(addr, 0, NULL);
1908 p->flags = flags;
1910 spin_unlock(&tb_lock);
1913 int page_check_range(target_ulong start, target_ulong len, int flags)
1915 PageDesc *p;
1916 target_ulong end;
1917 target_ulong addr;
1919 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1920 start = start & TARGET_PAGE_MASK;
1922 if( end < start )
1923 /* we've wrapped around */
1924 return -1;
1925 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1926 p = page_find(addr >> TARGET_PAGE_BITS);
1927 if( !p )
1928 return -1;
1929 if( !(p->flags & PAGE_VALID) )
1930 return -1;
1932 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1933 return -1;
1934 if (flags & PAGE_WRITE) {
1935 if (!(p->flags & PAGE_WRITE_ORG))
1936 return -1;
1937 /* unprotect the page if it was put read-only because it
1938 contains translated code */
1939 if (!(p->flags & PAGE_WRITE)) {
1940 if (!page_unprotect(addr, 0, NULL))
1941 return -1;
1943 return 0;
1946 return 0;
1949 /* called from signal handler: invalidate the code and unprotect the
1950 page. Return TRUE if the fault was successfully handled. */
1951 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1953 unsigned int page_index, prot, pindex;
1954 PageDesc *p, *p1;
1955 target_ulong host_start, host_end, addr;
1957 host_start = address & qemu_host_page_mask;
1958 page_index = host_start >> TARGET_PAGE_BITS;
1959 p1 = page_find(page_index);
1960 if (!p1)
1961 return 0;
1962 host_end = host_start + qemu_host_page_size;
1963 p = p1;
1964 prot = 0;
1965 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1966 prot |= p->flags;
1967 p++;
1969 /* if the page was really writable, then we change its
1970 protection back to writable */
1971 if (prot & PAGE_WRITE_ORG) {
1972 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1973 if (!(p1[pindex].flags & PAGE_WRITE)) {
1974 mprotect((void *)g2h(host_start), qemu_host_page_size,
1975 (prot & PAGE_BITS) | PAGE_WRITE);
1976 p1[pindex].flags |= PAGE_WRITE;
1977 /* and since the content will be modified, we must invalidate
1978 the corresponding translated code. */
1979 tb_invalidate_phys_page(address, pc, puc);
1980 #ifdef DEBUG_TB_CHECK
1981 tb_invalidate_check(address);
1982 #endif
1983 return 1;
1986 return 0;
1989 static inline void tlb_set_dirty(CPUState *env,
1990 unsigned long addr, target_ulong vaddr)
1993 #endif /* defined(CONFIG_USER_ONLY) */
1995 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1996 ram_addr_t memory);
1997 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
1998 ram_addr_t orig_memory);
1999 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2000 need_subpage) \
2001 do { \
2002 if (addr > start_addr) \
2003 start_addr2 = 0; \
2004 else { \
2005 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2006 if (start_addr2 > 0) \
2007 need_subpage = 1; \
2010 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2011 end_addr2 = TARGET_PAGE_SIZE - 1; \
2012 else { \
2013 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2014 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2015 need_subpage = 1; \
2017 } while (0)
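/* CHECK_SUBPAGE computes, for the page containing 'addr', the sub-range
   [start_addr2, end_addr2] of the region being registered that falls inside
   this page, and sets need_subpage when that range does not cover the whole
   page. */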
2019 /* register physical memory. 'size' must be a multiple of the target
2020 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2021 io memory page */
2022 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2023 ram_addr_t size,
2024 ram_addr_t phys_offset)
2026 target_phys_addr_t addr, end_addr;
2027 PhysPageDesc *p;
2028 CPUState *env;
2029 ram_addr_t orig_size = size;
2030 void *subpage;
2032 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2033 end_addr = start_addr + (target_phys_addr_t)size;
2034 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2035 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2036 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2037 ram_addr_t orig_memory = p->phys_offset;
2038 target_phys_addr_t start_addr2, end_addr2;
2039 int need_subpage = 0;
2041 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2042 need_subpage);
2043 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2044 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2045 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2046 &p->phys_offset, orig_memory);
2047 } else {
2048 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2049 >> IO_MEM_SHIFT];
2051 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2052 } else {
2053 p->phys_offset = phys_offset;
2054 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2055 (phys_offset & IO_MEM_ROMD))
2056 phys_offset += TARGET_PAGE_SIZE;
2058 } else {
2059 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2060 p->phys_offset = phys_offset;
2061 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2062 (phys_offset & IO_MEM_ROMD))
2063 phys_offset += TARGET_PAGE_SIZE;
2064 else {
2065 target_phys_addr_t start_addr2, end_addr2;
2066 int need_subpage = 0;
2068 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2069 end_addr2, need_subpage);
2071 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2072 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2073 &p->phys_offset, IO_MEM_UNASSIGNED);
2074 subpage_register(subpage, start_addr2, end_addr2,
2075 phys_offset);
2076 }
2077 }
2078 }
2079 }
2081 /* since each CPU stores ram addresses in its TLB cache, we must
2082 reset the modified entries */
2083 /* XXX: slow ! */
2084 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2085 tlb_flush(env, 1);
2086 }
2087 }
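/* Usage sketch (hypothetical size and address): a machine model maps its
   RAM by allocating backing storage with qemu_ram_alloc() and registering
   it here, e.g.

       ram_addr_t ram_offset = qemu_ram_alloc(0x00800000);
       cpu_register_physical_memory(0x00000000, 0x00800000,
                                    ram_offset | IO_MEM_RAM);

   I/O regions are registered the same way, passing the value returned by
   cpu_register_io_memory() as phys_offset. */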
2089 /* XXX: temporary until new memory mapping API */
2090 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2092 PhysPageDesc *p;
2094 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2095 if (!p)
2096 return IO_MEM_UNASSIGNED;
2097 return p->phys_offset;
2100 /* XXX: better than nothing */
2101 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2103 ram_addr_t addr;
2104 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2105 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
2106 size, phys_ram_size);
2107 abort();
2108 }
2109 addr = phys_ram_alloc_offset;
2110 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2111 return addr;
2112 }
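/* qemu_ram_alloc() is a simple bump allocator over the preallocated
   phys_ram_base area: it returns the current allocation offset and then
   advances phys_ram_alloc_offset to the next target page boundary, so a
   0x1234 byte request advances the offset by 0x2000 with 4 KB target
   pages.  qemu_ram_free() below is intentionally a no-op: guest RAM is
   never reclaimed. */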
2114 void qemu_ram_free(ram_addr_t addr)
2115 {
2116 }
2118 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2120 #ifdef DEBUG_UNASSIGNED
2121 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2122 #endif
2123 #ifdef TARGET_SPARC
2124 do_unassigned_access(addr, 0, 0, 0);
2125 #elif TARGET_CRIS
2126 do_unassigned_access(addr, 0, 0, 0);
2127 #endif
2128 return 0;
2131 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2133 #ifdef DEBUG_UNASSIGNED
2134 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2135 #endif
2136 #ifdef TARGET_SPARC
2137 do_unassigned_access(addr, 1, 0, 0);
2138 #elif TARGET_CRIS
2139 do_unassigned_access(addr, 1, 0, 0);
2140 #endif
2143 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2144 unassigned_mem_readb,
2145 unassigned_mem_readb,
2146 unassigned_mem_readb,
2147 };
2149 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2150 unassigned_mem_writeb,
2151 unassigned_mem_writeb,
2152 unassigned_mem_writeb,
2153 };
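/* The notdirty handlers below back the IO_MEM_NOTDIRTY slot: they are
   used for RAM pages whose CODE_DIRTY_FLAG has been cleared, i.e. pages
   that may still contain translated code.  A write first invalidates any
   TBs on the page, then performs the store and updates the per-page
   dirty flags; once all flags are set again (0xff) the TLB entry is
   switched back to a plain RAM mapping so later writes take the fast
   path. */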
2155 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2157 unsigned long ram_addr;
2158 int dirty_flags;
2159 ram_addr = addr - (unsigned long)phys_ram_base;
2160 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2161 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2162 #if !defined(CONFIG_USER_ONLY)
2163 tb_invalidate_phys_page_fast(ram_addr, 1);
2164 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2165 #endif
2166 }
2167 stb_p((uint8_t *)(long)addr, val);
2168 #ifdef USE_KQEMU
2169 if (cpu_single_env->kqemu_enabled &&
2170 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2171 kqemu_modify_page(cpu_single_env, ram_addr);
2172 #endif
2173 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2174 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2175 /* we remove the notdirty callback only if the code has been
2176 flushed */
2177 if (dirty_flags == 0xff)
2178 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2181 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2183 unsigned long ram_addr;
2184 int dirty_flags;
2185 ram_addr = addr - (unsigned long)phys_ram_base;
2186 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2187 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2188 #if !defined(CONFIG_USER_ONLY)
2189 tb_invalidate_phys_page_fast(ram_addr, 2);
2190 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2191 #endif
2192 }
2193 stw_p((uint8_t *)(long)addr, val);
2194 #ifdef USE_KQEMU
2195 if (cpu_single_env->kqemu_enabled &&
2196 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2197 kqemu_modify_page(cpu_single_env, ram_addr);
2198 #endif
2199 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2200 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2201 /* we remove the notdirty callback only if the code has been
2202 flushed */
2203 if (dirty_flags == 0xff)
2204 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2207 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2209 unsigned long ram_addr;
2210 int dirty_flags;
2211 ram_addr = addr - (unsigned long)phys_ram_base;
2212 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2213 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2214 #if !defined(CONFIG_USER_ONLY)
2215 tb_invalidate_phys_page_fast(ram_addr, 4);
2216 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2217 #endif
2218 }
2219 stl_p((uint8_t *)(long)addr, val);
2220 #ifdef USE_KQEMU
2221 if (cpu_single_env->kqemu_enabled &&
2222 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2223 kqemu_modify_page(cpu_single_env, ram_addr);
2224 #endif
2225 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2226 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2227 /* we remove the notdirty callback only if the code has been
2228 flushed */
2229 if (dirty_flags == 0xff)
2230 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2233 static CPUReadMemoryFunc *error_mem_read[3] = {
2234 NULL, /* never used */
2235 NULL, /* never used */
2236 NULL, /* never used */
2237 };
2239 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2240 notdirty_mem_writeb,
2241 notdirty_mem_writew,
2242 notdirty_mem_writel,
2243 };
2245 #if defined(CONFIG_SOFTMMU)
2246 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2247 so these check for a hit then pass through to the normal out-of-line
2248 phys routines. */
2249 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2250 {
2251 return ldub_phys(addr);
2252 }
2254 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2255 {
2256 return lduw_phys(addr);
2257 }
2259 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2260 {
2261 return ldl_phys(addr);
2262 }
2264 /* Generate a debug exception if a watchpoint has been hit.
2265 Returns the real physical address of the access. addr will be a host
2266 address in case of a RAM location. */
2267 static target_ulong check_watchpoint(target_phys_addr_t addr)
2269 CPUState *env = cpu_single_env;
2270 target_ulong watch;
2271 target_ulong retaddr;
2272 int i;
2274 retaddr = addr;
2275 for (i = 0; i < env->nb_watchpoints; i++) {
2276 watch = env->watchpoint[i].vaddr;
2277 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2278 retaddr = addr - env->watchpoint[i].addend;
2279 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2280 cpu_single_env->watchpoint_hit = i + 1;
2281 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2282 break;
2283 }
2284 }
2285 }
2286 return retaddr;
2287 }
2289 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2290 uint32_t val)
2291 {
2292 addr = check_watchpoint(addr);
2293 stb_phys(addr, val);
2294 }
2296 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2297 uint32_t val)
2298 {
2299 addr = check_watchpoint(addr);
2300 stw_phys(addr, val);
2301 }
2303 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2304 uint32_t val)
2305 {
2306 addr = check_watchpoint(addr);
2307 stl_phys(addr, val);
2308 }
2310 static CPUReadMemoryFunc *watch_mem_read[3] = {
2311 watch_mem_readb,
2312 watch_mem_readw,
2313 watch_mem_readl,
2314 };
2316 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2317 watch_mem_writeb,
2318 watch_mem_writew,
2319 watch_mem_writel,
2320 };
2321 #endif
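/* Subpage dispatch: when a single target page is shared by regions with
   different handlers, the page's phys_offset refers to a subpage_t
   (IO_MEM_SUBPAGE).  The helpers below map the byte offset within the
   page to a slot via SUBPAGE_IDX() and forward the access to the handler
   registered for that slot, with len selecting the access width
   (0 = byte, 1 = word, 2 = long). */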
2323 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2324 unsigned int len)
2326 uint32_t ret;
2327 unsigned int idx;
2329 idx = SUBPAGE_IDX(addr - mmio->base);
2330 #if defined(DEBUG_SUBPAGE)
2331 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2332 mmio, len, addr, idx);
2333 #endif
2334 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2336 return ret;
2339 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2340 uint32_t value, unsigned int len)
2342 unsigned int idx;
2344 idx = SUBPAGE_IDX(addr - mmio->base);
2345 #if defined(DEBUG_SUBPAGE)
2346 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2347 mmio, len, addr, idx, value);
2348 #endif
2349 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2352 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2354 #if defined(DEBUG_SUBPAGE)
2355 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2356 #endif
2358 return subpage_readlen(opaque, addr, 0);
2361 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2362 uint32_t value)
2364 #if defined(DEBUG_SUBPAGE)
2365 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2366 #endif
2367 subpage_writelen(opaque, addr, value, 0);
2370 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2372 #if defined(DEBUG_SUBPAGE)
2373 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2374 #endif
2376 return subpage_readlen(opaque, addr, 1);
2379 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2380 uint32_t value)
2382 #if defined(DEBUG_SUBPAGE)
2383 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2384 #endif
2385 subpage_writelen(opaque, addr, value, 1);
2388 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2390 #if defined(DEBUG_SUBPAGE)
2391 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2392 #endif
2394 return subpage_readlen(opaque, addr, 2);
2397 static void subpage_writel (void *opaque,
2398 target_phys_addr_t addr, uint32_t value)
2400 #if defined(DEBUG_SUBPAGE)
2401 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2402 #endif
2403 subpage_writelen(opaque, addr, value, 2);
2406 static CPUReadMemoryFunc *subpage_read[] = {
2407 &subpage_readb,
2408 &subpage_readw,
2409 &subpage_readl,
2410 };
2412 static CPUWriteMemoryFunc *subpage_write[] = {
2413 &subpage_writeb,
2414 &subpage_writew,
2415 &subpage_writel,
2416 };
2418 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2419 ram_addr_t memory)
2421 int idx, eidx;
2422 unsigned int i;
2424 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2425 return -1;
2426 idx = SUBPAGE_IDX(start);
2427 eidx = SUBPAGE_IDX(end);
2428 #if defined(DEBUG_SUBPAGE)
2429 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2430 mmio, start, end, idx, eidx, memory);
2431 #endif
2432 memory >>= IO_MEM_SHIFT;
2433 for (; idx <= eidx; idx++) {
2434 for (i = 0; i < 4; i++) {
2435 if (io_mem_read[memory][i]) {
2436 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2437 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2438 }
2439 if (io_mem_write[memory][i]) {
2440 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2441 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2442 }
2443 }
2444 }
2446 return 0;
2447 }
2449 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2450 ram_addr_t orig_memory)
2452 subpage_t *mmio;
2453 int subpage_memory;
2455 mmio = qemu_mallocz(sizeof(subpage_t));
2456 if (mmio != NULL) {
2457 mmio->base = base;
2458 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2459 #if defined(DEBUG_SUBPAGE)
2460 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2461 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2462 #endif
2463 *phys = subpage_memory | IO_MEM_SUBPAGE;
2464 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2465 }
2467 return mmio;
2468 }
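/* io_mem_init() below wires up the built-in handlers (ROM, unassigned
   and notdirty accesses) and allocates phys_ram_dirty, which keeps one
   byte of dirty flags per target page; initialising it to 0xff marks
   every page as fully dirty, i.e. containing no translated code yet. */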
2470 static void io_mem_init(void)
2472 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2473 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2474 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2475 io_mem_nb = 5;
2477 #if defined(CONFIG_SOFTMMU)
2478 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2479 watch_mem_write, NULL);
2480 #endif
2481 /* alloc dirty bits array */
2482 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2483 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2484 }
2486 /* mem_read and mem_write are arrays of function pointers for byte
2487 (index 0), word (index 1) and dword (index 2) accesses. Individual
2488 entries can be omitted by passing a NULL function pointer, and the
2489 registered functions may be replaced dynamically later.
2490 If io_index is non-zero, the corresponding I/O zone is
2491 modified; if it is zero, a new I/O zone is allocated. The return
2492 value can be used with cpu_register_physical_memory(); (-1) is
2493 returned on error. */
2494 int cpu_register_io_memory(int io_index,
2495 CPUReadMemoryFunc **mem_read,
2496 CPUWriteMemoryFunc **mem_write,
2497 void *opaque)
2498 {
2499 int i, subwidth = 0;
2501 if (io_index <= 0) {
2502 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2503 return -1;
2504 io_index = io_mem_nb++;
2505 } else {
2506 if (io_index >= IO_MEM_NB_ENTRIES)
2507 return -1;
2508 }
2510 for(i = 0;i < 3; i++) {
2511 if (!mem_read[i] || !mem_write[i])
2512 subwidth = IO_MEM_SUBWIDTH;
2513 io_mem_read[io_index][i] = mem_read[i];
2514 io_mem_write[io_index][i] = mem_write[i];
2515 }
2516 io_mem_opaque[io_index] = opaque;
2517 return (io_index << IO_MEM_SHIFT) | subwidth;
2518 }
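/* Usage sketch (the my_dev_* handlers, 'opaque', 'base' and the 0x1000
   size are hypothetical): a device registers byte/word/long callbacks and
   then maps the returned handle with cpu_register_physical_memory():

       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };
       int io_idx = cpu_register_io_memory(0, my_dev_read, my_dev_write,
                                           opaque);
       cpu_register_physical_memory(base, 0x1000, io_idx);
*/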
2520 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2521 {
2522 return io_mem_write[io_index >> IO_MEM_SHIFT];
2523 }
2525 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2526 {
2527 return io_mem_read[io_index >> IO_MEM_SHIFT];
2528 }
2530 /* physical memory access (slow version, mainly for debug) */
2531 #if defined(CONFIG_USER_ONLY)
2532 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2533 int len, int is_write)
2535 int l, flags;
2536 target_ulong page;
2537 void * p;
2539 while (len > 0) {
2540 page = addr & TARGET_PAGE_MASK;
2541 l = (page + TARGET_PAGE_SIZE) - addr;
2542 if (l > len)
2543 l = len;
2544 flags = page_get_flags(page);
2545 if (!(flags & PAGE_VALID))
2546 return;
2547 if (is_write) {
2548 if (!(flags & PAGE_WRITE))
2549 return;
2550 /* XXX: this code should not depend on lock_user */
2551 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2552 /* FIXME - should this return an error rather than just fail? */
2553 return;
2554 memcpy(p, buf, l);
2555 unlock_user(p, addr, l);
2556 } else {
2557 if (!(flags & PAGE_READ))
2558 return;
2559 /* XXX: this code should not depend on lock_user */
2560 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2561 /* FIXME - should this return an error rather than just fail? */
2562 return;
2563 memcpy(buf, p, l);
2564 unlock_user(p, addr, 0);
2565 }
2566 len -= l;
2567 buf += l;
2568 addr += l;
2569 }
2570 }
2572 #else
2573 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2574 int len, int is_write)
2576 int l, io_index;
2577 uint8_t *ptr;
2578 uint32_t val;
2579 target_phys_addr_t page;
2580 unsigned long pd;
2581 PhysPageDesc *p;
2583 while (len > 0) {
2584 page = addr & TARGET_PAGE_MASK;
2585 l = (page + TARGET_PAGE_SIZE) - addr;
2586 if (l > len)
2587 l = len;
2588 p = phys_page_find(page >> TARGET_PAGE_BITS);
2589 if (!p) {
2590 pd = IO_MEM_UNASSIGNED;
2591 } else {
2592 pd = p->phys_offset;
2593 }
2595 if (is_write) {
2596 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2597 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2598 /* XXX: could force cpu_single_env to NULL to avoid
2599 potential bugs */
2600 if (l >= 4 && ((addr & 3) == 0)) {
2601 /* 32 bit write access */
2602 val = ldl_p(buf);
2603 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2604 l = 4;
2605 } else if (l >= 2 && ((addr & 1) == 0)) {
2606 /* 16 bit write access */
2607 val = lduw_p(buf);
2608 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2609 l = 2;
2610 } else {
2611 /* 8 bit write access */
2612 val = ldub_p(buf);
2613 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2614 l = 1;
2615 }
2616 } else {
2617 unsigned long addr1;
2618 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2619 /* RAM case */
2620 ptr = phys_ram_base + addr1;
2621 memcpy(ptr, buf, l);
2622 if (!cpu_physical_memory_is_dirty(addr1)) {
2623 /* invalidate code */
2624 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2625 /* set dirty bit */
2626 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2627 (0xff & ~CODE_DIRTY_FLAG);
2628 }
2629 }
2630 } else {
2631 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2632 !(pd & IO_MEM_ROMD)) {
2633 /* I/O case */
2634 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2635 if (l >= 4 && ((addr & 3) == 0)) {
2636 /* 32 bit read access */
2637 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2638 stl_p(buf, val);
2639 l = 4;
2640 } else if (l >= 2 && ((addr & 1) == 0)) {
2641 /* 16 bit read access */
2642 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2643 stw_p(buf, val);
2644 l = 2;
2645 } else {
2646 /* 8 bit read access */
2647 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2648 stb_p(buf, val);
2649 l = 1;
2650 }
2651 } else {
2652 /* RAM case */
2653 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2654 (addr & ~TARGET_PAGE_MASK);
2655 memcpy(buf, ptr, l);
2656 }
2657 }
2658 len -= l;
2659 buf += l;
2660 addr += l;
2661 }
2662 }
2664 /* used for ROM loading: can write to both RAM and ROM */
2665 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2666 const uint8_t *buf, int len)
2668 int l;
2669 uint8_t *ptr;
2670 target_phys_addr_t page;
2671 unsigned long pd;
2672 PhysPageDesc *p;
2674 while (len > 0) {
2675 page = addr & TARGET_PAGE_MASK;
2676 l = (page + TARGET_PAGE_SIZE) - addr;
2677 if (l > len)
2678 l = len;
2679 p = phys_page_find(page >> TARGET_PAGE_BITS);
2680 if (!p) {
2681 pd = IO_MEM_UNASSIGNED;
2682 } else {
2683 pd = p->phys_offset;
2684 }
2686 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2687 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2688 !(pd & IO_MEM_ROMD)) {
2689 /* do nothing */
2690 } else {
2691 unsigned long addr1;
2692 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2693 /* ROM/RAM case */
2694 ptr = phys_ram_base + addr1;
2695 memcpy(ptr, buf, l);
2696 }
2697 len -= l;
2698 buf += l;
2699 addr += l;
2700 }
2701 }
2704 /* warning: addr must be aligned */
2705 uint32_t ldl_phys(target_phys_addr_t addr)
2707 int io_index;
2708 uint8_t *ptr;
2709 uint32_t val;
2710 unsigned long pd;
2711 PhysPageDesc *p;
2713 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2714 if (!p) {
2715 pd = IO_MEM_UNASSIGNED;
2716 } else {
2717 pd = p->phys_offset;
2718 }
2720 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2721 !(pd & IO_MEM_ROMD)) {
2722 /* I/O case */
2723 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2724 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2725 } else {
2726 /* RAM case */
2727 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2728 (addr & ~TARGET_PAGE_MASK);
2729 val = ldl_p(ptr);
2730 }
2731 return val;
2732 }
2734 /* warning: addr must be aligned */
2735 uint64_t ldq_phys(target_phys_addr_t addr)
2737 int io_index;
2738 uint8_t *ptr;
2739 uint64_t val;
2740 unsigned long pd;
2741 PhysPageDesc *p;
2743 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2744 if (!p) {
2745 pd = IO_MEM_UNASSIGNED;
2746 } else {
2747 pd = p->phys_offset;
2748 }
2750 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2751 !(pd & IO_MEM_ROMD)) {
2752 /* I/O case */
2753 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2754 #ifdef TARGET_WORDS_BIGENDIAN
2755 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2756 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2757 #else
2758 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2759 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2760 #endif
2761 } else {
2762 /* RAM case */
2763 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2764 (addr & ~TARGET_PAGE_MASK);
2765 val = ldq_p(ptr);
2766 }
2767 return val;
2768 }
2770 /* XXX: optimize */
2771 uint32_t ldub_phys(target_phys_addr_t addr)
2772 {
2773 uint8_t val;
2774 cpu_physical_memory_read(addr, &val, 1);
2775 return val;
2776 }
2778 /* XXX: optimize */
2779 uint32_t lduw_phys(target_phys_addr_t addr)
2780 {
2781 uint16_t val;
2782 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2783 return tswap16(val);
2784 }
2786 /* warning: addr must be aligned. The ram page is not marked as dirty
2787 and the code inside is not invalidated. This is useful when the dirty
2788 bits are used to track modified PTEs */
2789 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2791 int io_index;
2792 uint8_t *ptr;
2793 unsigned long pd;
2794 PhysPageDesc *p;
2796 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2797 if (!p) {
2798 pd = IO_MEM_UNASSIGNED;
2799 } else {
2800 pd = p->phys_offset;
2801 }
2803 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2804 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2805 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2806 } else {
2807 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2808 (addr & ~TARGET_PAGE_MASK);
2809 stl_p(ptr, val);
2810 }
2811 }
2813 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2815 int io_index;
2816 uint8_t *ptr;
2817 unsigned long pd;
2818 PhysPageDesc *p;
2820 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2821 if (!p) {
2822 pd = IO_MEM_UNASSIGNED;
2823 } else {
2824 pd = p->phys_offset;
2825 }
2827 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2828 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2829 #ifdef TARGET_WORDS_BIGENDIAN
2830 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2831 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2832 #else
2833 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2834 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2835 #endif
2836 } else {
2837 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2838 (addr & ~TARGET_PAGE_MASK);
2839 stq_p(ptr, val);
2840 }
2841 }
2843 /* warning: addr must be aligned */
2844 void stl_phys(target_phys_addr_t addr, uint32_t val)
2846 int io_index;
2847 uint8_t *ptr;
2848 unsigned long pd;
2849 PhysPageDesc *p;
2851 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2852 if (!p) {
2853 pd = IO_MEM_UNASSIGNED;
2854 } else {
2855 pd = p->phys_offset;
2856 }
2858 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2859 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2860 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2861 } else {
2862 unsigned long addr1;
2863 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2864 /* RAM case */
2865 ptr = phys_ram_base + addr1;
2866 stl_p(ptr, val);
2867 if (!cpu_physical_memory_is_dirty(addr1)) {
2868 /* invalidate code */
2869 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2870 /* set dirty bit */
2871 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2872 (0xff & ~CODE_DIRTY_FLAG);
2873 }
2874 }
2875 }
2877 /* XXX: optimize */
2878 void stb_phys(target_phys_addr_t addr, uint32_t val)
2879 {
2880 uint8_t v = val;
2881 cpu_physical_memory_write(addr, &v, 1);
2882 }
2884 /* XXX: optimize */
2885 void stw_phys(target_phys_addr_t addr, uint32_t val)
2886 {
2887 uint16_t v = tswap16(val);
2888 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2889 }
2891 /* XXX: optimize */
2892 void stq_phys(target_phys_addr_t addr, uint64_t val)
2893 {
2894 val = tswap64(val);
2895 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2896 }
2898 #endif
2900 /* virtual memory access for debug */
2901 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2902 uint8_t *buf, int len, int is_write)
2904 int l;
2905 target_phys_addr_t phys_addr;
2906 target_ulong page;
2908 while (len > 0) {
2909 page = addr & TARGET_PAGE_MASK;
2910 phys_addr = cpu_get_phys_page_debug(env, page);
2911 /* if no physical page mapped, return an error */
2912 if (phys_addr == -1)
2913 return -1;
2914 l = (page + TARGET_PAGE_SIZE) - addr;
2915 if (l > len)
2916 l = len;
2917 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2918 buf, l, is_write);
2919 len -= l;
2920 buf += l;
2921 addr += l;
2922 }
2923 return 0;
2924 }
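/* cpu_memory_rw_debug() walks the guest virtual address space one page
   at a time, translating each page with cpu_get_phys_page_debug() and
   then going through cpu_physical_memory_rw(); it is intended for
   debugger-style accesses (such as the gdb stub) rather than for the
   fast path. */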
2926 void dump_exec_info(FILE *f,
2927 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2929 int i, target_code_size, max_target_code_size;
2930 int direct_jmp_count, direct_jmp2_count, cross_page;
2931 TranslationBlock *tb;
2933 target_code_size = 0;
2934 max_target_code_size = 0;
2935 cross_page = 0;
2936 direct_jmp_count = 0;
2937 direct_jmp2_count = 0;
2938 for(i = 0; i < nb_tbs; i++) {
2939 tb = &tbs[i];
2940 target_code_size += tb->size;
2941 if (tb->size > max_target_code_size)
2942 max_target_code_size = tb->size;
2943 if (tb->page_addr[1] != -1)
2944 cross_page++;
2945 if (tb->tb_next_offset[0] != 0xffff) {
2946 direct_jmp_count++;
2947 if (tb->tb_next_offset[1] != 0xffff) {
2948 direct_jmp2_count++;
2949 }
2950 }
2951 }
2952 /* XXX: avoid using doubles ? */
2953 cpu_fprintf(f, "Translation buffer state:\n");
2954 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2955 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2956 nb_tbs ? target_code_size / nb_tbs : 0,
2957 max_target_code_size);
2958 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2959 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2960 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2961 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2962 cross_page,
2963 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2964 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2965 direct_jmp_count,
2966 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2967 direct_jmp2_count,
2968 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2969 cpu_fprintf(f, "\nStatistics:\n");
2970 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2971 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2972 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2973 #ifdef CONFIG_PROFILER
2974 {
2975 int64_t tot;
2976 tot = dyngen_interm_time + dyngen_code_time;
2977 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2978 tot, tot / 2.4e9);
2979 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2980 dyngen_tb_count,
2981 dyngen_tb_count1 - dyngen_tb_count,
2982 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
2983 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2984 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
2985 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
2986 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
2987 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2988 dyngen_tb_count ?
2989 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
2990 cpu_fprintf(f, "cycles/op %0.1f\n",
2991 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
2992 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2993 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
2994 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2995 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
2996 if (tot == 0)
2997 tot = 1;
2998 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2999 (double)dyngen_interm_time / tot * 100.0);
3000 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3001 (double)dyngen_code_time / tot * 100.0);
3002 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3003 dyngen_restore_count);
3004 cpu_fprintf(f, " avg cycles %0.1f\n",
3005 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3006 {
3007 extern void dump_op_count(void);
3008 dump_op_count();
3009 }
3010 }
3011 #endif
3012 }
3014 #if !defined(CONFIG_USER_ONLY)
3016 #define MMUSUFFIX _cmmu
3017 #define GETPC() NULL
3018 #define env cpu_single_env
3019 #define SOFTMMU_CODE_ACCESS
3021 #define SHIFT 0
3022 #include "softmmu_template.h"
3024 #define SHIFT 1
3025 #include "softmmu_template.h"
3027 #define SHIFT 2
3028 #include "softmmu_template.h"
3030 #define SHIFT 3
3031 #include "softmmu_template.h"
3033 #undef env
3035 #endif