qemu.git / exec.c, blob c0b6a8f32f609c326cd2c3dc40caea721b2f5734 (commit: "support for non continuous RAM or ROM")
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <stdarg.h>
24 #include <string.h>
25 #include <errno.h>
26 #include <unistd.h>
27 #include <inttypes.h>
28 #if !defined(CONFIG_SOFTMMU)
29 #include <sys/mman.h>
30 #endif
32 #include "cpu.h"
33 #include "exec-all.h"
35 //#define DEBUG_TB_INVALIDATE
36 //#define DEBUG_FLUSH
37 //#define DEBUG_TLB
39 /* make various TB consistency checks */
40 //#define DEBUG_TB_CHECK
41 //#define DEBUG_TLB_CHECK
43 /* threshold to flush the translated code buffer */
44 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
46 #define SMC_BITMAP_USE_THRESHOLD 10
48 #define MMAP_AREA_START 0x00000000
49 #define MMAP_AREA_END 0xa8000000
51 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
52 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
53 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
54 int nb_tbs;
55 /* any access to the tbs or the page table must use this lock */
56 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
58 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
59 uint8_t *code_gen_ptr;
61 int phys_ram_size;
62 int phys_ram_fd;
63 uint8_t *phys_ram_base;
64 uint8_t *phys_ram_dirty;
66 typedef struct PageDesc {
67 /* list of TBs intersecting this ram page */
68 TranslationBlock *first_tb;
69 /* in order to optimize self modifying code handling, we count the
70 writes to a given page and build a code bitmap once a threshold is reached */
71 unsigned int code_write_count;
72 uint8_t *code_bitmap;
73 #if defined(CONFIG_USER_ONLY)
74 unsigned long flags;
75 #endif
76 } PageDesc;
78 typedef struct PhysPageDesc {
79 /* offset in host memory of the page + io_index in the low 12 bits */
80 unsigned long phys_offset;
81 } PhysPageDesc;
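/* phys_offset packs two things into one word: for RAM and ROM pages it holds
   the page-aligned offset into phys_ram_base, while the bits below the page
   size select the memory type / io_index (IO_MEM_RAM, IO_MEM_ROM, IO_MEM_CODE,
   IO_MEM_NOTDIRTY, or a value returned by cpu_register_io_memory() for device
   I/O).  tlb_set_page() and cpu_physical_memory_rw() below decode it with
   (pd & TARGET_PAGE_MASK) and (pd & ~TARGET_PAGE_MASK). */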
83 typedef struct VirtPageDesc {
84 /* physical address of code page. It is valid only if 'valid_tag'
85 matches 'virt_valid_tag' */
86 target_ulong phys_addr;
87 unsigned int valid_tag;
88 #if !defined(CONFIG_SOFTMMU)
89 /* original page access rights. It is valid only if 'valid_tag'
90 matches 'virt_valid_tag' */
91 unsigned int prot;
92 #endif
93 } VirtPageDesc;
95 #define L2_BITS 10
96 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
98 #define L1_SIZE (1 << L1_BITS)
99 #define L2_SIZE (1 << L2_BITS)
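/* The page tables (l1_map, l1_phys_map, l1_virt_map) are two-level arrays
   indexed by target page number: the top L1_BITS of the page number select an
   L1 slot, the low L2_BITS select an entry inside a lazily allocated L2 block
   of L2_SIZE descriptors.  For a 4 KB target page this splits a 32-bit address
   into 10 L1 bits, 10 L2 bits and 12 page-offset bits. */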
101 static void io_mem_init(void);
103 unsigned long real_host_page_size;
104 unsigned long host_page_bits;
105 unsigned long host_page_size;
106 unsigned long host_page_mask;
108 /* XXX: for system emulation, it could just be an array */
109 static PageDesc *l1_map[L1_SIZE];
110 static PhysPageDesc *l1_phys_map[L1_SIZE];
112 #if !defined(CONFIG_USER_ONLY)
113 static VirtPageDesc *l1_virt_map[L1_SIZE];
114 static unsigned int virt_valid_tag;
115 #endif
117 /* io memory support */
118 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
119 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
120 static int io_mem_nb;
122 /* log support */
123 char *logfilename = "/tmp/qemu.log";
124 FILE *logfile;
125 int loglevel;
127 static void page_init(void)
129 /* NOTE: we can always suppose that host_page_size >=
130 TARGET_PAGE_SIZE */
131 #ifdef _WIN32
132 real_host_page_size = 4096;
133 #else
134 real_host_page_size = getpagesize();
135 #endif
136 if (host_page_size == 0)
137 host_page_size = real_host_page_size;
138 if (host_page_size < TARGET_PAGE_SIZE)
139 host_page_size = TARGET_PAGE_SIZE;
140 host_page_bits = 0;
141 while ((1 << host_page_bits) < host_page_size)
142 host_page_bits++;
143 host_page_mask = ~(host_page_size - 1);
144 #if !defined(CONFIG_USER_ONLY)
145 virt_valid_tag = 1;
146 #endif
149 static inline PageDesc *page_find_alloc(unsigned int index)
151 PageDesc **lp, *p;
153 lp = &l1_map[index >> L2_BITS];
154 p = *lp;
155 if (!p) {
156 /* allocate if not found */
157 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
158 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
159 *lp = p;
161 return p + (index & (L2_SIZE - 1));
164 static inline PageDesc *page_find(unsigned int index)
166 PageDesc *p;
168 p = l1_map[index >> L2_BITS];
169 if (!p)
170 return 0;
171 return p + (index & (L2_SIZE - 1));
174 static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
176 PhysPageDesc **lp, *p;
178 lp = &l1_phys_map[index >> L2_BITS];
179 p = *lp;
180 if (!p) {
181 /* allocate if not found */
182 p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
183 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
184 *lp = p;
186 return p + (index & (L2_SIZE - 1));
189 static inline PhysPageDesc *phys_page_find(unsigned int index)
191 PhysPageDesc *p;
193 p = l1_phys_map[index >> L2_BITS];
194 if (!p)
195 return 0;
196 return p + (index & (L2_SIZE - 1));
199 #if !defined(CONFIG_USER_ONLY)
200 static void tlb_protect_code(CPUState *env, target_ulong addr);
201 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
203 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
205 VirtPageDesc **lp, *p;
207 lp = &l1_virt_map[index >> L2_BITS];
208 p = *lp;
209 if (!p) {
210 /* allocate if not found */
211 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
212 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
213 *lp = p;
215 return p + (index & (L2_SIZE - 1));
218 static inline VirtPageDesc *virt_page_find(unsigned int index)
220 VirtPageDesc *p;
222 p = l1_virt_map[index >> L2_BITS];
223 if (!p)
224 return 0;
225 return p + (index & (L2_SIZE - 1));
228 static void virt_page_flush(void)
230 int i, j;
231 VirtPageDesc *p;
233 virt_valid_tag++;
235 if (virt_valid_tag == 0) {
236 virt_valid_tag = 1;
237 for(i = 0; i < L1_SIZE; i++) {
238 p = l1_virt_map[i];
239 if (p) {
240 for(j = 0; j < L2_SIZE; j++)
241 p[j].valid_tag = 0;
246 #else
247 static void virt_page_flush(void)
250 #endif
252 void cpu_exec_init(void)
254 if (!code_gen_ptr) {
255 code_gen_ptr = code_gen_buffer;
256 page_init();
257 io_mem_init();
261 static inline void invalidate_page_bitmap(PageDesc *p)
263 if (p->code_bitmap) {
264 qemu_free(p->code_bitmap);
265 p->code_bitmap = NULL;
267 p->code_write_count = 0;
270 /* set to NULL all the 'first_tb' fields in all PageDescs */
271 static void page_flush_tb(void)
273 int i, j;
274 PageDesc *p;
276 for(i = 0; i < L1_SIZE; i++) {
277 p = l1_map[i];
278 if (p) {
279 for(j = 0; j < L2_SIZE; j++) {
280 p->first_tb = NULL;
281 invalidate_page_bitmap(p);
282 p++;
288 /* flush all the translation blocks */
289 /* XXX: tb_flush is currently not thread safe */
290 void tb_flush(CPUState *env)
292 int i;
293 #if defined(DEBUG_FLUSH)
294 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
295 code_gen_ptr - code_gen_buffer,
296 nb_tbs,
297 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
298 #endif
299 nb_tbs = 0;
300 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
301 tb_hash[i] = NULL;
302 virt_page_flush();
304 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
305 tb_phys_hash[i] = NULL;
306 page_flush_tb();
308 code_gen_ptr = code_gen_buffer;
309 /* XXX: flush processor icache at this point if cache flush is
310 expensive */
313 #ifdef DEBUG_TB_CHECK
315 static void tb_invalidate_check(unsigned long address)
317 TranslationBlock *tb;
318 int i;
319 address &= TARGET_PAGE_MASK;
320 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
321 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
322 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
323 address >= tb->pc + tb->size)) {
324 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
325 address, tb->pc, tb->size);
331 /* verify that all the pages have correct rights for code */
332 static void tb_page_check(void)
334 TranslationBlock *tb;
335 int i, flags1, flags2;
337 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
338 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
339 flags1 = page_get_flags(tb->pc);
340 flags2 = page_get_flags(tb->pc + tb->size - 1);
341 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
342 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
343 tb->pc, tb->size, flags1, flags2);
349 void tb_jmp_check(TranslationBlock *tb)
351 TranslationBlock *tb1;
352 unsigned int n1;
354 /* suppress any remaining jumps to this TB */
355 tb1 = tb->jmp_first;
356 for(;;) {
357 n1 = (long)tb1 & 3;
358 tb1 = (TranslationBlock *)((long)tb1 & ~3);
359 if (n1 == 2)
360 break;
361 tb1 = tb1->jmp_next[n1];
363 /* check end of list */
364 if (tb1 != tb) {
365 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
369 #endif
371 /* invalidate one TB */
372 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
373 int next_offset)
375 TranslationBlock *tb1;
376 for(;;) {
377 tb1 = *ptb;
378 if (tb1 == tb) {
379 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
380 break;
382 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
386 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
388 TranslationBlock *tb1;
389 unsigned int n1;
391 for(;;) {
392 tb1 = *ptb;
393 n1 = (long)tb1 & 3;
394 tb1 = (TranslationBlock *)((long)tb1 & ~3);
395 if (tb1 == tb) {
396 *ptb = tb1->page_next[n1];
397 break;
399 ptb = &tb1->page_next[n1];
403 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
405 TranslationBlock *tb1, **ptb;
406 unsigned int n1;
408 ptb = &tb->jmp_next[n];
409 tb1 = *ptb;
410 if (tb1) {
411 /* find tb(n) in circular list */
412 for(;;) {
413 tb1 = *ptb;
414 n1 = (long)tb1 & 3;
415 tb1 = (TranslationBlock *)((long)tb1 & ~3);
416 if (n1 == n && tb1 == tb)
417 break;
418 if (n1 == 2) {
419 ptb = &tb1->jmp_first;
420 } else {
421 ptb = &tb1->jmp_next[n1];
424 /* now we can suppress tb(n) from the list */
425 *ptb = tb->jmp_next[n];
427 tb->jmp_next[n] = NULL;
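/* The jump lists use tagged pointers: the two low bits of a TranslationBlock
   pointer stored in jmp_first/jmp_next encode which of the referencing TB's
   two jump slots (0 or 1) points here, and the value 2 marks the owning TB
   itself, terminating the circular list.  That is why the walkers above mask
   with ~3 and stop when (ptr & 3) == 2.  page_next[] uses the same trick with
   values 0/1 to record which of a TB's (up to two) pages the link belongs to. */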
431 /* reset the jump entry 'n' of a TB so that it is not chained to
432 another TB */
433 static inline void tb_reset_jump(TranslationBlock *tb, int n)
435 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
438 static inline void tb_invalidate(TranslationBlock *tb)
440 unsigned int h, n1;
441 TranslationBlock *tb1, *tb2, **ptb;
443 tb_invalidated_flag = 1;
445 /* remove the TB from the hash list */
446 h = tb_hash_func(tb->pc);
447 ptb = &tb_hash[h];
448 for(;;) {
449 tb1 = *ptb;
450 /* NOTE: the TB is not necessarily linked in the hash; in that
451 case it is not currently in use */
452 if (tb1 == NULL)
453 return;
454 if (tb1 == tb) {
455 *ptb = tb1->hash_next;
456 break;
458 ptb = &tb1->hash_next;
461 /* suppress this TB from the two jump lists */
462 tb_jmp_remove(tb, 0);
463 tb_jmp_remove(tb, 1);
465 /* suppress any remaining jumps to this TB */
466 tb1 = tb->jmp_first;
467 for(;;) {
468 n1 = (long)tb1 & 3;
469 if (n1 == 2)
470 break;
471 tb1 = (TranslationBlock *)((long)tb1 & ~3);
472 tb2 = tb1->jmp_next[n1];
473 tb_reset_jump(tb1, n1);
474 tb1->jmp_next[n1] = NULL;
475 tb1 = tb2;
477 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
480 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
482 PageDesc *p;
483 unsigned int h;
484 target_ulong phys_pc;
486 /* remove the TB from the hash list */
487 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
488 h = tb_phys_hash_func(phys_pc);
489 tb_remove(&tb_phys_hash[h], tb,
490 offsetof(TranslationBlock, phys_hash_next));
492 /* remove the TB from the page list */
493 if (tb->page_addr[0] != page_addr) {
494 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
495 tb_page_remove(&p->first_tb, tb);
496 invalidate_page_bitmap(p);
498 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
499 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
500 tb_page_remove(&p->first_tb, tb);
501 invalidate_page_bitmap(p);
504 tb_invalidate(tb);
507 static inline void set_bits(uint8_t *tab, int start, int len)
509 int end, mask, end1;
511 end = start + len;
512 tab += start >> 3;
513 mask = 0xff << (start & 7);
514 if ((start & ~7) == (end & ~7)) {
515 if (start < end) {
516 mask &= ~(0xff << (end & 7));
517 *tab |= mask;
519 } else {
520 *tab++ |= mask;
521 start = (start + 8) & ~7;
522 end1 = end & ~7;
523 while (start < end1) {
524 *tab++ = 0xff;
525 start += 8;
527 if (start < end) {
528 mask = ~(0xff << (end & 7));
529 *tab |= mask;
534 static void build_page_bitmap(PageDesc *p)
536 int n, tb_start, tb_end;
537 TranslationBlock *tb;
539 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
540 if (!p->code_bitmap)
541 return;
542 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
544 tb = p->first_tb;
545 while (tb != NULL) {
546 n = (long)tb & 3;
547 tb = (TranslationBlock *)((long)tb & ~3);
548 /* NOTE: this is subtle as a TB may span two physical pages */
549 if (n == 0) {
550 /* NOTE: tb_end may be after the end of the page, but
551 it is not a problem */
552 tb_start = tb->pc & ~TARGET_PAGE_MASK;
553 tb_end = tb_start + tb->size;
554 if (tb_end > TARGET_PAGE_SIZE)
555 tb_end = TARGET_PAGE_SIZE;
556 } else {
557 tb_start = 0;
558 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
560 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
561 tb = tb->page_next[n];
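/* The code bitmap has one bit per byte of the target page (so
   TARGET_PAGE_SIZE / 8 bytes of storage): a set bit means that byte is covered
   by at least one translated block.  Once built, tb_invalidate_phys_page_fast()
   consults it so that writes which do not touch translated code avoid the full
   invalidation scan. */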
565 #ifdef TARGET_HAS_PRECISE_SMC
567 static void tb_gen_code(CPUState *env,
568 target_ulong pc, target_ulong cs_base, int flags,
569 int cflags)
571 TranslationBlock *tb;
572 uint8_t *tc_ptr;
573 target_ulong phys_pc, phys_page2, virt_page2;
574 int code_gen_size;
576 phys_pc = get_phys_addr_code(env, (unsigned long)pc);
577 tb = tb_alloc((unsigned long)pc);
578 if (!tb) {
579 /* flush must be done */
580 tb_flush(env);
581 /* cannot fail at this point */
582 tb = tb_alloc((unsigned long)pc);
584 tc_ptr = code_gen_ptr;
585 tb->tc_ptr = tc_ptr;
586 tb->cs_base = cs_base;
587 tb->flags = flags;
588 tb->cflags = cflags;
589 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
590 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
592 /* check next page if needed */
593 virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
594 phys_page2 = -1;
595 if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
596 phys_page2 = get_phys_addr_code(env, virt_page2);
598 tb_link_phys(tb, phys_pc, phys_page2);
600 #endif
602 /* invalidate all TBs which intersect with the target physical page
603 starting in the range [start, end). NOTE: start and end must refer to
604 the same physical page. 'is_cpu_write_access' should be true if called
605 from a real cpu write access: the virtual CPU will exit the current
606 TB if code is modified inside this TB. */
607 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
608 int is_cpu_write_access)
610 int n, current_tb_modified, current_tb_not_found, current_flags;
611 #if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
612 CPUState *env = cpu_single_env;
613 #endif
614 PageDesc *p;
615 TranslationBlock *tb, *tb_next, *current_tb;
616 target_ulong tb_start, tb_end;
617 target_ulong current_pc, current_cs_base;
619 p = page_find(start >> TARGET_PAGE_BITS);
620 if (!p)
621 return;
622 if (!p->code_bitmap &&
623 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
624 is_cpu_write_access) {
625 /* build code bitmap */
626 build_page_bitmap(p);
629 /* we remove all the TBs in the range [start, end) */
630 /* XXX: see if in some cases it could be faster to invalidate all the code */
631 current_tb_not_found = is_cpu_write_access;
632 current_tb_modified = 0;
633 current_tb = NULL; /* avoid warning */
634 current_pc = 0; /* avoid warning */
635 current_cs_base = 0; /* avoid warning */
636 current_flags = 0; /* avoid warning */
637 tb = p->first_tb;
638 while (tb != NULL) {
639 n = (long)tb & 3;
640 tb = (TranslationBlock *)((long)tb & ~3);
641 tb_next = tb->page_next[n];
642 /* NOTE: this is subtle as a TB may span two physical pages */
643 if (n == 0) {
644 /* NOTE: tb_end may be after the end of the page, but
645 it is not a problem */
646 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
647 tb_end = tb_start + tb->size;
648 } else {
649 tb_start = tb->page_addr[1];
650 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
652 if (!(tb_end <= start || tb_start >= end)) {
653 #ifdef TARGET_HAS_PRECISE_SMC
654 if (current_tb_not_found) {
655 current_tb_not_found = 0;
656 current_tb = NULL;
657 if (env->mem_write_pc) {
658 /* now we have a real cpu fault */
659 current_tb = tb_find_pc(env->mem_write_pc);
662 if (current_tb == tb &&
663 !(current_tb->cflags & CF_SINGLE_INSN)) {
664 /* If we are modifying the current TB, we must stop
665 its execution. We could be more precise by checking
666 that the modification is after the current PC, but it
667 would require a specialized function to partially
668 restore the CPU state */
670 current_tb_modified = 1;
671 cpu_restore_state(current_tb, env,
672 env->mem_write_pc, NULL);
673 #if defined(TARGET_I386)
674 current_flags = env->hflags;
675 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
676 current_cs_base = (target_ulong)env->segs[R_CS].base;
677 current_pc = current_cs_base + env->eip;
678 #else
679 #error unsupported CPU
680 #endif
682 #endif /* TARGET_HAS_PRECISE_SMC */
683 tb_phys_invalidate(tb, -1);
685 tb = tb_next;
687 #if !defined(CONFIG_USER_ONLY)
688 /* if no code remaining, no need to continue to use slow writes */
689 if (!p->first_tb) {
690 invalidate_page_bitmap(p);
691 if (is_cpu_write_access) {
692 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
695 #endif
696 #ifdef TARGET_HAS_PRECISE_SMC
697 if (current_tb_modified) {
698 /* we generate a block containing just the instruction
699 modifying the memory. It will ensure that it cannot modify
700 itself */
701 tb_gen_code(env, current_pc, current_cs_base, current_flags,
702 CF_SINGLE_INSN);
703 cpu_resume_from_signal(env, NULL);
705 #endif
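/* Summary of the self-modifying-code path: a write to a page holding
   translated code is trapped (via the IO_MEM_CODE handlers in softmmu mode, or
   mprotect plus the SIGSEGV handler in user mode) and ends up in the
   invalidation routines above.  All TBs overlapping the written range are
   removed; if the write came from the TB currently executing (precise SMC),
   that TB is regenerated as a single-instruction block and execution resumes
   so the modified code gets retranslated. */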
708 /* len must be <= 8 and start must be a multiple of len */
709 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
711 PageDesc *p;
712 int offset, b;
713 #if 0
714 if (cpu_single_env->cr[0] & CR0_PE_MASK) {
715 printf("modifying code at 0x%x size=%d EIP=%x\n",
716 (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
717 cpu_single_env->eip);
719 #endif
720 p = page_find(start >> TARGET_PAGE_BITS);
721 if (!p)
722 return;
723 if (p->code_bitmap) {
724 offset = start & ~TARGET_PAGE_MASK;
725 b = p->code_bitmap[offset >> 3] >> (offset & 7);
726 if (b & ((1 << len) - 1))
727 goto do_invalidate;
728 } else {
729 do_invalidate:
730 tb_invalidate_phys_page_range(start, start + len, 1);
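/* Example of the fast path: for a 4-byte store at page offset 0x124,
   offset >> 3 selects bitmap byte 0x24 and (b >> (offset & 7)) &
   ((1 << len) - 1) tests bits 4..7, i.e. bytes 0x124..0x127 of the page; only
   if one of those bits is set does the expensive
   tb_invalidate_phys_page_range() call run. */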
734 #if !defined(CONFIG_SOFTMMU)
735 static void tb_invalidate_phys_page(target_ulong addr,
736 unsigned long pc, void *puc)
738 int n, current_flags, current_tb_modified;
739 target_ulong current_pc, current_cs_base;
740 PageDesc *p;
741 TranslationBlock *tb, *current_tb;
742 #ifdef TARGET_HAS_PRECISE_SMC
743 CPUState *env = cpu_single_env;
744 #endif
746 addr &= TARGET_PAGE_MASK;
747 p = page_find(addr >> TARGET_PAGE_BITS);
748 if (!p)
749 return;
750 tb = p->first_tb;
751 current_tb_modified = 0;
752 current_tb = NULL;
753 current_pc = 0; /* avoid warning */
754 current_cs_base = 0; /* avoid warning */
755 current_flags = 0; /* avoid warning */
756 #ifdef TARGET_HAS_PRECISE_SMC
757 if (tb && pc != 0) {
758 current_tb = tb_find_pc(pc);
760 #endif
761 while (tb != NULL) {
762 n = (long)tb & 3;
763 tb = (TranslationBlock *)((long)tb & ~3);
764 #ifdef TARGET_HAS_PRECISE_SMC
765 if (current_tb == tb &&
766 !(current_tb->cflags & CF_SINGLE_INSN)) {
767 /* If we are modifying the current TB, we must stop
768 its execution. We could be more precise by checking
769 that the modification is after the current PC, but it
770 would require a specialized function to partially
771 restore the CPU state */
773 current_tb_modified = 1;
774 cpu_restore_state(current_tb, env, pc, puc);
775 #if defined(TARGET_I386)
776 current_flags = env->hflags;
777 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
778 current_cs_base = (target_ulong)env->segs[R_CS].base;
779 current_pc = current_cs_base + env->eip;
780 #else
781 #error unsupported CPU
782 #endif
784 #endif /* TARGET_HAS_PRECISE_SMC */
785 tb_phys_invalidate(tb, addr);
786 tb = tb->page_next[n];
788 p->first_tb = NULL;
789 #ifdef TARGET_HAS_PRECISE_SMC
790 if (current_tb_modified) {
791 /* we generate a block containing just the instruction
792 modifying the memory. It will ensure that it cannot modify
793 itself */
794 tb_gen_code(env, current_pc, current_cs_base, current_flags,
795 CF_SINGLE_INSN);
796 cpu_resume_from_signal(env, puc);
798 #endif
800 #endif
802 /* add the tb to the target page and protect it if necessary */
803 static inline void tb_alloc_page(TranslationBlock *tb,
804 unsigned int n, unsigned int page_addr)
806 PageDesc *p;
807 TranslationBlock *last_first_tb;
809 tb->page_addr[n] = page_addr;
810 p = page_find(page_addr >> TARGET_PAGE_BITS);
811 tb->page_next[n] = p->first_tb;
812 last_first_tb = p->first_tb;
813 p->first_tb = (TranslationBlock *)((long)tb | n);
814 invalidate_page_bitmap(p);
816 #ifdef TARGET_HAS_SMC
818 #if defined(CONFIG_USER_ONLY)
819 if (p->flags & PAGE_WRITE) {
820 unsigned long host_start, host_end, addr;
821 int prot;
823 /* force the host page to be non writable (writes will take a
824 page fault + mprotect overhead) */
825 host_start = page_addr & host_page_mask;
826 host_end = host_start + host_page_size;
827 prot = 0;
828 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
829 prot |= page_get_flags(addr);
830 mprotect((void *)host_start, host_page_size,
831 (prot & PAGE_BITS) & ~PAGE_WRITE);
832 #ifdef DEBUG_TB_INVALIDATE
833 printf("protecting code page: 0x%08lx\n",
834 host_start);
835 #endif
836 p->flags &= ~PAGE_WRITE;
838 #else
839 /* if some code is already present, then the pages are already
840 protected, so we only need to handle the case where the first TB
841 is allocated in a physical page */
842 if (!last_first_tb) {
843 target_ulong virt_addr;
845 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
846 tlb_protect_code(cpu_single_env, virt_addr);
848 #endif
850 #endif /* TARGET_HAS_SMC */
853 /* Allocate a new translation block. Flush the translation buffer if
854 too many translation blocks or too much generated code. */
855 TranslationBlock *tb_alloc(unsigned long pc)
857 TranslationBlock *tb;
859 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
860 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
861 return NULL;
862 tb = &tbs[nb_tbs++];
863 tb->pc = pc;
864 tb->cflags = 0;
865 return tb;
868 /* add a new TB and link it to the physical page tables. phys_page2 is
869 (-1) to indicate that only one page contains the TB. */
870 void tb_link_phys(TranslationBlock *tb,
871 target_ulong phys_pc, target_ulong phys_page2)
873 unsigned int h;
874 TranslationBlock **ptb;
876 /* add in the physical hash table */
877 h = tb_phys_hash_func(phys_pc);
878 ptb = &tb_phys_hash[h];
879 tb->phys_hash_next = *ptb;
880 *ptb = tb;
882 /* add in the page list */
883 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
884 if (phys_page2 != -1)
885 tb_alloc_page(tb, 1, phys_page2);
886 else
887 tb->page_addr[1] = -1;
888 #ifdef DEBUG_TB_CHECK
889 tb_page_check();
890 #endif
893 /* link the tb with the other TBs */
894 void tb_link(TranslationBlock *tb)
896 #if !defined(CONFIG_USER_ONLY)
898 VirtPageDesc *vp;
899 target_ulong addr;
901 /* save the code memory mappings (needed to invalidate the code) */
902 addr = tb->pc & TARGET_PAGE_MASK;
903 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
904 #ifdef DEBUG_TLB_CHECK
905 if (vp->valid_tag == virt_valid_tag &&
906 vp->phys_addr != tb->page_addr[0]) {
907 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
908 addr, tb->page_addr[0], vp->phys_addr);
910 #endif
911 vp->phys_addr = tb->page_addr[0];
912 if (vp->valid_tag != virt_valid_tag) {
913 vp->valid_tag = virt_valid_tag;
914 #if !defined(CONFIG_SOFTMMU)
915 vp->prot = 0;
916 #endif
919 if (tb->page_addr[1] != -1) {
920 addr += TARGET_PAGE_SIZE;
921 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
922 #ifdef DEBUG_TLB_CHECK
923 if (vp->valid_tag == virt_valid_tag &&
924 vp->phys_addr != tb->page_addr[1]) {
925 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
926 addr, tb->page_addr[1], vp->phys_addr);
928 #endif
929 vp->phys_addr = tb->page_addr[1];
930 if (vp->valid_tag != virt_valid_tag) {
931 vp->valid_tag = virt_valid_tag;
932 #if !defined(CONFIG_SOFTMMU)
933 vp->prot = 0;
934 #endif
938 #endif
940 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
941 tb->jmp_next[0] = NULL;
942 tb->jmp_next[1] = NULL;
943 #ifdef USE_CODE_COPY
944 tb->cflags &= ~CF_FP_USED;
945 if (tb->cflags & CF_TB_FP_USED)
946 tb->cflags |= CF_FP_USED;
947 #endif
949 /* init original jump addresses */
950 if (tb->tb_next_offset[0] != 0xffff)
951 tb_reset_jump(tb, 0);
952 if (tb->tb_next_offset[1] != 0xffff)
953 tb_reset_jump(tb, 1);
956 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
957 tb[1].tc_ptr. Return NULL if not found */
958 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
960 int m_min, m_max, m;
961 unsigned long v;
962 TranslationBlock *tb;
964 if (nb_tbs <= 0)
965 return NULL;
966 if (tc_ptr < (unsigned long)code_gen_buffer ||
967 tc_ptr >= (unsigned long)code_gen_ptr)
968 return NULL;
969 /* binary search (cf Knuth) */
970 m_min = 0;
971 m_max = nb_tbs - 1;
972 while (m_min <= m_max) {
973 m = (m_min + m_max) >> 1;
974 tb = &tbs[m];
975 v = (unsigned long)tb->tc_ptr;
976 if (v == tc_ptr)
977 return tb;
978 else if (tc_ptr < v) {
979 m_max = m - 1;
980 } else {
981 m_min = m + 1;
984 return &tbs[m_max];
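/* The binary search above is valid because TBs are handed out from tbs[] in
   allocation order and their generated code is laid out sequentially in
   code_gen_buffer, so tc_ptr values increase monotonically with the index;
   tb_flush() resets both the array and code_gen_ptr together. */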
987 static void tb_reset_jump_recursive(TranslationBlock *tb);
989 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
991 TranslationBlock *tb1, *tb_next, **ptb;
992 unsigned int n1;
994 tb1 = tb->jmp_next[n];
995 if (tb1 != NULL) {
996 /* find head of list */
997 for(;;) {
998 n1 = (long)tb1 & 3;
999 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1000 if (n1 == 2)
1001 break;
1002 tb1 = tb1->jmp_next[n1];
1004 /* we are now sure that tb jumps to tb1 */
1005 tb_next = tb1;
1007 /* remove tb from the jmp_first list */
1008 ptb = &tb_next->jmp_first;
1009 for(;;) {
1010 tb1 = *ptb;
1011 n1 = (long)tb1 & 3;
1012 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1013 if (n1 == n && tb1 == tb)
1014 break;
1015 ptb = &tb1->jmp_next[n1];
1017 *ptb = tb->jmp_next[n];
1018 tb->jmp_next[n] = NULL;
1020 /* suppress the jump to next tb in generated code */
1021 tb_reset_jump(tb, n);
1023 /* suppress jumps in the tb on which we could have jumped */
1024 tb_reset_jump_recursive(tb_next);
1028 static void tb_reset_jump_recursive(TranslationBlock *tb)
1030 tb_reset_jump_recursive2(tb, 0);
1031 tb_reset_jump_recursive2(tb, 1);
1034 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1036 target_ulong phys_addr;
1038 phys_addr = cpu_get_phys_page_debug(env, pc);
1039 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1042 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1043 breakpoint is reached */
1044 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1046 #if defined(TARGET_I386) || defined(TARGET_PPC)
1047 int i;
1049 for(i = 0; i < env->nb_breakpoints; i++) {
1050 if (env->breakpoints[i] == pc)
1051 return 0;
1054 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1055 return -1;
1056 env->breakpoints[env->nb_breakpoints++] = pc;
1058 breakpoint_invalidate(env, pc);
1059 return 0;
1060 #else
1061 return -1;
1062 #endif
1065 /* remove a breakpoint */
1066 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1068 #if defined(TARGET_I386) || defined(TARGET_PPC)
1069 int i;
1070 for(i = 0; i < env->nb_breakpoints; i++) {
1071 if (env->breakpoints[i] == pc)
1072 goto found;
1074 return -1;
1075 found:
1076 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1077 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1078 env->nb_breakpoints--;
1080 breakpoint_invalidate(env, pc);
1081 return 0;
1082 #else
1083 return -1;
1084 #endif
1087 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1088 CPU loop after each instruction */
1089 void cpu_single_step(CPUState *env, int enabled)
1091 #if defined(TARGET_I386) || defined(TARGET_PPC)
1092 if (env->singlestep_enabled != enabled) {
1093 env->singlestep_enabled = enabled;
1094 /* must flush all the translated code to avoid inconsistencies */
1095 /* XXX: only flush what is necessary */
1096 tb_flush(env);
1098 #endif
1101 /* enable or disable low level logging */
1102 void cpu_set_log(int log_flags)
1104 loglevel = log_flags;
1105 if (loglevel && !logfile) {
1106 logfile = fopen(logfilename, "w");
1107 if (!logfile) {
1108 perror(logfilename);
1109 _exit(1);
1111 #if !defined(CONFIG_SOFTMMU)
1112 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1114 static uint8_t logfile_buf[4096];
1115 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1117 #else
1118 setvbuf(logfile, NULL, _IOLBF, 0);
1119 #endif
1123 void cpu_set_log_filename(const char *filename)
1125 logfilename = strdup(filename);
1128 /* mask must never be zero, except for A20 change call */
1129 void cpu_interrupt(CPUState *env, int mask)
1131 TranslationBlock *tb;
1132 static int interrupt_lock;
1134 env->interrupt_request |= mask;
1135 /* if the cpu is currently executing code, we must unlink it and
1136 all the potentially executing TBs */
1137 tb = env->current_tb;
1138 if (tb && !testandset(&interrupt_lock)) {
1139 env->current_tb = NULL;
1140 tb_reset_jump_recursive(tb);
1141 interrupt_lock = 0;
1145 void cpu_reset_interrupt(CPUState *env, int mask)
1147 env->interrupt_request &= ~mask;
1150 CPULogItem cpu_log_items[] = {
1151 { CPU_LOG_TB_OUT_ASM, "out_asm",
1152 "show generated host assembly code for each compiled TB" },
1153 { CPU_LOG_TB_IN_ASM, "in_asm",
1154 "show target assembly code for each compiled TB" },
1155 { CPU_LOG_TB_OP, "op",
1156 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1157 #ifdef TARGET_I386
1158 { CPU_LOG_TB_OP_OPT, "op_opt",
1159 "show micro ops after optimization for each compiled TB" },
1160 #endif
1161 { CPU_LOG_INT, "int",
1162 "show interrupts/exceptions in short format" },
1163 { CPU_LOG_EXEC, "exec",
1164 "show trace before each executed TB (lots of logs)" },
1165 { CPU_LOG_TB_CPU, "cpu",
1166 "show CPU state before bloc translation" },
1167 #ifdef TARGET_I386
1168 { CPU_LOG_PCALL, "pcall",
1169 "show protected mode far calls/returns/exceptions" },
1170 #endif
1171 { CPU_LOG_IOPORT, "ioport",
1172 "show all i/o ports accesses" },
1173 { 0, NULL, NULL },
1176 static int cmp1(const char *s1, int n, const char *s2)
1178 if (strlen(s2) != n)
1179 return 0;
1180 return memcmp(s1, s2, n) == 0;
1183 /* takes a comma separated list of log masks. Return 0 if error. */
1184 int cpu_str_to_log_mask(const char *str)
1186 CPULogItem *item;
1187 int mask;
1188 const char *p, *p1;
1190 p = str;
1191 mask = 0;
1192 for(;;) {
1193 p1 = strchr(p, ',');
1194 if (!p1)
1195 p1 = p + strlen(p);
1196 for(item = cpu_log_items; item->mask != 0; item++) {
1197 if (cmp1(p, p1 - p, item->name))
1198 goto found;
1200 return 0;
1201 found:
1202 mask |= item->mask;
1203 if (*p1 != ',')
1204 break;
1205 p = p1 + 1;
1207 return mask;
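/* Typical use (sketch): a command line parser passes the user string to
   cpu_str_to_log_mask() and feeds the result to cpu_set_log(), e.g.

       int mask = cpu_str_to_log_mask("in_asm,cpu");
       if (mask != 0)
           cpu_set_log(mask);

   "in_asm,cpu" is just an illustrative item list taken from cpu_log_items
   above; a return value of 0 means an unknown item name was given. */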
1210 void cpu_abort(CPUState *env, const char *fmt, ...)
1212 va_list ap;
1214 va_start(ap, fmt);
1215 fprintf(stderr, "qemu: fatal: ");
1216 vfprintf(stderr, fmt, ap);
1217 fprintf(stderr, "\n");
1218 #ifdef TARGET_I386
1219 cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
1220 #endif
1221 va_end(ap);
1222 abort();
1225 #if !defined(CONFIG_USER_ONLY)
1227 /* NOTE: if flush_global is true, also flush global entries (not
1228 implemented yet) */
1229 void tlb_flush(CPUState *env, int flush_global)
1231 int i;
1233 #if defined(DEBUG_TLB)
1234 printf("tlb_flush:\n");
1235 #endif
1236 /* must reset current TB so that interrupts cannot modify the
1237 links while we are modifying them */
1238 env->current_tb = NULL;
1240 for(i = 0; i < CPU_TLB_SIZE; i++) {
1241 env->tlb_read[0][i].address = -1;
1242 env->tlb_write[0][i].address = -1;
1243 env->tlb_read[1][i].address = -1;
1244 env->tlb_write[1][i].address = -1;
1247 virt_page_flush();
1248 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1249 tb_hash[i] = NULL;
1251 #if !defined(CONFIG_SOFTMMU)
1252 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1253 #endif
1256 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1258 if (addr == (tlb_entry->address &
1259 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1260 tlb_entry->address = -1;
1263 void tlb_flush_page(CPUState *env, target_ulong addr)
1265 int i, n;
1266 VirtPageDesc *vp;
1267 PageDesc *p;
1268 TranslationBlock *tb;
1270 #if defined(DEBUG_TLB)
1271 printf("tlb_flush_page: 0x%08x\n", addr);
1272 #endif
1273 /* must reset current TB so that interrupts cannot modify the
1274 links while we are modifying them */
1275 env->current_tb = NULL;
1277 addr &= TARGET_PAGE_MASK;
1278 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1279 tlb_flush_entry(&env->tlb_read[0][i], addr);
1280 tlb_flush_entry(&env->tlb_write[0][i], addr);
1281 tlb_flush_entry(&env->tlb_read[1][i], addr);
1282 tlb_flush_entry(&env->tlb_write[1][i], addr);
1284 /* remove from the virtual pc hash table all the TBs at this
1285 virtual address */
1287 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1288 if (vp && vp->valid_tag == virt_valid_tag) {
1289 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1290 if (p) {
1291 /* we remove all the links to the TBs in this virtual page */
1292 tb = p->first_tb;
1293 while (tb != NULL) {
1294 n = (long)tb & 3;
1295 tb = (TranslationBlock *)((long)tb & ~3);
1296 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1297 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1298 tb_invalidate(tb);
1300 tb = tb->page_next[n];
1303 vp->valid_tag = 0;
1306 #if !defined(CONFIG_SOFTMMU)
1307 if (addr < MMAP_AREA_END)
1308 munmap((void *)addr, TARGET_PAGE_SIZE);
1309 #endif
1312 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1314 if (addr == (tlb_entry->address &
1315 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1316 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1317 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1318 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1322 /* update the TLBs so that writes to code in the virtual page 'addr'
1323 can be detected */
1324 static void tlb_protect_code(CPUState *env, target_ulong addr)
1326 int i;
1328 addr &= TARGET_PAGE_MASK;
1329 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1330 tlb_protect_code1(&env->tlb_write[0][i], addr);
1331 tlb_protect_code1(&env->tlb_write[1][i], addr);
1332 #if !defined(CONFIG_SOFTMMU)
1333 /* NOTE: as we generated the code for this page, it is already at
1334 least readable */
1335 if (addr < MMAP_AREA_END)
1336 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1337 #endif
1340 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1341 unsigned long phys_addr)
1343 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1344 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1345 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1349 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1350 tested for self modifying code */
1351 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1353 int i;
1355 phys_addr &= TARGET_PAGE_MASK;
1356 phys_addr += (long)phys_ram_base;
1357 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1358 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1359 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1362 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1363 unsigned long start, unsigned long length)
1365 unsigned long addr;
1366 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1367 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1368 if ((addr - start) < length) {
1369 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1374 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1376 CPUState *env;
1377 unsigned long length, start1;
1378 int i;
1380 start &= TARGET_PAGE_MASK;
1381 end = TARGET_PAGE_ALIGN(end);
1383 length = end - start;
1384 if (length == 0)
1385 return;
1386 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1388 env = cpu_single_env;
1389 /* we modify the TLB cache so that the dirty bit will be set again
1390 when accessing the range */
1391 start1 = start + (unsigned long)phys_ram_base;
1392 for(i = 0; i < CPU_TLB_SIZE; i++)
1393 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1394 for(i = 0; i < CPU_TLB_SIZE; i++)
1395 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1397 #if !defined(CONFIG_SOFTMMU)
1398 /* XXX: this is expensive */
1400 VirtPageDesc *p;
1401 int j;
1402 target_ulong addr;
1404 for(i = 0; i < L1_SIZE; i++) {
1405 p = l1_virt_map[i];
1406 if (p) {
1407 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1408 for(j = 0; j < L2_SIZE; j++) {
1409 if (p->valid_tag == virt_valid_tag &&
1410 p->phys_addr >= start && p->phys_addr < end &&
1411 (p->prot & PROT_WRITE)) {
1412 if (addr < MMAP_AREA_END) {
1413 mprotect((void *)addr, TARGET_PAGE_SIZE,
1414 p->prot & ~PROT_WRITE);
1417 addr += TARGET_PAGE_SIZE;
1418 p++;
1423 #endif
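/* Dirty RAM tracking: phys_ram_dirty holds one byte per target RAM page.
   cpu_physical_memory_reset_dirty() clears the bytes for a range and
   downgrades matching write TLB entries to IO_MEM_NOTDIRTY, so the next guest
   store goes through notdirty_mem_write*(), which re-marks the page dirty and
   restores the fast IO_MEM_RAM mapping via tlb_set_dirty(). */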
1426 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1427 unsigned long start)
1429 unsigned long addr;
1430 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1431 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1432 if (addr == start) {
1433 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1438 /* update the TLB corresponding to virtual page vaddr and phys addr
1439 addr so that it is no longer dirty */
1440 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1442 CPUState *env = cpu_single_env;
1443 int i;
1445 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1447 addr &= TARGET_PAGE_MASK;
1448 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1449 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1450 tlb_set_dirty1(&env->tlb_write[1][i], addr);
1453 /* add a new TLB entry. At most one entry for a given virtual address
1454 is permitted. Return 0 if OK or 2 if the page could not be mapped
1455 (can only happen in non SOFTMMU mode for I/O pages or pages
1456 conflicting with the host address space). */
1457 int tlb_set_page(CPUState *env, target_ulong vaddr,
1458 target_phys_addr_t paddr, int prot,
1459 int is_user, int is_softmmu)
1461 PhysPageDesc *p;
1462 unsigned long pd;
1463 TranslationBlock *first_tb;
1464 unsigned int index;
1465 target_ulong address;
1466 unsigned long addend;
1467 int ret;
1469 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1470 first_tb = NULL;
1471 if (!p) {
1472 pd = IO_MEM_UNASSIGNED;
1473 } else {
1474 PageDesc *p1;
1475 pd = p->phys_offset;
1476 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1477 /* NOTE: we also allocate the page at this stage */
1478 p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1479 first_tb = p1->first_tb;
1482 #if defined(DEBUG_TLB)
1483 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1484 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1485 #endif
1487 ret = 0;
1488 #if !defined(CONFIG_SOFTMMU)
1489 if (is_softmmu)
1490 #endif
1492 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1493 /* IO memory case */
1494 address = vaddr | pd;
1495 addend = paddr;
1496 } else {
1497 /* standard memory */
1498 address = vaddr;
1499 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1502 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1503 addend -= vaddr;
1504 if (prot & PAGE_READ) {
1505 env->tlb_read[is_user][index].address = address;
1506 env->tlb_read[is_user][index].addend = addend;
1507 } else {
1508 env->tlb_read[is_user][index].address = -1;
1509 env->tlb_read[is_user][index].addend = -1;
1511 if (prot & PAGE_WRITE) {
1512 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1513 /* ROM: access is ignored (same as unassigned) */
1514 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1515 env->tlb_write[is_user][index].addend = addend;
1516 } else
1517 /* XXX: the PowerPC code seems not ready to handle
1518 self modifying code with DCBI */
1519 #if defined(TARGET_HAS_SMC) || 1
1520 if (first_tb) {
1521 /* if code is present, we use a specific memory
1522 handler. It works only for physical memory access */
1523 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1524 env->tlb_write[is_user][index].addend = addend;
1525 } else
1526 #endif
1527 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1528 !cpu_physical_memory_is_dirty(pd)) {
1529 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1530 env->tlb_write[is_user][index].addend = addend;
1531 } else {
1532 env->tlb_write[is_user][index].address = address;
1533 env->tlb_write[is_user][index].addend = addend;
1535 } else {
1536 env->tlb_write[is_user][index].address = -1;
1537 env->tlb_write[is_user][index].addend = -1;
1540 #if !defined(CONFIG_SOFTMMU)
1541 else {
1542 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1543 /* IO access: no mapping is done as it will be handled by the
1544 soft MMU */
1545 if (!(env->hflags & HF_SOFTMMU_MASK))
1546 ret = 2;
1547 } else {
1548 void *map_addr;
1550 if (vaddr >= MMAP_AREA_END) {
1551 ret = 2;
1552 } else {
1553 if (prot & PROT_WRITE) {
1554 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1555 #if defined(TARGET_HAS_SMC) || 1
1556 first_tb ||
1557 #endif
1558 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1559 !cpu_physical_memory_is_dirty(pd))) {
1560 /* ROM: we behave as if code were inside */
1561 /* if code is present, we only map as read only and save the
1562 original mapping */
1563 VirtPageDesc *vp;
1565 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1566 vp->phys_addr = pd;
1567 vp->prot = prot;
1568 vp->valid_tag = virt_valid_tag;
1569 prot &= ~PAGE_WRITE;
1572 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1573 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1574 if (map_addr == MAP_FAILED) {
1575 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1576 paddr, vaddr);
1581 #endif
1582 return ret;
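/* Each soft-MMU TLB entry pairs an 'address' and an 'addend'.  'address' keeps
   the tag bits of the virtual page plus, in the low bits, the access type
   (IO_MEM_RAM for plain RAM, or IO_MEM_ROM / IO_MEM_CODE / IO_MEM_NOTDIRTY /
   an io_index for device memory).  'addend' is chosen so that
   host_ptr = guest_vaddr + addend for RAM (and the guest physical address for
   I/O), which lets the fast path turn a guest address into a host pointer with
   a single add after the tag compare. */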
1585 /* called from signal handler: invalidate the code and unprotect the
1586 page. Return TRUE if the fault was successfully handled. */
1587 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1589 #if !defined(CONFIG_SOFTMMU)
1590 VirtPageDesc *vp;
1592 #if defined(DEBUG_TLB)
1593 printf("page_unprotect: addr=0x%08x\n", addr);
1594 #endif
1595 addr &= TARGET_PAGE_MASK;
1597 /* if it is not mapped, no need to worry here */
1598 if (addr >= MMAP_AREA_END)
1599 return 0;
1600 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1601 if (!vp)
1602 return 0;
1603 /* NOTE: in this case, validate_tag is _not_ tested as it
1604 validates only the code TLB */
1605 if (vp->valid_tag != virt_valid_tag)
1606 return 0;
1607 if (!(vp->prot & PAGE_WRITE))
1608 return 0;
1609 #if defined(DEBUG_TLB)
1610 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1611 addr, vp->phys_addr, vp->prot);
1612 #endif
1613 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1614 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1615 (unsigned long)addr, vp->prot);
1616 /* set the dirty bit */
1617 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1618 /* flush the code inside */
1619 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1620 return 1;
1621 #else
1622 return 0;
1623 #endif
1626 #else
1628 void tlb_flush(CPUState *env, int flush_global)
1632 void tlb_flush_page(CPUState *env, target_ulong addr)
1636 int tlb_set_page(CPUState *env, target_ulong vaddr,
1637 target_phys_addr_t paddr, int prot,
1638 int is_user, int is_softmmu)
1640 return 0;
1643 /* dump memory mappings */
1644 void page_dump(FILE *f)
1646 unsigned long start, end;
1647 int i, j, prot, prot1;
1648 PageDesc *p;
1650 fprintf(f, "%-8s %-8s %-8s %s\n",
1651 "start", "end", "size", "prot");
1652 start = -1;
1653 end = -1;
1654 prot = 0;
1655 for(i = 0; i <= L1_SIZE; i++) {
1656 if (i < L1_SIZE)
1657 p = l1_map[i];
1658 else
1659 p = NULL;
1660 for(j = 0;j < L2_SIZE; j++) {
1661 if (!p)
1662 prot1 = 0;
1663 else
1664 prot1 = p[j].flags;
1665 if (prot1 != prot) {
1666 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1667 if (start != -1) {
1668 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1669 start, end, end - start,
1670 prot & PAGE_READ ? 'r' : '-',
1671 prot & PAGE_WRITE ? 'w' : '-',
1672 prot & PAGE_EXEC ? 'x' : '-');
1674 if (prot1 != 0)
1675 start = end;
1676 else
1677 start = -1;
1678 prot = prot1;
1680 if (!p)
1681 break;
1686 int page_get_flags(unsigned long address)
1688 PageDesc *p;
1690 p = page_find(address >> TARGET_PAGE_BITS);
1691 if (!p)
1692 return 0;
1693 return p->flags;
1696 /* modify the flags of a page and invalidate the code if
1697 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1698 depending on PAGE_WRITE */
1699 void page_set_flags(unsigned long start, unsigned long end, int flags)
1701 PageDesc *p;
1702 unsigned long addr;
1704 start = start & TARGET_PAGE_MASK;
1705 end = TARGET_PAGE_ALIGN(end);
1706 if (flags & PAGE_WRITE)
1707 flags |= PAGE_WRITE_ORG;
1708 spin_lock(&tb_lock);
1709 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1710 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1711 /* if the write protection is set, then we invalidate the code
1712 inside */
1713 if (!(p->flags & PAGE_WRITE) &&
1714 (flags & PAGE_WRITE) &&
1715 p->first_tb) {
1716 tb_invalidate_phys_page(addr, 0, NULL);
1718 p->flags = flags;
1720 spin_unlock(&tb_lock);
1723 /* called from signal handler: invalidate the code and unprotect the
1724 page. Return TRUE if the fault was successfully handled. */
1725 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1727 unsigned int page_index, prot, pindex;
1728 PageDesc *p, *p1;
1729 unsigned long host_start, host_end, addr;
1731 host_start = address & host_page_mask;
1732 page_index = host_start >> TARGET_PAGE_BITS;
1733 p1 = page_find(page_index);
1734 if (!p1)
1735 return 0;
1736 host_end = host_start + host_page_size;
1737 p = p1;
1738 prot = 0;
1739 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1740 prot |= p->flags;
1741 p++;
1743 /* if the page was really writable, then we change its
1744 protection back to writable */
1745 if (prot & PAGE_WRITE_ORG) {
1746 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1747 if (!(p1[pindex].flags & PAGE_WRITE)) {
1748 mprotect((void *)host_start, host_page_size,
1749 (prot & PAGE_BITS) | PAGE_WRITE);
1750 p1[pindex].flags |= PAGE_WRITE;
1751 /* and since the content will be modified, we must invalidate
1752 the corresponding translated code. */
1753 tb_invalidate_phys_page(address, pc, puc);
1754 #ifdef DEBUG_TB_CHECK
1755 tb_invalidate_check(address);
1756 #endif
1757 return 1;
1760 return 0;
1763 /* call this function when system calls directly modify a memory area */
1764 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1766 unsigned long start, end, addr;
1768 start = (unsigned long)data;
1769 end = start + data_size;
1770 start &= TARGET_PAGE_MASK;
1771 end = TARGET_PAGE_ALIGN(end);
1772 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1773 page_unprotect(addr, 0, NULL);
1777 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1780 #endif /* defined(CONFIG_USER_ONLY) */
1782 /* register physical memory. 'size' must be a multiple of the target
1783 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1784 io memory page */
1785 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1786 unsigned long size,
1787 unsigned long phys_offset)
1789 unsigned long addr, end_addr;
1790 PhysPageDesc *p;
1792 end_addr = start_addr + size;
1793 for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
1794 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
1795 p->phys_offset = phys_offset;
1796 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1797 phys_offset += TARGET_PAGE_SIZE;
1801 static uint32_t unassigned_mem_readb(target_phys_addr_t addr)
1803 return 0;
1806 static void unassigned_mem_writeb(target_phys_addr_t addr, uint32_t val)
1810 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1811 unassigned_mem_readb,
1812 unassigned_mem_readb,
1813 unassigned_mem_readb,
1816 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1817 unassigned_mem_writeb,
1818 unassigned_mem_writeb,
1819 unassigned_mem_writeb,
1822 /* self modifying code support in soft mmu mode: writing to a page
1823 containing code comes to these functions */
1825 static void code_mem_writeb(target_phys_addr_t addr, uint32_t val)
1827 unsigned long phys_addr;
1829 phys_addr = addr - (unsigned long)phys_ram_base;
1830 #if !defined(CONFIG_USER_ONLY)
1831 tb_invalidate_phys_page_fast(phys_addr, 1);
1832 #endif
1833 stb_raw((uint8_t *)addr, val);
1834 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1837 static void code_mem_writew(target_phys_addr_t addr, uint32_t val)
1839 unsigned long phys_addr;
1841 phys_addr = addr - (unsigned long)phys_ram_base;
1842 #if !defined(CONFIG_USER_ONLY)
1843 tb_invalidate_phys_page_fast(phys_addr, 2);
1844 #endif
1845 stw_raw((uint8_t *)addr, val);
1846 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1849 static void code_mem_writel(target_phys_addr_t addr, uint32_t val)
1851 unsigned long phys_addr;
1853 phys_addr = addr - (unsigned long)phys_ram_base;
1854 #if !defined(CONFIG_USER_ONLY)
1855 tb_invalidate_phys_page_fast(phys_addr, 4);
1856 #endif
1857 stl_raw((uint8_t *)addr, val);
1858 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1861 static CPUReadMemoryFunc *code_mem_read[3] = {
1862 NULL, /* never used */
1863 NULL, /* never used */
1864 NULL, /* never used */
1867 static CPUWriteMemoryFunc *code_mem_write[3] = {
1868 code_mem_writeb,
1869 code_mem_writew,
1870 code_mem_writel,
1873 static void notdirty_mem_writeb(target_phys_addr_t addr, uint32_t val)
1875 stb_raw((uint8_t *)addr, val);
1876 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1879 static void notdirty_mem_writew(target_phys_addr_t addr, uint32_t val)
1881 stw_raw((uint8_t *)addr, val);
1882 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1885 static void notdirty_mem_writel(target_phys_addr_t addr, uint32_t val)
1887 stl_raw((uint8_t *)addr, val);
1888 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1891 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1892 notdirty_mem_writeb,
1893 notdirty_mem_writew,
1894 notdirty_mem_writel,
1897 static void io_mem_init(void)
1899 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
1900 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
1901 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
1902 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
1903 io_mem_nb = 5;
1905 /* alloc dirty bits array */
1906 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
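/* io_mem_init() wires the built-in slots that tlb_set_page() and
   cpu_physical_memory_rw() rely on: ROM (writes ignored), UNASSIGNED (reads
   return 0, writes ignored), CODE (writes invalidate translated code first)
   and NOTDIRTY (writes re-mark the page dirty).  io_mem_nb starts at 5 so that
   cpu_register_io_memory(0, ...) hands out indices above the built-ins.  The
   dirty-page byte array is also allocated here, one byte per target page of
   guest RAM. */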
1909 /* mem_read and mem_write are arrays of functions containing the
1910 function to access byte (index 0), word (index 1) and dword (index
1911 2). All functions must be supplied. If io_index is non zero, the
1912 corresponding io zone is modified. If it is zero, a new io zone is
1913 allocated. The return value can be used with
1914 cpu_register_physical_memory(). (-1) is returned on error. */
1915 int cpu_register_io_memory(int io_index,
1916 CPUReadMemoryFunc **mem_read,
1917 CPUWriteMemoryFunc **mem_write)
1919 int i;
1921 if (io_index <= 0) {
1922 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1923 return -1;
1924 io_index = io_mem_nb++;
1925 } else {
1926 if (io_index >= IO_MEM_NB_ENTRIES)
1927 return -1;
1930 for(i = 0;i < 3; i++) {
1931 io_mem_read[io_index][i] = mem_read[i];
1932 io_mem_write[io_index][i] = mem_write[i];
1934 return io_index << IO_MEM_SHIFT;
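/* Registration sketch (hypothetical device): a board model would register its
   MMIO handlers and then map them, together with RAM, into the guest physical
   address space.  'mydev_read'/'mydev_write', 'ram_size' and the addresses are
   placeholders, not part of this file:

       int io;
       io = cpu_register_io_memory(0, mydev_read, mydev_write);
       cpu_register_physical_memory(0xe0000000, 0x1000, io);
       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);

   The value returned by cpu_register_io_memory() is already shifted by
   IO_MEM_SHIFT, so it can be passed directly as the phys_offset argument of
   cpu_register_physical_memory(). */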
1937 /* physical memory access (slow version, mainly for debug) */
1938 #if defined(CONFIG_USER_ONLY)
1939 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1940 int len, int is_write)
1942 int l, flags;
1943 target_ulong page;
1945 while (len > 0) {
1946 page = addr & TARGET_PAGE_MASK;
1947 l = (page + TARGET_PAGE_SIZE) - addr;
1948 if (l > len)
1949 l = len;
1950 flags = page_get_flags(page);
1951 if (!(flags & PAGE_VALID))
1952 return;
1953 if (is_write) {
1954 if (!(flags & PAGE_WRITE))
1955 return;
1956 memcpy((uint8_t *)addr, buf, l);
1957 } else {
1958 if (!(flags & PAGE_READ))
1959 return;
1960 memcpy(buf, (uint8_t *)addr, l);
1962 len -= l;
1963 buf += l;
1964 addr += l;
1967 #else
1968 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1969 int len, int is_write)
1971 int l, io_index;
1972 uint8_t *ptr;
1973 uint32_t val;
1974 target_phys_addr_t page;
1975 unsigned long pd;
1976 PhysPageDesc *p;
1978 while (len > 0) {
1979 page = addr & TARGET_PAGE_MASK;
1980 l = (page + TARGET_PAGE_SIZE) - addr;
1981 if (l > len)
1982 l = len;
1983 p = phys_page_find(page >> TARGET_PAGE_BITS);
1984 if (!p) {
1985 pd = IO_MEM_UNASSIGNED;
1986 } else {
1987 pd = p->phys_offset;
1990 if (is_write) {
1991 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1992 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1993 if (l >= 4 && ((addr & 3) == 0)) {
1994 /* 32 bit write access */
1995 val = ldl_raw(buf);
1996 io_mem_write[io_index][2](addr, val);
1997 l = 4;
1998 } else if (l >= 2 && ((addr & 1) == 0)) {
1999 /* 16 bit write access */
2000 val = lduw_raw(buf);
2001 io_mem_write[io_index][1](addr, val);
2002 l = 2;
2003 } else {
2004 /* 8 bit access */
2005 val = ldub_raw(buf);
2006 io_mem_write[io_index][0](addr, val);
2007 l = 1;
2009 } else {
2010 unsigned long addr1;
2011 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2012 /* RAM case */
2013 ptr = phys_ram_base + addr1;
2014 memcpy(ptr, buf, l);
2015 /* invalidate code */
2016 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2017 /* set dirty bit */
2018 phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
2020 } else {
2021 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2022 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2023 /* I/O case */
2024 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2025 if (l >= 4 && ((addr & 3) == 0)) {
2026 /* 32 bit read access */
2027 val = io_mem_read[io_index][2](addr);
2028 stl_raw(buf, val);
2029 l = 4;
2030 } else if (l >= 2 && ((addr & 1) == 0)) {
2031 /* 16 bit read access */
2032 val = io_mem_read[io_index][1](addr);
2033 stw_raw(buf, val);
2034 l = 2;
2035 } else {
2036 /* 8 bit access */
2037 val = io_mem_read[io_index][0](addr);
2038 stb_raw(buf, val);
2039 l = 1;
2041 } else {
2042 /* RAM case */
2043 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2044 (addr & ~TARGET_PAGE_MASK);
2045 memcpy(buf, ptr, l);
2048 len -= l;
2049 buf += l;
2050 addr += l;
2053 #endif
2055 /* virtual memory access for debug */
2056 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2057 uint8_t *buf, int len, int is_write)
2059 int l;
2060 target_ulong page, phys_addr;
2062 while (len > 0) {
2063 page = addr & TARGET_PAGE_MASK;
2064 phys_addr = cpu_get_phys_page_debug(env, page);
2065 /* if no physical page mapped, return an error */
2066 if (phys_addr == -1)
2067 return -1;
2068 l = (page + TARGET_PAGE_SIZE) - addr;
2069 if (l > len)
2070 l = len;
2071 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2072 buf, l, is_write);
2073 len -= l;
2074 buf += l;
2075 addr += l;
2077 return 0;
2080 #if !defined(CONFIG_USER_ONLY)
2082 #define MMUSUFFIX _cmmu
2083 #define GETPC() NULL
2084 #define env cpu_single_env
2086 #define SHIFT 0
2087 #include "softmmu_template.h"
2089 #define SHIFT 1
2090 #include "softmmu_template.h"
2092 #define SHIFT 2
2093 #include "softmmu_template.h"
2095 #define SHIFT 3
2096 #include "softmmu_template.h"
2098 #undef env
2100 #endif