Add quotes missing from previous patch.
[qemu/mini2440.git] / exec.c (blob 9843ae5e741b38585f7dd5dfe9638bc0d59be066)
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
45 /* make various TB consistency checks */
46 //#define DEBUG_TB_CHECK
47 //#define DEBUG_TLB_CHECK
49 /* threshold to flush the translated code buffer */
50 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
52 #define SMC_BITMAP_USE_THRESHOLD 10
54 #define MMAP_AREA_START 0x00000000
55 #define MMAP_AREA_END 0xa8000000
57 #if defined(TARGET_SPARC64)
58 #define TARGET_PHYS_ADDR_SPACE_BITS 41
59 #elif defined(TARGET_PPC64)
60 #define TARGET_PHYS_ADDR_SPACE_BITS 42
61 #else
62 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
63 #define TARGET_PHYS_ADDR_SPACE_BITS 32
64 #endif
66 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
67 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
68 int nb_tbs;
69 /* any access to the tbs or the page table must use this lock */
70 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
72 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
73 uint8_t *code_gen_ptr;
75 int phys_ram_size;
76 int phys_ram_fd;
77 uint8_t *phys_ram_base;
78 uint8_t *phys_ram_dirty;
80 CPUState *first_cpu;
81 /* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
83 CPUState *cpu_single_env;
85 typedef struct PageDesc {
86 /* list of TBs intersecting this ram page */
87 TranslationBlock *first_tb;
88 /* in order to optimize self modifying code, we count the number
89 of lookups we do to a given page to use a bitmap */
90 unsigned int code_write_count;
91 uint8_t *code_bitmap;
92 #if defined(CONFIG_USER_ONLY)
93 unsigned long flags;
94 #endif
95 } PageDesc;
97 typedef struct PhysPageDesc {
98 /* offset in host memory of the page + io_index in the low 12 bits */
99 uint32_t phys_offset;
100 } PhysPageDesc;
102 #define L2_BITS 10
103 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
105 #define L1_SIZE (1 << L1_BITS)
106 #define L2_SIZE (1 << L2_BITS)
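/* the (target) page index is looked up in two levels: the bits above
   L2_BITS select an entry in l1_map/l1_phys_map, and the low L2_BITS select
   the descriptor inside that second-level block (l1_phys_map gains an
   extra level when TARGET_PHYS_ADDR_SPACE_BITS > 32) */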
108 static void io_mem_init(void);
110 unsigned long qemu_real_host_page_size;
111 unsigned long qemu_host_page_bits;
112 unsigned long qemu_host_page_size;
113 unsigned long qemu_host_page_mask;
115 /* XXX: for system emulation, it could just be an array */
116 static PageDesc *l1_map[L1_SIZE];
117 PhysPageDesc **l1_phys_map;
119 /* io memory support */
120 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
121 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
122 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
123 static int io_mem_nb;
125 /* log support */
126 char *logfilename = "/tmp/qemu.log";
127 FILE *logfile;
128 int loglevel;
130 /* statistics */
131 static int tlb_flush_count;
132 static int tb_flush_count;
133 static int tb_phys_invalidate_count;
135 static void page_init(void)
137 /* NOTE: we can always suppose that qemu_host_page_size >=
138 TARGET_PAGE_SIZE */
139 #ifdef _WIN32
141 SYSTEM_INFO system_info;
142 DWORD old_protect;
144 GetSystemInfo(&system_info);
145 qemu_real_host_page_size = system_info.dwPageSize;
147 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
148 PAGE_EXECUTE_READWRITE, &old_protect);
150 #else
151 qemu_real_host_page_size = getpagesize();
153 unsigned long start, end;
155 start = (unsigned long)code_gen_buffer;
156 start &= ~(qemu_real_host_page_size - 1);
158 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
159 end += qemu_real_host_page_size - 1;
160 end &= ~(qemu_real_host_page_size - 1);
162 mprotect((void *)start, end - start,
163 PROT_READ | PROT_WRITE | PROT_EXEC);
165 #endif
167 if (qemu_host_page_size == 0)
168 qemu_host_page_size = qemu_real_host_page_size;
169 if (qemu_host_page_size < TARGET_PAGE_SIZE)
170 qemu_host_page_size = TARGET_PAGE_SIZE;
171 qemu_host_page_bits = 0;
172 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
173 qemu_host_page_bits++;
174 qemu_host_page_mask = ~(qemu_host_page_size - 1);
175 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
176 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
179 static inline PageDesc *page_find_alloc(unsigned int index)
181 PageDesc **lp, *p;
183 lp = &l1_map[index >> L2_BITS];
184 p = *lp;
185 if (!p) {
186 /* allocate if not found */
187 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
188 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
189 *lp = p;
191 return p + (index & (L2_SIZE - 1));
194 static inline PageDesc *page_find(unsigned int index)
196 PageDesc *p;
198 p = l1_map[index >> L2_BITS];
199 if (!p)
200 return 0;
201 return p + (index & (L2_SIZE - 1));
204 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
206 void **lp, **p;
207 PhysPageDesc *pd;
209 p = (void **)l1_phys_map;
210 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
212 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
213 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
214 #endif
215 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
216 p = *lp;
217 if (!p) {
218 /* allocate if not found */
219 if (!alloc)
220 return NULL;
221 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
222 memset(p, 0, sizeof(void *) * L1_SIZE);
223 *lp = p;
225 #endif
226 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
227 pd = *lp;
228 if (!pd) {
229 int i;
230 /* allocate if not found */
231 if (!alloc)
232 return NULL;
233 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
234 *lp = pd;
235 for (i = 0; i < L2_SIZE; i++)
236 pd[i].phys_offset = IO_MEM_UNASSIGNED;
238 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
241 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
243 return phys_page_find_alloc(index, 0);
246 #if !defined(CONFIG_USER_ONLY)
247 static void tlb_protect_code(ram_addr_t ram_addr);
248 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
249 target_ulong vaddr);
250 #endif
252 void cpu_exec_init(CPUState *env)
254 CPUState **penv;
255 int cpu_index;
257 if (!code_gen_ptr) {
258 code_gen_ptr = code_gen_buffer;
259 page_init();
260 io_mem_init();
262 env->next_cpu = NULL;
263 penv = &first_cpu;
264 cpu_index = 0;
265 while (*penv != NULL) {
266 penv = (CPUState **)&(*penv)->next_cpu;
267 cpu_index++;
269 env->cpu_index = cpu_index;
270 *penv = env;
273 static inline void invalidate_page_bitmap(PageDesc *p)
275 if (p->code_bitmap) {
276 qemu_free(p->code_bitmap);
277 p->code_bitmap = NULL;
279 p->code_write_count = 0;
282 /* set to NULL all the 'first_tb' fields in all PageDescs */
283 static void page_flush_tb(void)
285 int i, j;
286 PageDesc *p;
288 for(i = 0; i < L1_SIZE; i++) {
289 p = l1_map[i];
290 if (p) {
291 for(j = 0; j < L2_SIZE; j++) {
292 p->first_tb = NULL;
293 invalidate_page_bitmap(p);
294 p++;
300 /* flush all the translation blocks */
301 /* XXX: tb_flush is currently not thread safe */
302 void tb_flush(CPUState *env1)
304 CPUState *env;
305 #if defined(DEBUG_FLUSH)
306 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
307 code_gen_ptr - code_gen_buffer,
308 nb_tbs,
309 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
310 #endif
311 nb_tbs = 0;
313 for(env = first_cpu; env != NULL; env = env->next_cpu) {
314 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
317 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
318 page_flush_tb();
320 code_gen_ptr = code_gen_buffer;
321 /* XXX: flush processor icache at this point if cache flush is
322 expensive */
323 tb_flush_count++;
326 #ifdef DEBUG_TB_CHECK
328 static void tb_invalidate_check(unsigned long address)
330 TranslationBlock *tb;
331 int i;
332 address &= TARGET_PAGE_MASK;
333 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
334 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
335 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
336 address >= tb->pc + tb->size)) {
337 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
338 address, tb->pc, tb->size);
344 /* verify that all the pages have correct rights for code */
345 static void tb_page_check(void)
347 TranslationBlock *tb;
348 int i, flags1, flags2;
350 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
351 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
352 flags1 = page_get_flags(tb->pc);
353 flags2 = page_get_flags(tb->pc + tb->size - 1);
354 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
355 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
356 tb->pc, tb->size, flags1, flags2);
362 void tb_jmp_check(TranslationBlock *tb)
364 TranslationBlock *tb1;
365 unsigned int n1;
367 /* suppress any remaining jumps to this TB */
368 tb1 = tb->jmp_first;
369 for(;;) {
370 n1 = (long)tb1 & 3;
371 tb1 = (TranslationBlock *)((long)tb1 & ~3);
372 if (n1 == 2)
373 break;
374 tb1 = tb1->jmp_next[n1];
376 /* check end of list */
377 if (tb1 != tb) {
378 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
382 #endif
384 /* invalidate one TB */
385 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
386 int next_offset)
388 TranslationBlock *tb1;
389 for(;;) {
390 tb1 = *ptb;
391 if (tb1 == tb) {
392 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
393 break;
395 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
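/* note: TranslationBlock pointers stored in the per-page lists
   (page_next[]) and in the jump lists carry a tag in their two low bits:
   0 or 1 selects which of the TB's two pages (or which jump slot) the
   link belongs to, and 2 marks the head of the circular jmp_first list */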
399 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
401 TranslationBlock *tb1;
402 unsigned int n1;
404 for(;;) {
405 tb1 = *ptb;
406 n1 = (long)tb1 & 3;
407 tb1 = (TranslationBlock *)((long)tb1 & ~3);
408 if (tb1 == tb) {
409 *ptb = tb1->page_next[n1];
410 break;
412 ptb = &tb1->page_next[n1];
416 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
418 TranslationBlock *tb1, **ptb;
419 unsigned int n1;
421 ptb = &tb->jmp_next[n];
422 tb1 = *ptb;
423 if (tb1) {
424 /* find tb(n) in circular list */
425 for(;;) {
426 tb1 = *ptb;
427 n1 = (long)tb1 & 3;
428 tb1 = (TranslationBlock *)((long)tb1 & ~3);
429 if (n1 == n && tb1 == tb)
430 break;
431 if (n1 == 2) {
432 ptb = &tb1->jmp_first;
433 } else {
434 ptb = &tb1->jmp_next[n1];
437 /* now we can suppress tb(n) from the list */
438 *ptb = tb->jmp_next[n];
440 tb->jmp_next[n] = NULL;
444 /* reset the jump entry 'n' of a TB so that it is not chained to
445 another TB */
446 static inline void tb_reset_jump(TranslationBlock *tb, int n)
448 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
451 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
453 CPUState *env;
454 PageDesc *p;
455 unsigned int h, n1;
456 target_ulong phys_pc;
457 TranslationBlock *tb1, *tb2;
459 /* remove the TB from the hash list */
460 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
461 h = tb_phys_hash_func(phys_pc);
462 tb_remove(&tb_phys_hash[h], tb,
463 offsetof(TranslationBlock, phys_hash_next));
465 /* remove the TB from the page list */
466 if (tb->page_addr[0] != page_addr) {
467 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
468 tb_page_remove(&p->first_tb, tb);
469 invalidate_page_bitmap(p);
471 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
472 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
473 tb_page_remove(&p->first_tb, tb);
474 invalidate_page_bitmap(p);
477 tb_invalidated_flag = 1;
479 /* remove the TB from the hash list */
480 h = tb_jmp_cache_hash_func(tb->pc);
481 for(env = first_cpu; env != NULL; env = env->next_cpu) {
482 if (env->tb_jmp_cache[h] == tb)
483 env->tb_jmp_cache[h] = NULL;
486 /* suppress this TB from the two jump lists */
487 tb_jmp_remove(tb, 0);
488 tb_jmp_remove(tb, 1);
490 /* suppress any remaining jumps to this TB */
491 tb1 = tb->jmp_first;
492 for(;;) {
493 n1 = (long)tb1 & 3;
494 if (n1 == 2)
495 break;
496 tb1 = (TranslationBlock *)((long)tb1 & ~3);
497 tb2 = tb1->jmp_next[n1];
498 tb_reset_jump(tb1, n1);
499 tb1->jmp_next[n1] = NULL;
500 tb1 = tb2;
502 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
504 tb_phys_invalidate_count++;
507 static inline void set_bits(uint8_t *tab, int start, int len)
509 int end, mask, end1;
511 end = start + len;
512 tab += start >> 3;
513 mask = 0xff << (start & 7);
514 if ((start & ~7) == (end & ~7)) {
515 if (start < end) {
516 mask &= ~(0xff << (end & 7));
517 *tab |= mask;
519 } else {
520 *tab++ |= mask;
521 start = (start + 8) & ~7;
522 end1 = end & ~7;
523 while (start < end1) {
524 *tab++ = 0xff;
525 start += 8;
527 if (start < end) {
528 mask = ~(0xff << (end & 7));
529 *tab |= mask;
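/* the code bitmap has one bit per byte of the page, set for every byte
   covered by a translated block; tb_invalidate_phys_page_fast() uses it
   to skip the slow invalidation path for writes that do not touch
   translated code */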
534 static void build_page_bitmap(PageDesc *p)
536 int n, tb_start, tb_end;
537 TranslationBlock *tb;
539 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
540 if (!p->code_bitmap)
541 return;
542 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
544 tb = p->first_tb;
545 while (tb != NULL) {
546 n = (long)tb & 3;
547 tb = (TranslationBlock *)((long)tb & ~3);
548 /* NOTE: this is subtle as a TB may span two physical pages */
549 if (n == 0) {
550 /* NOTE: tb_end may be after the end of the page, but
551 it is not a problem */
552 tb_start = tb->pc & ~TARGET_PAGE_MASK;
553 tb_end = tb_start + tb->size;
554 if (tb_end > TARGET_PAGE_SIZE)
555 tb_end = TARGET_PAGE_SIZE;
556 } else {
557 tb_start = 0;
558 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
560 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
561 tb = tb->page_next[n];
565 #ifdef TARGET_HAS_PRECISE_SMC
567 static void tb_gen_code(CPUState *env,
568 target_ulong pc, target_ulong cs_base, int flags,
569 int cflags)
571 TranslationBlock *tb;
572 uint8_t *tc_ptr;
573 target_ulong phys_pc, phys_page2, virt_page2;
574 int code_gen_size;
576 phys_pc = get_phys_addr_code(env, pc);
577 tb = tb_alloc(pc);
578 if (!tb) {
579 /* flush must be done */
580 tb_flush(env);
581 /* cannot fail at this point */
582 tb = tb_alloc(pc);
584 tc_ptr = code_gen_ptr;
585 tb->tc_ptr = tc_ptr;
586 tb->cs_base = cs_base;
587 tb->flags = flags;
588 tb->cflags = cflags;
589 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
590 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
592 /* check next page if needed */
593 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
594 phys_page2 = -1;
595 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
596 phys_page2 = get_phys_addr_code(env, virt_page2);
598 tb_link_phys(tb, phys_pc, phys_page2);
600 #endif
602 /* invalidate all TBs which intersect with the target physical page
603 starting in range [start;end[. NOTE: start and end must refer to
604 the same physical page. 'is_cpu_write_access' should be true if called
605 from a real cpu write access: the virtual CPU will exit the current
606 TB if code is modified inside this TB. */
607 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
608 int is_cpu_write_access)
610 int n, current_tb_modified, current_tb_not_found, current_flags;
611 CPUState *env = cpu_single_env;
612 PageDesc *p;
613 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
614 target_ulong tb_start, tb_end;
615 target_ulong current_pc, current_cs_base;
617 p = page_find(start >> TARGET_PAGE_BITS);
618 if (!p)
619 return;
620 if (!p->code_bitmap &&
621 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
622 is_cpu_write_access) {
623 /* build code bitmap */
624 build_page_bitmap(p);
627 /* we remove all the TBs in the range [start, end[ */
628 /* XXX: see if in some cases it could be faster to invalidate all the code */
629 current_tb_not_found = is_cpu_write_access;
630 current_tb_modified = 0;
631 current_tb = NULL; /* avoid warning */
632 current_pc = 0; /* avoid warning */
633 current_cs_base = 0; /* avoid warning */
634 current_flags = 0; /* avoid warning */
635 tb = p->first_tb;
636 while (tb != NULL) {
637 n = (long)tb & 3;
638 tb = (TranslationBlock *)((long)tb & ~3);
639 tb_next = tb->page_next[n];
640 /* NOTE: this is subtle as a TB may span two physical pages */
641 if (n == 0) {
642 /* NOTE: tb_end may be after the end of the page, but
643 it is not a problem */
644 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
645 tb_end = tb_start + tb->size;
646 } else {
647 tb_start = tb->page_addr[1];
648 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
650 if (!(tb_end <= start || tb_start >= end)) {
651 #ifdef TARGET_HAS_PRECISE_SMC
652 if (current_tb_not_found) {
653 current_tb_not_found = 0;
654 current_tb = NULL;
655 if (env->mem_write_pc) {
656 /* now we have a real cpu fault */
657 current_tb = tb_find_pc(env->mem_write_pc);
660 if (current_tb == tb &&
661 !(current_tb->cflags & CF_SINGLE_INSN)) {
662 /* If we are modifying the current TB, we must stop
663 its execution. We could be more precise by checking
664 that the modification is after the current PC, but it
665 would require a specialized function to partially
666 restore the CPU state */
668 current_tb_modified = 1;
669 cpu_restore_state(current_tb, env,
670 env->mem_write_pc, NULL);
671 #if defined(TARGET_I386)
672 current_flags = env->hflags;
673 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
674 current_cs_base = (target_ulong)env->segs[R_CS].base;
675 current_pc = current_cs_base + env->eip;
676 #else
677 #error unsupported CPU
678 #endif
680 #endif /* TARGET_HAS_PRECISE_SMC */
681 /* we need to do that to handle the case where a signal
682 occurs while doing tb_phys_invalidate() */
683 saved_tb = NULL;
684 if (env) {
685 saved_tb = env->current_tb;
686 env->current_tb = NULL;
688 tb_phys_invalidate(tb, -1);
689 if (env) {
690 env->current_tb = saved_tb;
691 if (env->interrupt_request && env->current_tb)
692 cpu_interrupt(env, env->interrupt_request);
695 tb = tb_next;
697 #if !defined(CONFIG_USER_ONLY)
698 /* if no code remaining, no need to continue to use slow writes */
699 if (!p->first_tb) {
700 invalidate_page_bitmap(p);
701 if (is_cpu_write_access) {
702 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
705 #endif
706 #ifdef TARGET_HAS_PRECISE_SMC
707 if (current_tb_modified) {
708 /* we generate a block containing just the instruction
709 modifying the memory. It will ensure that it cannot modify
710 itself */
711 env->current_tb = NULL;
712 tb_gen_code(env, current_pc, current_cs_base, current_flags,
713 CF_SINGLE_INSN);
714 cpu_resume_from_signal(env, NULL);
716 #endif
719 /* len must be <= 8 and start must be a multiple of len */
720 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
722 PageDesc *p;
723 int offset, b;
724 #if 0
725 if (1) {
726 if (loglevel) {
727 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
728 cpu_single_env->mem_write_vaddr, len,
729 cpu_single_env->eip,
730 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
733 #endif
734 p = page_find(start >> TARGET_PAGE_BITS);
735 if (!p)
736 return;
737 if (p->code_bitmap) {
738 offset = start & ~TARGET_PAGE_MASK;
739 b = p->code_bitmap[offset >> 3] >> (offset & 7);
740 if (b & ((1 << len) - 1))
741 goto do_invalidate;
742 } else {
743 do_invalidate:
744 tb_invalidate_phys_page_range(start, start + len, 1);
748 #if !defined(CONFIG_SOFTMMU)
749 static void tb_invalidate_phys_page(target_ulong addr,
750 unsigned long pc, void *puc)
752 int n, current_flags, current_tb_modified;
753 target_ulong current_pc, current_cs_base;
754 PageDesc *p;
755 TranslationBlock *tb, *current_tb;
756 #ifdef TARGET_HAS_PRECISE_SMC
757 CPUState *env = cpu_single_env;
758 #endif
760 addr &= TARGET_PAGE_MASK;
761 p = page_find(addr >> TARGET_PAGE_BITS);
762 if (!p)
763 return;
764 tb = p->first_tb;
765 current_tb_modified = 0;
766 current_tb = NULL;
767 current_pc = 0; /* avoid warning */
768 current_cs_base = 0; /* avoid warning */
769 current_flags = 0; /* avoid warning */
770 #ifdef TARGET_HAS_PRECISE_SMC
771 if (tb && pc != 0) {
772 current_tb = tb_find_pc(pc);
774 #endif
775 while (tb != NULL) {
776 n = (long)tb & 3;
777 tb = (TranslationBlock *)((long)tb & ~3);
778 #ifdef TARGET_HAS_PRECISE_SMC
779 if (current_tb == tb &&
780 !(current_tb->cflags & CF_SINGLE_INSN)) {
781 /* If we are modifying the current TB, we must stop
782 its execution. We could be more precise by checking
783 that the modification is after the current PC, but it
784 would require a specialized function to partially
785 restore the CPU state */
787 current_tb_modified = 1;
788 cpu_restore_state(current_tb, env, pc, puc);
789 #if defined(TARGET_I386)
790 current_flags = env->hflags;
791 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
792 current_cs_base = (target_ulong)env->segs[R_CS].base;
793 current_pc = current_cs_base + env->eip;
794 #else
795 #error unsupported CPU
796 #endif
798 #endif /* TARGET_HAS_PRECISE_SMC */
799 tb_phys_invalidate(tb, addr);
800 tb = tb->page_next[n];
802 p->first_tb = NULL;
803 #ifdef TARGET_HAS_PRECISE_SMC
804 if (current_tb_modified) {
805 /* we generate a block containing just the instruction
806 modifying the memory. It will ensure that it cannot modify
807 itself */
808 env->current_tb = NULL;
809 tb_gen_code(env, current_pc, current_cs_base, current_flags,
810 CF_SINGLE_INSN);
811 cpu_resume_from_signal(env, puc);
813 #endif
815 #endif
817 /* add the tb in the target page and protect it if necessary */
818 static inline void tb_alloc_page(TranslationBlock *tb,
819 unsigned int n, target_ulong page_addr)
821 PageDesc *p;
822 TranslationBlock *last_first_tb;
824 tb->page_addr[n] = page_addr;
825 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
826 tb->page_next[n] = p->first_tb;
827 last_first_tb = p->first_tb;
828 p->first_tb = (TranslationBlock *)((long)tb | n);
829 invalidate_page_bitmap(p);
831 #if defined(TARGET_HAS_SMC) || 1
833 #if defined(CONFIG_USER_ONLY)
834 if (p->flags & PAGE_WRITE) {
835 target_ulong addr;
836 PageDesc *p2;
837 int prot;
839 /* force the host page as non writable (writes will have a
840 page fault + mprotect overhead) */
841 page_addr &= qemu_host_page_mask;
842 prot = 0;
843 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
844 addr += TARGET_PAGE_SIZE) {
846 p2 = page_find (addr >> TARGET_PAGE_BITS);
847 if (!p2)
848 continue;
849 prot |= p2->flags;
850 p2->flags &= ~PAGE_WRITE;
851 page_get_flags(addr);
853 mprotect(g2h(page_addr), qemu_host_page_size,
854 (prot & PAGE_BITS) & ~PAGE_WRITE);
855 #ifdef DEBUG_TB_INVALIDATE
856 printf("protecting code page: 0x%08lx\n",
857 page_addr);
858 #endif
860 #else
861 /* if some code is already present, then the pages are already
862 protected. So we handle the case where only the first TB is
863 allocated in a physical page */
864 if (!last_first_tb) {
865 tlb_protect_code(page_addr);
867 #endif
869 #endif /* TARGET_HAS_SMC */
872 /* Allocate a new translation block. Flush the translation buffer if
873 too many translation blocks or too much generated code. */
874 TranslationBlock *tb_alloc(target_ulong pc)
876 TranslationBlock *tb;
878 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
879 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
880 return NULL;
881 tb = &tbs[nb_tbs++];
882 tb->pc = pc;
883 tb->cflags = 0;
884 return tb;
887 /* add a new TB and link it to the physical page tables. phys_page2 is
888 (-1) to indicate that only one page contains the TB. */
889 void tb_link_phys(TranslationBlock *tb,
890 target_ulong phys_pc, target_ulong phys_page2)
892 unsigned int h;
893 TranslationBlock **ptb;
895 /* add in the physical hash table */
896 h = tb_phys_hash_func(phys_pc);
897 ptb = &tb_phys_hash[h];
898 tb->phys_hash_next = *ptb;
899 *ptb = tb;
901 /* add in the page list */
902 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
903 if (phys_page2 != -1)
904 tb_alloc_page(tb, 1, phys_page2);
905 else
906 tb->page_addr[1] = -1;
908 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
909 tb->jmp_next[0] = NULL;
910 tb->jmp_next[1] = NULL;
911 #ifdef USE_CODE_COPY
912 tb->cflags &= ~CF_FP_USED;
913 if (tb->cflags & CF_TB_FP_USED)
914 tb->cflags |= CF_FP_USED;
915 #endif
917 /* init original jump addresses */
918 if (tb->tb_next_offset[0] != 0xffff)
919 tb_reset_jump(tb, 0);
920 if (tb->tb_next_offset[1] != 0xffff)
921 tb_reset_jump(tb, 1);
923 #ifdef DEBUG_TB_CHECK
924 tb_page_check();
925 #endif
928 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
929 tb[1].tc_ptr. Return NULL if not found */
930 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
932 int m_min, m_max, m;
933 unsigned long v;
934 TranslationBlock *tb;
936 if (nb_tbs <= 0)
937 return NULL;
938 if (tc_ptr < (unsigned long)code_gen_buffer ||
939 tc_ptr >= (unsigned long)code_gen_ptr)
940 return NULL;
941 /* binary search (cf Knuth) */
942 m_min = 0;
943 m_max = nb_tbs - 1;
944 while (m_min <= m_max) {
945 m = (m_min + m_max) >> 1;
946 tb = &tbs[m];
947 v = (unsigned long)tb->tc_ptr;
948 if (v == tc_ptr)
949 return tb;
950 else if (tc_ptr < v) {
951 m_max = m - 1;
952 } else {
953 m_min = m + 1;
956 return &tbs[m_max];
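/* the binary search above is valid because TBs are allocated in tbs[] in
   the same order as their code is emitted into code_gen_buffer, so tc_ptr
   increases monotonically with the array index */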
959 static void tb_reset_jump_recursive(TranslationBlock *tb);
961 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
963 TranslationBlock *tb1, *tb_next, **ptb;
964 unsigned int n1;
966 tb1 = tb->jmp_next[n];
967 if (tb1 != NULL) {
968 /* find head of list */
969 for(;;) {
970 n1 = (long)tb1 & 3;
971 tb1 = (TranslationBlock *)((long)tb1 & ~3);
972 if (n1 == 2)
973 break;
974 tb1 = tb1->jmp_next[n1];
976 /* we are now sure that tb jumps to tb1 */
977 tb_next = tb1;
979 /* remove tb from the jmp_first list */
980 ptb = &tb_next->jmp_first;
981 for(;;) {
982 tb1 = *ptb;
983 n1 = (long)tb1 & 3;
984 tb1 = (TranslationBlock *)((long)tb1 & ~3);
985 if (n1 == n && tb1 == tb)
986 break;
987 ptb = &tb1->jmp_next[n1];
989 *ptb = tb->jmp_next[n];
990 tb->jmp_next[n] = NULL;
992 /* suppress the jump to next tb in generated code */
993 tb_reset_jump(tb, n);
995 /* suppress jumps in the tb on which we could have jumped */
996 tb_reset_jump_recursive(tb_next);
1000 static void tb_reset_jump_recursive(TranslationBlock *tb)
1002 tb_reset_jump_recursive2(tb, 0);
1003 tb_reset_jump_recursive2(tb, 1);
1006 #if defined(TARGET_HAS_ICE)
1007 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1009 target_ulong addr, pd;
1010 ram_addr_t ram_addr;
1011 PhysPageDesc *p;
1013 addr = cpu_get_phys_page_debug(env, pc);
1014 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1015 if (!p) {
1016 pd = IO_MEM_UNASSIGNED;
1017 } else {
1018 pd = p->phys_offset;
1020 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1021 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1023 #endif
1025 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1026 breakpoint is reached */
1027 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1029 #if defined(TARGET_HAS_ICE)
1030 int i;
1032 for(i = 0; i < env->nb_breakpoints; i++) {
1033 if (env->breakpoints[i] == pc)
1034 return 0;
1037 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1038 return -1;
1039 env->breakpoints[env->nb_breakpoints++] = pc;
1041 breakpoint_invalidate(env, pc);
1042 return 0;
1043 #else
1044 return -1;
1045 #endif
1048 /* remove a breakpoint */
1049 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1051 #if defined(TARGET_HAS_ICE)
1052 int i;
1053 for(i = 0; i < env->nb_breakpoints; i++) {
1054 if (env->breakpoints[i] == pc)
1055 goto found;
1057 return -1;
1058 found:
1059 env->nb_breakpoints--;
1060 if (i < env->nb_breakpoints)
1061 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1063 breakpoint_invalidate(env, pc);
1064 return 0;
1065 #else
1066 return -1;
1067 #endif
1070 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1071 CPU loop after each instruction */
1072 void cpu_single_step(CPUState *env, int enabled)
1074 #if defined(TARGET_HAS_ICE)
1075 if (env->singlestep_enabled != enabled) {
1076 env->singlestep_enabled = enabled;
1077 /* must flush all the translated code to avoid inconsistencies */
1078 /* XXX: only flush what is necessary */
1079 tb_flush(env);
1081 #endif
1084 /* enable or disable low levels log */
1085 void cpu_set_log(int log_flags)
1087 loglevel = log_flags;
1088 if (loglevel && !logfile) {
1089 logfile = fopen(logfilename, "w");
1090 if (!logfile) {
1091 perror(logfilename);
1092 _exit(1);
1094 #if !defined(CONFIG_SOFTMMU)
1095 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1097 static uint8_t logfile_buf[4096];
1098 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1100 #else
1101 setvbuf(logfile, NULL, _IOLBF, 0);
1102 #endif
1106 void cpu_set_log_filename(const char *filename)
1108 logfilename = strdup(filename);
1111 /* mask must never be zero, except for A20 change call */
1112 void cpu_interrupt(CPUState *env, int mask)
1114 TranslationBlock *tb;
1115 static int interrupt_lock;
1117 env->interrupt_request |= mask;
1118 /* if the cpu is currently executing code, we must unlink it and
1119 all the potentially executing TB */
1120 tb = env->current_tb;
1121 if (tb && !testandset(&interrupt_lock)) {
1122 env->current_tb = NULL;
1123 tb_reset_jump_recursive(tb);
1124 interrupt_lock = 0;
1128 void cpu_reset_interrupt(CPUState *env, int mask)
1130 env->interrupt_request &= ~mask;
1133 CPULogItem cpu_log_items[] = {
1134 { CPU_LOG_TB_OUT_ASM, "out_asm",
1135 "show generated host assembly code for each compiled TB" },
1136 { CPU_LOG_TB_IN_ASM, "in_asm",
1137 "show target assembly code for each compiled TB" },
1138 { CPU_LOG_TB_OP, "op",
1139 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1140 #ifdef TARGET_I386
1141 { CPU_LOG_TB_OP_OPT, "op_opt",
1142 "show micro ops after optimization for each compiled TB" },
1143 #endif
1144 { CPU_LOG_INT, "int",
1145 "show interrupts/exceptions in short format" },
1146 { CPU_LOG_EXEC, "exec",
1147 "show trace before each executed TB (lots of logs)" },
1148 { CPU_LOG_TB_CPU, "cpu",
1149 "show CPU state before block translation" },
1150 #ifdef TARGET_I386
1151 { CPU_LOG_PCALL, "pcall",
1152 "show protected mode far calls/returns/exceptions" },
1153 #endif
1154 #ifdef DEBUG_IOPORT
1155 { CPU_LOG_IOPORT, "ioport",
1156 "show all i/o ports accesses" },
1157 #endif
1158 { 0, NULL, NULL },
1161 static int cmp1(const char *s1, int n, const char *s2)
1163 if (strlen(s2) != n)
1164 return 0;
1165 return memcmp(s1, s2, n) == 0;
1168 /* takes a comma separated list of log masks. Return 0 if error. */
1169 int cpu_str_to_log_mask(const char *str)
1171 CPULogItem *item;
1172 int mask;
1173 const char *p, *p1;
1175 p = str;
1176 mask = 0;
1177 for(;;) {
1178 p1 = strchr(p, ',');
1179 if (!p1)
1180 p1 = p + strlen(p);
1181 if(cmp1(p,p1-p,"all")) {
1182 for(item = cpu_log_items; item->mask != 0; item++) {
1183 mask |= item->mask;
1185 } else {
1186 for(item = cpu_log_items; item->mask != 0; item++) {
1187 if (cmp1(p, p1 - p, item->name))
1188 goto found;
1190 return 0;
1192 found:
1193 mask |= item->mask;
1194 if (*p1 != ',')
1195 break;
1196 p = p1 + 1;
1198 return mask;
1201 void cpu_abort(CPUState *env, const char *fmt, ...)
1203 va_list ap;
1205 va_start(ap, fmt);
1206 fprintf(stderr, "qemu: fatal: ");
1207 vfprintf(stderr, fmt, ap);
1208 fprintf(stderr, "\n");
1209 #ifdef TARGET_I386
1210 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1211 #else
1212 cpu_dump_state(env, stderr, fprintf, 0);
1213 #endif
1214 va_end(ap);
1215 abort();
1218 #if !defined(CONFIG_USER_ONLY)
1220 /* NOTE: if flush_global is true, also flush global entries (not
1221 implemented yet) */
1222 void tlb_flush(CPUState *env, int flush_global)
1224 int i;
1226 #if defined(DEBUG_TLB)
1227 printf("tlb_flush:\n");
1228 #endif
1229 /* must reset current TB so that interrupts cannot modify the
1230 links while we are modifying them */
1231 env->current_tb = NULL;
1233 for(i = 0; i < CPU_TLB_SIZE; i++) {
1234 env->tlb_table[0][i].addr_read = -1;
1235 env->tlb_table[0][i].addr_write = -1;
1236 env->tlb_table[0][i].addr_code = -1;
1237 env->tlb_table[1][i].addr_read = -1;
1238 env->tlb_table[1][i].addr_write = -1;
1239 env->tlb_table[1][i].addr_code = -1;
1242 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1244 #if !defined(CONFIG_SOFTMMU)
1245 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1246 #endif
1247 #ifdef USE_KQEMU
1248 if (env->kqemu_enabled) {
1249 kqemu_flush(env, flush_global);
1251 #endif
1252 tlb_flush_count++;
1255 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1257 if (addr == (tlb_entry->addr_read &
1258 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1259 addr == (tlb_entry->addr_write &
1260 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1261 addr == (tlb_entry->addr_code &
1262 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1263 tlb_entry->addr_read = -1;
1264 tlb_entry->addr_write = -1;
1265 tlb_entry->addr_code = -1;
1269 void tlb_flush_page(CPUState *env, target_ulong addr)
1271 int i;
1272 TranslationBlock *tb;
1274 #if defined(DEBUG_TLB)
1275 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1276 #endif
1277 /* must reset current TB so that interrupts cannot modify the
1278 links while we are modifying them */
1279 env->current_tb = NULL;
1281 addr &= TARGET_PAGE_MASK;
1282 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1283 tlb_flush_entry(&env->tlb_table[0][i], addr);
1284 tlb_flush_entry(&env->tlb_table[1][i], addr);
1286 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1287 tb = env->tb_jmp_cache[i];
1288 if (tb &&
1289 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1290 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1291 env->tb_jmp_cache[i] = NULL;
1295 #if !defined(CONFIG_SOFTMMU)
1296 if (addr < MMAP_AREA_END)
1297 munmap((void *)addr, TARGET_PAGE_SIZE);
1298 #endif
1299 #ifdef USE_KQEMU
1300 if (env->kqemu_enabled) {
1301 kqemu_flush_page(env, addr);
1303 #endif
1306 /* update the TLBs so that writes to code in the virtual page 'addr'
1307 can be detected */
1308 static void tlb_protect_code(ram_addr_t ram_addr)
1310 cpu_physical_memory_reset_dirty(ram_addr,
1311 ram_addr + TARGET_PAGE_SIZE,
1312 CODE_DIRTY_FLAG);
1315 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1316 tested for self modifying code */
1317 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1318 target_ulong vaddr)
1320 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
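/* clearing CODE_DIRTY_FLAG (tlb_protect_code) switches the TLB write
   entries of the page to IO_MEM_NOTDIRTY, so guest stores to pages
   containing translated code go through the notdirty_mem_write* handlers
   and can invalidate the affected TBs; tlb_unprotect_code_phys sets the
   flag again once no translated code remains in the page */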
1323 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1324 unsigned long start, unsigned long length)
1326 unsigned long addr;
1327 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1328 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1329 if ((addr - start) < length) {
1330 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1335 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1336 int dirty_flags)
1338 CPUState *env;
1339 unsigned long length, start1;
1340 int i, mask, len;
1341 uint8_t *p;
1343 start &= TARGET_PAGE_MASK;
1344 end = TARGET_PAGE_ALIGN(end);
1346 length = end - start;
1347 if (length == 0)
1348 return;
1349 len = length >> TARGET_PAGE_BITS;
1350 #ifdef USE_KQEMU
1351 /* XXX: should not depend on cpu context */
1352 env = first_cpu;
1353 if (env->kqemu_enabled) {
1354 ram_addr_t addr;
1355 addr = start;
1356 for(i = 0; i < len; i++) {
1357 kqemu_set_notdirty(env, addr);
1358 addr += TARGET_PAGE_SIZE;
1361 #endif
1362 mask = ~dirty_flags;
1363 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1364 for(i = 0; i < len; i++)
1365 p[i] &= mask;
1367 /* we modify the TLB cache so that the dirty bit will be set again
1368 when accessing the range */
1369 start1 = start + (unsigned long)phys_ram_base;
1370 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1371 for(i = 0; i < CPU_TLB_SIZE; i++)
1372 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1373 for(i = 0; i < CPU_TLB_SIZE; i++)
1374 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1377 #if !defined(CONFIG_SOFTMMU)
1378 /* XXX: this is expensive */
1380 VirtPageDesc *p;
1381 int j;
1382 target_ulong addr;
1384 for(i = 0; i < L1_SIZE; i++) {
1385 p = l1_virt_map[i];
1386 if (p) {
1387 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1388 for(j = 0; j < L2_SIZE; j++) {
1389 if (p->valid_tag == virt_valid_tag &&
1390 p->phys_addr >= start && p->phys_addr < end &&
1391 (p->prot & PROT_WRITE)) {
1392 if (addr < MMAP_AREA_END) {
1393 mprotect((void *)addr, TARGET_PAGE_SIZE,
1394 p->prot & ~PROT_WRITE);
1397 addr += TARGET_PAGE_SIZE;
1398 p++;
1403 #endif
1406 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1408 ram_addr_t ram_addr;
1410 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1411 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1412 tlb_entry->addend - (unsigned long)phys_ram_base;
1413 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1414 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1419 /* update the TLB according to the current state of the dirty bits */
1420 void cpu_tlb_update_dirty(CPUState *env)
1422 int i;
1423 for(i = 0; i < CPU_TLB_SIZE; i++)
1424 tlb_update_dirty(&env->tlb_table[0][i]);
1425 for(i = 0; i < CPU_TLB_SIZE; i++)
1426 tlb_update_dirty(&env->tlb_table[1][i]);
1429 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1430 unsigned long start)
1432 unsigned long addr;
1433 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1434 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1435 if (addr == start) {
1436 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1441 /* update the TLB corresponding to virtual page vaddr and phys addr
1442 addr so that it is no longer dirty */
1443 static inline void tlb_set_dirty(CPUState *env,
1444 unsigned long addr, target_ulong vaddr)
1446 int i;
1448 addr &= TARGET_PAGE_MASK;
1449 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1450 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1451 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1454 /* add a new TLB entry. At most one entry for a given virtual address
1455 is permitted. Return 0 if OK or 2 if the page could not be mapped
1456 (can only happen in non SOFTMMU mode for I/O pages or pages
1457 conflicting with the host address space). */
1458 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1459 target_phys_addr_t paddr, int prot,
1460 int is_user, int is_softmmu)
1462 PhysPageDesc *p;
1463 unsigned long pd;
1464 unsigned int index;
1465 target_ulong address;
1466 target_phys_addr_t addend;
1467 int ret;
1468 CPUTLBEntry *te;
1470 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1471 if (!p) {
1472 pd = IO_MEM_UNASSIGNED;
1473 } else {
1474 pd = p->phys_offset;
1476 #if defined(DEBUG_TLB)
1477 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1478 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1479 #endif
1481 ret = 0;
1482 #if !defined(CONFIG_SOFTMMU)
1483 if (is_softmmu)
1484 #endif
1486 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1487 /* IO memory case */
1488 address = vaddr | pd;
1489 addend = paddr;
1490 } else {
1491 /* standard memory */
1492 address = vaddr;
1493 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1496 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1497 addend -= vaddr;
1498 te = &env->tlb_table[is_user][index];
1499 te->addend = addend;
1500 if (prot & PAGE_READ) {
1501 te->addr_read = address;
1502 } else {
1503 te->addr_read = -1;
1505 if (prot & PAGE_EXEC) {
1506 te->addr_code = address;
1507 } else {
1508 te->addr_code = -1;
1510 if (prot & PAGE_WRITE) {
1511 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1512 /* ROM: access is ignored (same as unassigned) */
1513 te->addr_write = vaddr | IO_MEM_ROM;
1514 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1515 !cpu_physical_memory_is_dirty(pd)) {
1516 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1517 } else {
1518 te->addr_write = address;
1520 } else {
1521 te->addr_write = -1;
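/* addr_read/addr_write/addr_code keep the page-aligned virtual address in
   their high bits and an I/O index (IO_MEM_ROM, IO_MEM_NOTDIRTY, ...) in
   the low bits, so the generated softmmu fast path can detect special
   pages with a single tag comparison and fall back to the slow path */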
1524 #if !defined(CONFIG_SOFTMMU)
1525 else {
1526 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1527 /* IO access: no mapping is done as it will be handled by the
1528 soft MMU */
1529 if (!(env->hflags & HF_SOFTMMU_MASK))
1530 ret = 2;
1531 } else {
1532 void *map_addr;
1534 if (vaddr >= MMAP_AREA_END) {
1535 ret = 2;
1536 } else {
1537 if (prot & PROT_WRITE) {
1538 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1539 #if defined(TARGET_HAS_SMC) || 1
1540 first_tb ||
1541 #endif
1542 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1543 !cpu_physical_memory_is_dirty(pd))) {
1544 /* ROM: we do as if code was inside */
1545 /* if code is present, we only map as read only and save the
1546 original mapping */
1547 VirtPageDesc *vp;
1549 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1550 vp->phys_addr = pd;
1551 vp->prot = prot;
1552 vp->valid_tag = virt_valid_tag;
1553 prot &= ~PAGE_WRITE;
1556 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1557 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1558 if (map_addr == MAP_FAILED) {
1559 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1560 paddr, vaddr);
1565 #endif
1566 return ret;
1569 /* called from signal handler: invalidate the code and unprotect the
1570 page. Return TRUE if the fault was successfully handled. */
1571 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1573 #if !defined(CONFIG_SOFTMMU)
1574 VirtPageDesc *vp;
1576 #if defined(DEBUG_TLB)
1577 printf("page_unprotect: addr=0x%08x\n", addr);
1578 #endif
1579 addr &= TARGET_PAGE_MASK;
1581 /* if it is not mapped, no need to worry here */
1582 if (addr >= MMAP_AREA_END)
1583 return 0;
1584 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1585 if (!vp)
1586 return 0;
1587 /* NOTE: in this case, valid_tag is _not_ tested as it
1588 validates only the code TLB */
1589 if (vp->valid_tag != virt_valid_tag)
1590 return 0;
1591 if (!(vp->prot & PAGE_WRITE))
1592 return 0;
1593 #if defined(DEBUG_TLB)
1594 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1595 addr, vp->phys_addr, vp->prot);
1596 #endif
1597 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1598 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1599 (unsigned long)addr, vp->prot);
1600 /* set the dirty bit */
1601 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1602 /* flush the code inside */
1603 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1604 return 1;
1605 #else
1606 return 0;
1607 #endif
1610 #else
1612 void tlb_flush(CPUState *env, int flush_global)
1616 void tlb_flush_page(CPUState *env, target_ulong addr)
1620 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1621 target_phys_addr_t paddr, int prot,
1622 int is_user, int is_softmmu)
1624 return 0;
1627 /* dump memory mappings */
1628 void page_dump(FILE *f)
1630 unsigned long start, end;
1631 int i, j, prot, prot1;
1632 PageDesc *p;
1634 fprintf(f, "%-8s %-8s %-8s %s\n",
1635 "start", "end", "size", "prot");
1636 start = -1;
1637 end = -1;
1638 prot = 0;
1639 for(i = 0; i <= L1_SIZE; i++) {
1640 if (i < L1_SIZE)
1641 p = l1_map[i];
1642 else
1643 p = NULL;
1644 for(j = 0;j < L2_SIZE; j++) {
1645 if (!p)
1646 prot1 = 0;
1647 else
1648 prot1 = p[j].flags;
1649 if (prot1 != prot) {
1650 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1651 if (start != -1) {
1652 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1653 start, end, end - start,
1654 prot & PAGE_READ ? 'r' : '-',
1655 prot & PAGE_WRITE ? 'w' : '-',
1656 prot & PAGE_EXEC ? 'x' : '-');
1658 if (prot1 != 0)
1659 start = end;
1660 else
1661 start = -1;
1662 prot = prot1;
1664 if (!p)
1665 break;
1670 int page_get_flags(target_ulong address)
1672 PageDesc *p;
1674 p = page_find(address >> TARGET_PAGE_BITS);
1675 if (!p)
1676 return 0;
1677 return p->flags;
1680 /* modify the flags of a page and invalidate the code if
1681 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1682 depending on PAGE_WRITE */
1683 void page_set_flags(target_ulong start, target_ulong end, int flags)
1685 PageDesc *p;
1686 target_ulong addr;
1688 start = start & TARGET_PAGE_MASK;
1689 end = TARGET_PAGE_ALIGN(end);
1690 if (flags & PAGE_WRITE)
1691 flags |= PAGE_WRITE_ORG;
1692 spin_lock(&tb_lock);
1693 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1694 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1695 /* if the write protection is set, then we invalidate the code
1696 inside */
1697 if (!(p->flags & PAGE_WRITE) &&
1698 (flags & PAGE_WRITE) &&
1699 p->first_tb) {
1700 tb_invalidate_phys_page(addr, 0, NULL);
1702 p->flags = flags;
1704 spin_unlock(&tb_lock);
1707 /* called from signal handler: invalidate the code and unprotect the
1708 page. Return TRUE if the fault was successfully handled. */
1709 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1711 unsigned int page_index, prot, pindex;
1712 PageDesc *p, *p1;
1713 target_ulong host_start, host_end, addr;
1715 host_start = address & qemu_host_page_mask;
1716 page_index = host_start >> TARGET_PAGE_BITS;
1717 p1 = page_find(page_index);
1718 if (!p1)
1719 return 0;
1720 host_end = host_start + qemu_host_page_size;
1721 p = p1;
1722 prot = 0;
1723 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1724 prot |= p->flags;
1725 p++;
1727 /* if the page was really writable, then we change its
1728 protection back to writable */
1729 if (prot & PAGE_WRITE_ORG) {
1730 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1731 if (!(p1[pindex].flags & PAGE_WRITE)) {
1732 mprotect((void *)g2h(host_start), qemu_host_page_size,
1733 (prot & PAGE_BITS) | PAGE_WRITE);
1734 p1[pindex].flags |= PAGE_WRITE;
1735 /* and since the content will be modified, we must invalidate
1736 the corresponding translated code. */
1737 tb_invalidate_phys_page(address, pc, puc);
1738 #ifdef DEBUG_TB_CHECK
1739 tb_invalidate_check(address);
1740 #endif
1741 return 1;
1744 return 0;
1747 /* call this function when system calls directly modify a memory area */
1748 /* ??? This should be redundant now we have lock_user. */
1749 void page_unprotect_range(target_ulong data, target_ulong data_size)
1751 target_ulong start, end, addr;
1753 start = data;
1754 end = start + data_size;
1755 start &= TARGET_PAGE_MASK;
1756 end = TARGET_PAGE_ALIGN(end);
1757 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1758 page_unprotect(addr, 0, NULL);
1762 static inline void tlb_set_dirty(CPUState *env,
1763 unsigned long addr, target_ulong vaddr)
1766 #endif /* defined(CONFIG_USER_ONLY) */
1768 /* register physical memory. 'size' must be a multiple of the target
1769 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1770 io memory page */
1771 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1772 unsigned long size,
1773 unsigned long phys_offset)
1775 target_phys_addr_t addr, end_addr;
1776 PhysPageDesc *p;
1778 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1779 end_addr = start_addr + size;
1780 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1781 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1782 p->phys_offset = phys_offset;
1783 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1784 phys_offset += TARGET_PAGE_SIZE;
1788 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1790 return 0;
1793 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1797 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1798 unassigned_mem_readb,
1799 unassigned_mem_readb,
1800 unassigned_mem_readb,
1803 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1804 unassigned_mem_writeb,
1805 unassigned_mem_writeb,
1806 unassigned_mem_writeb,
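/* slow-path write handlers installed for RAM pages whose dirty bits are
   clear: they invalidate any translated code in the page before doing the
   store, then mark the page dirty so that later writes can go through the
   direct RAM path again */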
1809 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1811 unsigned long ram_addr;
1812 int dirty_flags;
1813 ram_addr = addr - (unsigned long)phys_ram_base;
1814 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1815 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1816 #if !defined(CONFIG_USER_ONLY)
1817 tb_invalidate_phys_page_fast(ram_addr, 1);
1818 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1819 #endif
1821 stb_p((uint8_t *)(long)addr, val);
1822 #ifdef USE_KQEMU
1823 if (cpu_single_env->kqemu_enabled &&
1824 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1825 kqemu_modify_page(cpu_single_env, ram_addr);
1826 #endif
1827 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1828 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1829 /* we remove the notdirty callback only if the code has been
1830 flushed */
1831 if (dirty_flags == 0xff)
1832 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1835 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1837 unsigned long ram_addr;
1838 int dirty_flags;
1839 ram_addr = addr - (unsigned long)phys_ram_base;
1840 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1841 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1842 #if !defined(CONFIG_USER_ONLY)
1843 tb_invalidate_phys_page_fast(ram_addr, 2);
1844 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1845 #endif
1847 stw_p((uint8_t *)(long)addr, val);
1848 #ifdef USE_KQEMU
1849 if (cpu_single_env->kqemu_enabled &&
1850 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1851 kqemu_modify_page(cpu_single_env, ram_addr);
1852 #endif
1853 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1854 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1855 /* we remove the notdirty callback only if the code has been
1856 flushed */
1857 if (dirty_flags == 0xff)
1858 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1861 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1863 unsigned long ram_addr;
1864 int dirty_flags;
1865 ram_addr = addr - (unsigned long)phys_ram_base;
1866 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1867 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1868 #if !defined(CONFIG_USER_ONLY)
1869 tb_invalidate_phys_page_fast(ram_addr, 4);
1870 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1871 #endif
1873 stl_p((uint8_t *)(long)addr, val);
1874 #ifdef USE_KQEMU
1875 if (cpu_single_env->kqemu_enabled &&
1876 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1877 kqemu_modify_page(cpu_single_env, ram_addr);
1878 #endif
1879 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1880 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1881 /* we remove the notdirty callback only if the code has been
1882 flushed */
1883 if (dirty_flags == 0xff)
1884 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1887 static CPUReadMemoryFunc *error_mem_read[3] = {
1888 NULL, /* never used */
1889 NULL, /* never used */
1890 NULL, /* never used */
1893 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1894 notdirty_mem_writeb,
1895 notdirty_mem_writew,
1896 notdirty_mem_writel,
1899 static void io_mem_init(void)
1901 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1902 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1903 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1904 io_mem_nb = 5;
1906 /* alloc dirty bits array */
1907 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1908 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1911 /* mem_read and mem_write are arrays of functions containing the
1912 function to access byte (index 0), word (index 1) and dword (index
1913 2). All functions must be supplied. If io_index is non zero, the
1914 corresponding io zone is modified. If it is zero, a new io zone is
1915 allocated. The return value can be used with
1916 cpu_register_physical_memory(). (-1) is returned if error. */
1917 int cpu_register_io_memory(int io_index,
1918 CPUReadMemoryFunc **mem_read,
1919 CPUWriteMemoryFunc **mem_write,
1920 void *opaque)
1922 int i;
1924 if (io_index <= 0) {
1925 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1926 return -1;
1927 io_index = io_mem_nb++;
1928 } else {
1929 if (io_index >= IO_MEM_NB_ENTRIES)
1930 return -1;
1933 for(i = 0;i < 3; i++) {
1934 io_mem_read[io_index][i] = mem_read[i];
1935 io_mem_write[io_index][i] = mem_write[i];
1937 io_mem_opaque[io_index] = opaque;
1938 return io_index << IO_MEM_SHIFT;
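/* usage sketch (illustration only; my_mem_read, my_mem_write, base_addr,
   size and s are hypothetical names): a device model would typically do
       io = cpu_register_io_memory(0, my_mem_read, my_mem_write, s);
       cpu_register_physical_memory(base_addr, size, io);
   where my_mem_read/my_mem_write are the byte/word/long accessor arrays
   described above */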
1941 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1943 return io_mem_write[io_index >> IO_MEM_SHIFT];
1946 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1948 return io_mem_read[io_index >> IO_MEM_SHIFT];
1951 /* physical memory access (slow version, mainly for debug) */
1952 #if defined(CONFIG_USER_ONLY)
1953 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1954 int len, int is_write)
1956 int l, flags;
1957 target_ulong page;
1958 void * p;
1960 while (len > 0) {
1961 page = addr & TARGET_PAGE_MASK;
1962 l = (page + TARGET_PAGE_SIZE) - addr;
1963 if (l > len)
1964 l = len;
1965 flags = page_get_flags(page);
1966 if (!(flags & PAGE_VALID))
1967 return;
1968 if (is_write) {
1969 if (!(flags & PAGE_WRITE))
1970 return;
1971 p = lock_user(addr, len, 0);
1972 memcpy(p, buf, len);
1973 unlock_user(p, addr, len);
1974 } else {
1975 if (!(flags & PAGE_READ))
1976 return;
1977 p = lock_user(addr, len, 1);
1978 memcpy(buf, p, len);
1979 unlock_user(p, addr, 0);
1981 len -= l;
1982 buf += l;
1983 addr += l;
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
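#if 0
/* Illustrative sketch only (not built): typical callers of the slow path
   above go through the cpu_physical_memory_read()/write() wrappers, which
   dispatch to RAM copies or to the registered io_mem callbacks page by
   page.  KERNEL_LOAD_ADDR, kernel_buf and kernel_size are hypothetical. */
static void load_kernel_example(const uint8_t *kernel_buf, int kernel_size)
{
    /* is_write = 1: bytes go from the host buffer into guest physical
       memory, invalidating any TBs that covered the overwritten pages */
    cpu_physical_memory_write(KERNEL_LOAD_ADDR, kernel_buf, kernel_size);
}

static void dump_guest_page_example(target_phys_addr_t addr)
{
    uint8_t page[TARGET_PAGE_SIZE];

    /* is_write = 0: bytes come from RAM or, for MMIO pages, from the
       registered io_mem_read callbacks */
    cpu_physical_memory_read(addr & TARGET_PAGE_MASK, page, TARGET_PAGE_SIZE);
}
#endif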
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
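/* Note (added explanation): in the I/O branch above the 64 bit value is
   assembled from two 32 bit reads, and TARGET_WORDS_BIGENDIAN decides
   which half is which.  For example, if the device returns 0x11223344 at
   addr and 0x55667788 at addr + 4, a big-endian target sees
   0x1122334455667788 while a little-endian target sees
   0x5566778811223344. */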
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
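#if 0
/* Illustrative sketch only (not built): the kind of caller
   stl_phys_notdirty() is meant for.  A target MMU helper that sets
   accessed/dirty bits in a page table entry uses it so the write does not
   itself mark the RAM page dirty, since the dirty bitmap may be the very
   mechanism used to detect guest PTE updates.  pte_addr, pte and
   PTE_ACCESSED are hypothetical names. */
static void pte_update_example(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    if (!(pte & PTE_ACCESSED)) {
        pte |= PTE_ACCESSED;
        /* no dirty marking, no TB invalidation */
        stl_phys_notdirty(pte_addr, pte);
    }
}
#endif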
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
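#if 0
/* Illustrative sketch only (not built): stl_phys() is the usual way for
   board code to poke aligned 32 bit words into guest memory, e.g. to
   place a tiny bootloader; unlike stl_phys_notdirty() it marks the page
   dirty and invalidates any translated code that covered it.
   BOOTLOADER_ADDR and the example instruction encodings are hypothetical. */
static void write_bootloader_example(void)
{
    static const uint32_t bootloader[] = {
        0xe3a00000,   /* example ARM encoding: mov r0, #0 */
        0xeafffffe,   /* example ARM encoding: b .        */
    };
    int i;

    for (i = 0; i < (int)(sizeof(bootloader) / sizeof(bootloader[0])); i++) {
        stl_phys(BOOTLOADER_ADDR + 4 * i, bootloader[i]);
    }
}
#endif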
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
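#if 0
/* Illustrative sketch only (not built): cpu_memory_rw_debug() is the hook
   a debugger front end (e.g. the gdb stub or the monitor) uses to access
   memory by guest virtual address; each page is translated with
   cpu_get_phys_page_debug() and then copied with cpu_physical_memory_rw().
   read_guest_string_example is a hypothetical helper. */
static int read_guest_string_example(CPUState *env, target_ulong vaddr,
                                     char *out, int maxlen)
{
    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, maxlen, 0) < 0)
        return -1;              /* some page was not mapped */
    out[maxlen - 1] = '\0';
    return 0;
}
#endif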
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
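#if 0
/* Illustrative sketch only (not built): dump_exec_info() only needs a FILE
   and a fprintf-like callback, so the translation statistics can be
   printed from the monitor or dumped directly for ad hoc debugging. */
static void show_jit_stats_example(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif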
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
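/* Note (added explanation): softmmu_template.h is included once per access
   size below.  Roughly, SHIFT selects the width (0 = 8 bit, 1 = 16 bit,
   2 = 32 bit, 3 = 64 bit), and MMUSUFFIX together with SOFTMMU_CODE_ACCESS
   makes these the code-fetch ("_cmmu") variants of the slow-path TLB fill
   helpers used while translating guest code rather than executing it.
   GETPC() is NULL because these helpers are not called from generated
   code, and env is aliased to cpu_single_env since they may run outside
   cpu_exec(). */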
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif