Update USB documentation.
[qemu/mini2440.git] / exec.c
blobf900e09f007fc4786f9ae3b061c25d36f2d94a22
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
45 /* make various TB consistency checks */
46 //#define DEBUG_TB_CHECK
47 //#define DEBUG_TLB_CHECK
49 #if !defined(CONFIG_USER_ONLY)
50 /* TB consistency checks only implemented for usermode emulation. */
51 #undef DEBUG_TB_CHECK
52 #endif
54 /* threshold to flush the translated code buffer */
55 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
57 #define SMC_BITMAP_USE_THRESHOLD 10
59 #define MMAP_AREA_START 0x00000000
60 #define MMAP_AREA_END 0xa8000000
62 #if defined(TARGET_SPARC64)
63 #define TARGET_PHYS_ADDR_SPACE_BITS 41
64 #elif defined(TARGET_PPC64)
65 #define TARGET_PHYS_ADDR_SPACE_BITS 42
66 #else
67 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
68 #define TARGET_PHYS_ADDR_SPACE_BITS 32
69 #endif
71 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
72 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
73 int nb_tbs;
74 /* any access to the tbs or the page table must use this lock */
75 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
77 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
78 uint8_t *code_gen_ptr;
80 int phys_ram_size;
81 int phys_ram_fd;
82 uint8_t *phys_ram_base;
83 uint8_t *phys_ram_dirty;
85 CPUState *first_cpu;
86 /* current CPU in the current thread. It is only valid inside
87 cpu_exec() */
88 CPUState *cpu_single_env;
90 typedef struct PageDesc {
91 /* list of TBs intersecting this ram page */
92 TranslationBlock *first_tb;
93 /* in order to optimize self modifying code, we count the number
94 of lookups we do to a given page to use a bitmap */
95 unsigned int code_write_count;
96 uint8_t *code_bitmap;
97 #if defined(CONFIG_USER_ONLY)
98 unsigned long flags;
99 #endif
100 } PageDesc;
102 typedef struct PhysPageDesc {
103 /* offset in host memory of the page + io_index in the low 12 bits */
104 uint32_t phys_offset;
105 } PhysPageDesc;
107 #define L2_BITS 10
108 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
110 #define L1_SIZE (1 << L1_BITS)
111 #define L2_SIZE (1 << L2_BITS)
113 static void io_mem_init(void);
115 unsigned long qemu_real_host_page_size;
116 unsigned long qemu_host_page_bits;
117 unsigned long qemu_host_page_size;
118 unsigned long qemu_host_page_mask;
120 /* XXX: for system emulation, it could just be an array */
121 static PageDesc *l1_map[L1_SIZE];
122 PhysPageDesc **l1_phys_map;
124 /* io memory support */
125 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
126 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
127 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
128 static int io_mem_nb;
130 /* log support */
131 char *logfilename = "/tmp/qemu.log";
132 FILE *logfile;
133 int loglevel;
135 /* statistics */
136 static int tlb_flush_count;
137 static int tb_flush_count;
138 static int tb_phys_invalidate_count;
140 static void page_init(void)
142 /* NOTE: we can always suppose that qemu_host_page_size >=
143 TARGET_PAGE_SIZE */
144 #ifdef _WIN32
146 SYSTEM_INFO system_info;
147 DWORD old_protect;
149 GetSystemInfo(&system_info);
150 qemu_real_host_page_size = system_info.dwPageSize;
152 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
153 PAGE_EXECUTE_READWRITE, &old_protect);
155 #else
156 qemu_real_host_page_size = getpagesize();
158 unsigned long start, end;
160 start = (unsigned long)code_gen_buffer;
161 start &= ~(qemu_real_host_page_size - 1);
163 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
164 end += qemu_real_host_page_size - 1;
165 end &= ~(qemu_real_host_page_size - 1);
167 mprotect((void *)start, end - start,
168 PROT_READ | PROT_WRITE | PROT_EXEC);
170 #endif
172 if (qemu_host_page_size == 0)
173 qemu_host_page_size = qemu_real_host_page_size;
174 if (qemu_host_page_size < TARGET_PAGE_SIZE)
175 qemu_host_page_size = TARGET_PAGE_SIZE;
176 qemu_host_page_bits = 0;
177 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
178 qemu_host_page_bits++;
179 qemu_host_page_mask = ~(qemu_host_page_size - 1);
180 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
181 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
184 static inline PageDesc *page_find_alloc(unsigned int index)
186 PageDesc **lp, *p;
188 lp = &l1_map[index >> L2_BITS];
189 p = *lp;
190 if (!p) {
191 /* allocate if not found */
192 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
193 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
194 *lp = p;
196 return p + (index & (L2_SIZE - 1));
199 static inline PageDesc *page_find(unsigned int index)
201 PageDesc *p;
203 p = l1_map[index >> L2_BITS];
204 if (!p)
205 return 0;
206 return p + (index & (L2_SIZE - 1));
209 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
211 void **lp, **p;
212 PhysPageDesc *pd;
214 p = (void **)l1_phys_map;
215 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
217 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
218 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
219 #endif
220 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
221 p = *lp;
222 if (!p) {
223 /* allocate if not found */
224 if (!alloc)
225 return NULL;
226 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
227 memset(p, 0, sizeof(void *) * L1_SIZE);
228 *lp = p;
230 #endif
231 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
232 pd = *lp;
233 if (!pd) {
234 int i;
235 /* allocate if not found */
236 if (!alloc)
237 return NULL;
238 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
239 *lp = pd;
240 for (i = 0; i < L2_SIZE; i++)
241 pd[i].phys_offset = IO_MEM_UNASSIGNED;
243 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
246 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
248 return phys_page_find_alloc(index, 0);
251 #if !defined(CONFIG_USER_ONLY)
252 static void tlb_protect_code(ram_addr_t ram_addr);
253 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
254 target_ulong vaddr);
255 #endif
257 void cpu_exec_init(CPUState *env)
259 CPUState **penv;
260 int cpu_index;
262 if (!code_gen_ptr) {
263 code_gen_ptr = code_gen_buffer;
264 page_init();
265 io_mem_init();
267 env->next_cpu = NULL;
268 penv = &first_cpu;
269 cpu_index = 0;
270 while (*penv != NULL) {
271 penv = (CPUState **)&(*penv)->next_cpu;
272 cpu_index++;
274 env->cpu_index = cpu_index;
275 *penv = env;
278 static inline void invalidate_page_bitmap(PageDesc *p)
280 if (p->code_bitmap) {
281 qemu_free(p->code_bitmap);
282 p->code_bitmap = NULL;
284 p->code_write_count = 0;
287 /* set to NULL all the 'first_tb' fields in all PageDescs */
288 static void page_flush_tb(void)
290 int i, j;
291 PageDesc *p;
293 for(i = 0; i < L1_SIZE; i++) {
294 p = l1_map[i];
295 if (p) {
296 for(j = 0; j < L2_SIZE; j++) {
297 p->first_tb = NULL;
298 invalidate_page_bitmap(p);
299 p++;
305 /* flush all the translation blocks */
306 /* XXX: tb_flush is currently not thread safe */
307 void tb_flush(CPUState *env1)
309 CPUState *env;
310 #if defined(DEBUG_FLUSH)
311 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
312 code_gen_ptr - code_gen_buffer,
313 nb_tbs,
314 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
315 #endif
316 nb_tbs = 0;
318 for(env = first_cpu; env != NULL; env = env->next_cpu) {
319 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
322 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
323 page_flush_tb();
325 code_gen_ptr = code_gen_buffer;
326 /* XXX: flush processor icache at this point if cache flush is
327 expensive */
328 tb_flush_count++;
#ifdef DEBUG_TB_CHECK

/* Consistency check: report any TB that still overlaps the supposedly
   invalidated page containing 'address'. */
static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            /* a page holding translated code must not be writable */
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

/* Check that the circular jmp_first list of 'tb' is well formed,
   i.e. that following it leads back to 'tb' itself. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        /* the low 2 bits of the pointer encode the jump slot;
           value 2 marks the list head */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2) {
            break;
        }
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
389 /* invalidate one TB */
390 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
391 int next_offset)
393 TranslationBlock *tb1;
394 for(;;) {
395 tb1 = *ptb;
396 if (tb1 == tb) {
397 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
398 break;
400 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
404 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
406 TranslationBlock *tb1;
407 unsigned int n1;
409 for(;;) {
410 tb1 = *ptb;
411 n1 = (long)tb1 & 3;
412 tb1 = (TranslationBlock *)((long)tb1 & ~3);
413 if (tb1 == tb) {
414 *ptb = tb1->page_next[n1];
415 break;
417 ptb = &tb1->page_next[n1];
421 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
423 TranslationBlock *tb1, **ptb;
424 unsigned int n1;
426 ptb = &tb->jmp_next[n];
427 tb1 = *ptb;
428 if (tb1) {
429 /* find tb(n) in circular list */
430 for(;;) {
431 tb1 = *ptb;
432 n1 = (long)tb1 & 3;
433 tb1 = (TranslationBlock *)((long)tb1 & ~3);
434 if (n1 == n && tb1 == tb)
435 break;
436 if (n1 == 2) {
437 ptb = &tb1->jmp_first;
438 } else {
439 ptb = &tb1->jmp_next[n1];
442 /* now we can suppress tb(n) from the list */
443 *ptb = tb->jmp_next[n];
445 tb->jmp_next[n] = NULL;
449 /* reset the jump entry 'n' of a TB so that it is not chained to
450 another TB */
451 static inline void tb_reset_jump(TranslationBlock *tb, int n)
453 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
456 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
458 CPUState *env;
459 PageDesc *p;
460 unsigned int h, n1;
461 target_ulong phys_pc;
462 TranslationBlock *tb1, *tb2;
464 /* remove the TB from the hash list */
465 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
466 h = tb_phys_hash_func(phys_pc);
467 tb_remove(&tb_phys_hash[h], tb,
468 offsetof(TranslationBlock, phys_hash_next));
470 /* remove the TB from the page list */
471 if (tb->page_addr[0] != page_addr) {
472 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
473 tb_page_remove(&p->first_tb, tb);
474 invalidate_page_bitmap(p);
476 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
477 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
478 tb_page_remove(&p->first_tb, tb);
479 invalidate_page_bitmap(p);
482 tb_invalidated_flag = 1;
484 /* remove the TB from the hash list */
485 h = tb_jmp_cache_hash_func(tb->pc);
486 for(env = first_cpu; env != NULL; env = env->next_cpu) {
487 if (env->tb_jmp_cache[h] == tb)
488 env->tb_jmp_cache[h] = NULL;
491 /* suppress this TB from the two jump lists */
492 tb_jmp_remove(tb, 0);
493 tb_jmp_remove(tb, 1);
495 /* suppress any remaining jumps to this TB */
496 tb1 = tb->jmp_first;
497 for(;;) {
498 n1 = (long)tb1 & 3;
499 if (n1 == 2)
500 break;
501 tb1 = (TranslationBlock *)((long)tb1 & ~3);
502 tb2 = tb1->jmp_next[n1];
503 tb_reset_jump(tb1, n1);
504 tb1->jmp_next[n1] = NULL;
505 tb1 = tb2;
507 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
509 tb_phys_invalidate_count++;
/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab' (bit i lives in byte i/8 at position i%8). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int stop, m, full_end;

    stop = start + len;
    tab += start >> 3;
    m = 0xff << (start & 7);
    if ((start & ~7) == (stop & ~7)) {
        /* the whole range lies inside a single byte */
        if (start < stop) {
            m &= ~(0xff << (stop & 7));
            *tab |= m;
        }
    } else {
        /* partial leading byte */
        *tab++ |= m;
        start = (start + 8) & ~7;
        /* full bytes in the middle */
        for (full_end = stop & ~7; start < full_end; start += 8) {
            *tab++ = 0xff;
        }
        /* partial trailing byte */
        if (start < stop) {
            m = ~(0xff << (stop & 7));
            *tab |= m;
        }
    }
}
539 static void build_page_bitmap(PageDesc *p)
541 int n, tb_start, tb_end;
542 TranslationBlock *tb;
544 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
545 if (!p->code_bitmap)
546 return;
547 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
549 tb = p->first_tb;
550 while (tb != NULL) {
551 n = (long)tb & 3;
552 tb = (TranslationBlock *)((long)tb & ~3);
553 /* NOTE: this is subtle as a TB may span two physical pages */
554 if (n == 0) {
555 /* NOTE: tb_end may be after the end of the page, but
556 it is not a problem */
557 tb_start = tb->pc & ~TARGET_PAGE_MASK;
558 tb_end = tb_start + tb->size;
559 if (tb_end > TARGET_PAGE_SIZE)
560 tb_end = TARGET_PAGE_SIZE;
561 } else {
562 tb_start = 0;
563 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
565 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
566 tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC

/* Translate the code at pc/cs_base/flags into a fresh TB and register
   it in the page tables; flushes the whole TB cache first if TB
   allocation fails. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* advance the generation pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
607 /* invalidate all TBs which intersect with the target physical page
608 starting in range [start;end[. NOTE: start and end must refer to
609 the same physical page. 'is_cpu_write_access' should be true if called
610 from a real cpu write access: the virtual CPU will exit the current
611 TB if code is modified inside this TB. */
612 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
613 int is_cpu_write_access)
615 int n, current_tb_modified, current_tb_not_found, current_flags;
616 CPUState *env = cpu_single_env;
617 PageDesc *p;
618 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
619 target_ulong tb_start, tb_end;
620 target_ulong current_pc, current_cs_base;
622 p = page_find(start >> TARGET_PAGE_BITS);
623 if (!p)
624 return;
625 if (!p->code_bitmap &&
626 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
627 is_cpu_write_access) {
628 /* build code bitmap */
629 build_page_bitmap(p);
632 /* we remove all the TBs in the range [start, end[ */
633 /* XXX: see if in some cases it could be faster to invalidate all the code */
634 current_tb_not_found = is_cpu_write_access;
635 current_tb_modified = 0;
636 current_tb = NULL; /* avoid warning */
637 current_pc = 0; /* avoid warning */
638 current_cs_base = 0; /* avoid warning */
639 current_flags = 0; /* avoid warning */
640 tb = p->first_tb;
641 while (tb != NULL) {
642 n = (long)tb & 3;
643 tb = (TranslationBlock *)((long)tb & ~3);
644 tb_next = tb->page_next[n];
645 /* NOTE: this is subtle as a TB may span two physical pages */
646 if (n == 0) {
647 /* NOTE: tb_end may be after the end of the page, but
648 it is not a problem */
649 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
650 tb_end = tb_start + tb->size;
651 } else {
652 tb_start = tb->page_addr[1];
653 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
655 if (!(tb_end <= start || tb_start >= end)) {
656 #ifdef TARGET_HAS_PRECISE_SMC
657 if (current_tb_not_found) {
658 current_tb_not_found = 0;
659 current_tb = NULL;
660 if (env->mem_write_pc) {
661 /* now we have a real cpu fault */
662 current_tb = tb_find_pc(env->mem_write_pc);
665 if (current_tb == tb &&
666 !(current_tb->cflags & CF_SINGLE_INSN)) {
667 /* If we are modifying the current TB, we must stop
668 its execution. We could be more precise by checking
669 that the modification is after the current PC, but it
670 would require a specialized function to partially
671 restore the CPU state */
673 current_tb_modified = 1;
674 cpu_restore_state(current_tb, env,
675 env->mem_write_pc, NULL);
676 #if defined(TARGET_I386)
677 current_flags = env->hflags;
678 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
679 current_cs_base = (target_ulong)env->segs[R_CS].base;
680 current_pc = current_cs_base + env->eip;
681 #else
682 #error unsupported CPU
683 #endif
685 #endif /* TARGET_HAS_PRECISE_SMC */
686 /* we need to do that to handle the case where a signal
687 occurs while doing tb_phys_invalidate() */
688 saved_tb = NULL;
689 if (env) {
690 saved_tb = env->current_tb;
691 env->current_tb = NULL;
693 tb_phys_invalidate(tb, -1);
694 if (env) {
695 env->current_tb = saved_tb;
696 if (env->interrupt_request && env->current_tb)
697 cpu_interrupt(env, env->interrupt_request);
700 tb = tb_next;
702 #if !defined(CONFIG_USER_ONLY)
703 /* if no code remaining, no need to continue to use slow writes */
704 if (!p->first_tb) {
705 invalidate_page_bitmap(p);
706 if (is_cpu_write_access) {
707 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
710 #endif
711 #ifdef TARGET_HAS_PRECISE_SMC
712 if (current_tb_modified) {
713 /* we generate a block containing just the instruction
714 modifying the memory. It will ensure that it cannot modify
715 itself */
716 env->current_tb = NULL;
717 tb_gen_code(env, current_pc, current_cs_base, current_flags,
718 CF_SINGLE_INSN);
719 cpu_resume_from_signal(env, NULL);
721 #endif
724 /* len must be <= 8 and start must be a multiple of len */
725 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
727 PageDesc *p;
728 int offset, b;
729 #if 0
730 if (1) {
731 if (loglevel) {
732 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
733 cpu_single_env->mem_write_vaddr, len,
734 cpu_single_env->eip,
735 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
738 #endif
739 p = page_find(start >> TARGET_PAGE_BITS);
740 if (!p)
741 return;
742 if (p->code_bitmap) {
743 offset = start & ~TARGET_PAGE_MASK;
744 b = p->code_bitmap[offset >> 3] >> (offset & 7);
745 if (b & ((1 << len) - 1))
746 goto do_invalidate;
747 } else {
748 do_invalidate:
749 tb_invalidate_phys_page_range(start, start + len, 1);
753 #if !defined(CONFIG_SOFTMMU)
754 static void tb_invalidate_phys_page(target_ulong addr,
755 unsigned long pc, void *puc)
757 int n, current_flags, current_tb_modified;
758 target_ulong current_pc, current_cs_base;
759 PageDesc *p;
760 TranslationBlock *tb, *current_tb;
761 #ifdef TARGET_HAS_PRECISE_SMC
762 CPUState *env = cpu_single_env;
763 #endif
765 addr &= TARGET_PAGE_MASK;
766 p = page_find(addr >> TARGET_PAGE_BITS);
767 if (!p)
768 return;
769 tb = p->first_tb;
770 current_tb_modified = 0;
771 current_tb = NULL;
772 current_pc = 0; /* avoid warning */
773 current_cs_base = 0; /* avoid warning */
774 current_flags = 0; /* avoid warning */
775 #ifdef TARGET_HAS_PRECISE_SMC
776 if (tb && pc != 0) {
777 current_tb = tb_find_pc(pc);
779 #endif
780 while (tb != NULL) {
781 n = (long)tb & 3;
782 tb = (TranslationBlock *)((long)tb & ~3);
783 #ifdef TARGET_HAS_PRECISE_SMC
784 if (current_tb == tb &&
785 !(current_tb->cflags & CF_SINGLE_INSN)) {
786 /* If we are modifying the current TB, we must stop
787 its execution. We could be more precise by checking
788 that the modification is after the current PC, but it
789 would require a specialized function to partially
790 restore the CPU state */
792 current_tb_modified = 1;
793 cpu_restore_state(current_tb, env, pc, puc);
794 #if defined(TARGET_I386)
795 current_flags = env->hflags;
796 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
797 current_cs_base = (target_ulong)env->segs[R_CS].base;
798 current_pc = current_cs_base + env->eip;
799 #else
800 #error unsupported CPU
801 #endif
803 #endif /* TARGET_HAS_PRECISE_SMC */
804 tb_phys_invalidate(tb, addr);
805 tb = tb->page_next[n];
807 p->first_tb = NULL;
808 #ifdef TARGET_HAS_PRECISE_SMC
809 if (current_tb_modified) {
810 /* we generate a block containing just the instruction
811 modifying the memory. It will ensure that it cannot modify
812 itself */
813 env->current_tb = NULL;
814 tb_gen_code(env, current_pc, current_cs_base, current_flags,
815 CF_SINGLE_INSN);
816 cpu_resume_from_signal(env, puc);
818 #endif
820 #endif
822 /* add the tb in the target page and protect it if necessary */
823 static inline void tb_alloc_page(TranslationBlock *tb,
824 unsigned int n, target_ulong page_addr)
826 PageDesc *p;
827 TranslationBlock *last_first_tb;
829 tb->page_addr[n] = page_addr;
830 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
831 tb->page_next[n] = p->first_tb;
832 last_first_tb = p->first_tb;
833 p->first_tb = (TranslationBlock *)((long)tb | n);
834 invalidate_page_bitmap(p);
836 #if defined(TARGET_HAS_SMC) || 1
838 #if defined(CONFIG_USER_ONLY)
839 if (p->flags & PAGE_WRITE) {
840 target_ulong addr;
841 PageDesc *p2;
842 int prot;
844 /* force the host page as non writable (writes will have a
845 page fault + mprotect overhead) */
846 page_addr &= qemu_host_page_mask;
847 prot = 0;
848 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
849 addr += TARGET_PAGE_SIZE) {
851 p2 = page_find (addr >> TARGET_PAGE_BITS);
852 if (!p2)
853 continue;
854 prot |= p2->flags;
855 p2->flags &= ~PAGE_WRITE;
856 page_get_flags(addr);
858 mprotect(g2h(page_addr), qemu_host_page_size,
859 (prot & PAGE_BITS) & ~PAGE_WRITE);
860 #ifdef DEBUG_TB_INVALIDATE
861 printf("protecting code page: 0x%08lx\n",
862 page_addr);
863 #endif
865 #else
866 /* if some code is already present, then the pages are already
867 protected. So we handle the case where only the first TB is
868 allocated in a physical page */
869 if (!last_first_tb) {
870 tlb_protect_code(page_addr);
872 #endif
874 #endif /* TARGET_HAS_SMC */
877 /* Allocate a new translation block. Flush the translation buffer if
878 too many translation blocks or too much generated code. */
879 TranslationBlock *tb_alloc(target_ulong pc)
881 TranslationBlock *tb;
883 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
884 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
885 return NULL;
886 tb = &tbs[nb_tbs++];
887 tb->pc = pc;
888 tb->cflags = 0;
889 return tb;
892 /* add a new TB and link it to the physical page tables. phys_page2 is
893 (-1) to indicate that only one page contains the TB. */
894 void tb_link_phys(TranslationBlock *tb,
895 target_ulong phys_pc, target_ulong phys_page2)
897 unsigned int h;
898 TranslationBlock **ptb;
900 /* add in the physical hash table */
901 h = tb_phys_hash_func(phys_pc);
902 ptb = &tb_phys_hash[h];
903 tb->phys_hash_next = *ptb;
904 *ptb = tb;
906 /* add in the page list */
907 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
908 if (phys_page2 != -1)
909 tb_alloc_page(tb, 1, phys_page2);
910 else
911 tb->page_addr[1] = -1;
913 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
914 tb->jmp_next[0] = NULL;
915 tb->jmp_next[1] = NULL;
916 #ifdef USE_CODE_COPY
917 tb->cflags &= ~CF_FP_USED;
918 if (tb->cflags & CF_TB_FP_USED)
919 tb->cflags |= CF_FP_USED;
920 #endif
922 /* init original jump addresses */
923 if (tb->tb_next_offset[0] != 0xffff)
924 tb_reset_jump(tb, 0);
925 if (tb->tb_next_offset[1] != 0xffff)
926 tb_reset_jump(tb, 1);
928 #ifdef DEBUG_TB_CHECK
929 tb_page_check();
930 #endif
933 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
934 tb[1].tc_ptr. Return NULL if not found */
935 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
937 int m_min, m_max, m;
938 unsigned long v;
939 TranslationBlock *tb;
941 if (nb_tbs <= 0)
942 return NULL;
943 if (tc_ptr < (unsigned long)code_gen_buffer ||
944 tc_ptr >= (unsigned long)code_gen_ptr)
945 return NULL;
946 /* binary search (cf Knuth) */
947 m_min = 0;
948 m_max = nb_tbs - 1;
949 while (m_min <= m_max) {
950 m = (m_min + m_max) >> 1;
951 tb = &tbs[m];
952 v = (unsigned long)tb->tc_ptr;
953 if (v == tc_ptr)
954 return tb;
955 else if (tc_ptr < v) {
956 m_max = m - 1;
957 } else {
958 m_min = m + 1;
961 return &tbs[m_max];
964 static void tb_reset_jump_recursive(TranslationBlock *tb);
966 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
968 TranslationBlock *tb1, *tb_next, **ptb;
969 unsigned int n1;
971 tb1 = tb->jmp_next[n];
972 if (tb1 != NULL) {
973 /* find head of list */
974 for(;;) {
975 n1 = (long)tb1 & 3;
976 tb1 = (TranslationBlock *)((long)tb1 & ~3);
977 if (n1 == 2)
978 break;
979 tb1 = tb1->jmp_next[n1];
981 /* we are now sure now that tb jumps to tb1 */
982 tb_next = tb1;
984 /* remove tb from the jmp_first list */
985 ptb = &tb_next->jmp_first;
986 for(;;) {
987 tb1 = *ptb;
988 n1 = (long)tb1 & 3;
989 tb1 = (TranslationBlock *)((long)tb1 & ~3);
990 if (n1 == n && tb1 == tb)
991 break;
992 ptb = &tb1->jmp_next[n1];
994 *ptb = tb->jmp_next[n];
995 tb->jmp_next[n] = NULL;
997 /* suppress the jump to next tb in generated code */
998 tb_reset_jump(tb, n);
1000 /* suppress jumps in the tb on which we could have jumped */
1001 tb_reset_jump_recursive(tb_next);
1005 static void tb_reset_jump_recursive(TranslationBlock *tb)
1007 tb_reset_jump_recursive2(tb, 0);
1008 tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
/* Invalidate the TB containing the breakpoint address so it gets
   retranslated with the breakpoint check included. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
1030 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1031 breakpoint is reached */
1032 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1034 #if defined(TARGET_HAS_ICE)
1035 int i;
1037 for(i = 0; i < env->nb_breakpoints; i++) {
1038 if (env->breakpoints[i] == pc)
1039 return 0;
1042 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1043 return -1;
1044 env->breakpoints[env->nb_breakpoints++] = pc;
1046 breakpoint_invalidate(env, pc);
1047 return 0;
1048 #else
1049 return -1;
1050 #endif
1053 /* remove a breakpoint */
1054 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1056 #if defined(TARGET_HAS_ICE)
1057 int i;
1058 for(i = 0; i < env->nb_breakpoints; i++) {
1059 if (env->breakpoints[i] == pc)
1060 goto found;
1062 return -1;
1063 found:
1064 env->nb_breakpoints--;
1065 if (i < env->nb_breakpoints)
1066 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1068 breakpoint_invalidate(env, pc);
1069 return 0;
1070 #else
1071 return -1;
1072 #endif
1075 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1076 CPU loop after each instruction */
1077 void cpu_single_step(CPUState *env, int enabled)
1079 #if defined(TARGET_HAS_ICE)
1080 if (env->singlestep_enabled != enabled) {
1081 env->singlestep_enabled = enabled;
1082 /* must flush all the translated code to avoid inconsistancies */
1083 /* XXX: only flush what is necessary */
1084 tb_flush(env);
1086 #endif
1089 /* enable or disable low levels log */
1090 void cpu_set_log(int log_flags)
1092 loglevel = log_flags;
1093 if (loglevel && !logfile) {
1094 logfile = fopen(logfilename, "w");
1095 if (!logfile) {
1096 perror(logfilename);
1097 _exit(1);
1099 #if !defined(CONFIG_SOFTMMU)
1100 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1102 static uint8_t logfile_buf[4096];
1103 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1105 #else
1106 setvbuf(logfile, NULL, _IOLBF, 0);
1107 #endif
1111 void cpu_set_log_filename(const char *filename)
1113 logfilename = strdup(filename);
1116 /* mask must never be zero, except for A20 change call */
1117 void cpu_interrupt(CPUState *env, int mask)
1119 TranslationBlock *tb;
1120 static int interrupt_lock;
1122 env->interrupt_request |= mask;
1123 /* if the cpu is currently executing code, we must unlink it and
1124 all the potentially executing TB */
1125 tb = env->current_tb;
1126 if (tb && !testandset(&interrupt_lock)) {
1127 env->current_tb = NULL;
1128 tb_reset_jump_recursive(tb);
1129 interrupt_lock = 0;
1133 void cpu_reset_interrupt(CPUState *env, int mask)
1135 env->interrupt_request &= ~mask;
1138 CPULogItem cpu_log_items[] = {
1139 { CPU_LOG_TB_OUT_ASM, "out_asm",
1140 "show generated host assembly code for each compiled TB" },
1141 { CPU_LOG_TB_IN_ASM, "in_asm",
1142 "show target assembly code for each compiled TB" },
1143 { CPU_LOG_TB_OP, "op",
1144 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1145 #ifdef TARGET_I386
1146 { CPU_LOG_TB_OP_OPT, "op_opt",
1147 "show micro ops after optimization for each compiled TB" },
1148 #endif
1149 { CPU_LOG_INT, "int",
1150 "show interrupts/exceptions in short format" },
1151 { CPU_LOG_EXEC, "exec",
1152 "show trace before each executed TB (lots of logs)" },
1153 { CPU_LOG_TB_CPU, "cpu",
1154 "show CPU state before bloc translation" },
1155 #ifdef TARGET_I386
1156 { CPU_LOG_PCALL, "pcall",
1157 "show protected mode far calls/returns/exceptions" },
1158 #endif
1159 #ifdef DEBUG_IOPORT
1160 { CPU_LOG_IOPORT, "ioport",
1161 "show all i/o ports accesses" },
1162 #endif
1163 { 0, NULL, NULL },
/* Return nonzero iff the first 'n' characters of 's1' are exactly the
   whole string 's2'. */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1173 /* takes a comma separated list of log masks. Return 0 if error. */
1174 int cpu_str_to_log_mask(const char *str)
1176 CPULogItem *item;
1177 int mask;
1178 const char *p, *p1;
1180 p = str;
1181 mask = 0;
1182 for(;;) {
/* p1 points one past the current token: either the next comma or the
   terminating NUL. */
1183 p1 = strchr(p, ',');
1184 if (!p1)
1185 p1 = p + strlen(p);
/* "all" is a pseudo-name selecting every entry of cpu_log_items */
1186 if(cmp1(p,p1-p,"all")) {
1187 for(item = cpu_log_items; item->mask != 0; item++) {
1188 mask |= item->mask;
1190 } else {
1191 for(item = cpu_log_items; item->mask != 0; item++) {
1192 if (cmp1(p, p1 - p, item->name))
1193 goto found;
/* unknown token: report error to caller */
1195 return 0;
1197 found:
1198 mask |= item->mask;
1199 if (*p1 != ',')
1200 break;
1201 p = p1 + 1;
1203 return mask;
/* Fatal-error helper: print a printf-style message, dump the CPU state
   to stderr, and abort(). Never returns. */
1206 void cpu_abort(CPUState *env, const char *fmt, ...)
1208 va_list ap;
1210 va_start(ap, fmt);
1211 fprintf(stderr, "qemu: fatal: ");
1212 vfprintf(stderr, fmt, ap);
1213 fprintf(stderr, "\n");
1214 #ifdef TARGET_I386
/* on x86 also dump FPU and condition-code state */
1215 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1216 #else
1217 cpu_dump_state(env, stderr, fprintf, 0);
1218 #endif
1219 va_end(ap);
1220 abort();
1223 #if !defined(CONFIG_USER_ONLY)
1225 /* NOTE: if flush_global is true, also flush global entries (not
1226 implemented yet) */
1227 void tlb_flush(CPUState *env, int flush_global)
1229 int i;
1231 #if defined(DEBUG_TLB)
1232 printf("tlb_flush:\n");
1233 #endif
1234 /* must reset current TB so that interrupts cannot modify the
1235 links while we are modifying them */
1236 env->current_tb = NULL;
/* invalidate every entry of both TLB ways (kernel and user) by
   setting all addresses to -1, which can never match */
1238 for(i = 0; i < CPU_TLB_SIZE; i++) {
1239 env->tlb_table[0][i].addr_read = -1;
1240 env->tlb_table[0][i].addr_write = -1;
1241 env->tlb_table[0][i].addr_code = -1;
1242 env->tlb_table[1][i].addr_read = -1;
1243 env->tlb_table[1][i].addr_write = -1;
1244 env->tlb_table[1][i].addr_code = -1;
/* the TB jump cache is virtually indexed, so it must be cleared too */
1247 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1249 #if !defined(CONFIG_SOFTMMU)
/* non-softmmu: drop all host mappings of the guest address space */
1250 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1251 #endif
1252 #ifdef USE_KQEMU
1253 if (env->kqemu_enabled) {
1254 kqemu_flush(env, flush_global);
1256 #endif
/* statistics counter reported by dump_exec_info() */
1257 tlb_flush_count++;
/* Invalidate a single TLB entry if any of its read/write/code addresses
   matches the page at 'addr' (compared under the page mask plus the
   invalid bit, so already-invalid entries do not spuriously match). */
1260 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1262 if (addr == (tlb_entry->addr_read &
1263 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1264 addr == (tlb_entry->addr_write &
1265 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1266 addr == (tlb_entry->addr_code &
1267 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1268 tlb_entry->addr_read = -1;
1269 tlb_entry->addr_write = -1;
1270 tlb_entry->addr_code = -1;
/* Flush all TLB and TB-jump-cache state referring to the single virtual
   page containing 'addr'. */
1274 void tlb_flush_page(CPUState *env, target_ulong addr)
1276 int i;
1277 TranslationBlock *tb;
1279 #if defined(DEBUG_TLB)
1280 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1281 #endif
1282 /* must reset current TB so that interrupts cannot modify the
1283 links while we are modifying them */
1284 env->current_tb = NULL;
1286 addr &= TARGET_PAGE_MASK;
/* the TLB is direct-mapped on the page index; flush both ways */
1287 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1288 tlb_flush_entry(&env->tlb_table[0][i], addr);
1289 tlb_flush_entry(&env->tlb_table[1][i], addr);
/* drop any cached TB whose code starts or ends in this page */
1291 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1292 tb = env->tb_jmp_cache[i];
1293 if (tb &&
1294 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1295 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1296 env->tb_jmp_cache[i] = NULL;
1300 #if !defined(CONFIG_SOFTMMU)
1301 if (addr < MMAP_AREA_END)
1302 munmap((void *)addr, TARGET_PAGE_SIZE);
1303 #endif
1304 #ifdef USE_KQEMU
1305 if (env->kqemu_enabled) {
1306 kqemu_flush_page(env, addr);
1308 #endif
1311 /* update the TLBs so that writes to code in the virtual page 'addr'
1312 can be detected */
1313 static void tlb_protect_code(ram_addr_t ram_addr)
/* clearing CODE_DIRTY_FLAG makes subsequent writes to this RAM page go
   through the notdirty slow path, which invalidates the TBs inside */
1315 cpu_physical_memory_reset_dirty(ram_addr,
1316 ram_addr + TARGET_PAGE_SIZE,
1317 CODE_DIRTY_FLAG);
1320 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1321 tested for self modifying code */
1322 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1323 target_ulong vaddr)
/* simply mark the page's code bit dirty again; the fast write path is
   restored lazily */
1325 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
/* If this TLB entry is a plain-RAM write entry whose host address falls
   inside [start, start+length), downgrade it to IO_MEM_NOTDIRTY so that
   the next write takes the dirty-tracking slow path. */
1328 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1329 unsigned long start, unsigned long length)
1331 unsigned long addr;
1332 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1333 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
/* unsigned subtraction: single comparison covers the range test */
1334 if ((addr - start) < length) {
1335 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
/* Clear the given dirty flag bits for the physical range [start, end)
   and adjust every CPU's TLB (and, without softmmu, host mappings) so
   that future writes into the range are trapped again. */
1340 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1341 int dirty_flags)
1343 CPUState *env;
1344 unsigned long length, start1;
1345 int i, mask, len;
1346 uint8_t *p;
1348 start &= TARGET_PAGE_MASK;
1349 end = TARGET_PAGE_ALIGN(end);
1351 length = end - start;
1352 if (length == 0)
1353 return;
1354 len = length >> TARGET_PAGE_BITS;
1355 #ifdef USE_KQEMU
1356 /* XXX: should not depend on cpu context */
1357 env = first_cpu;
1358 if (env->kqemu_enabled) {
1359 ram_addr_t addr;
1360 addr = start;
1361 for(i = 0; i < len; i++) {
1362 kqemu_set_notdirty(env, addr);
1363 addr += TARGET_PAGE_SIZE;
1366 #endif
/* clear the requested flag bits in the per-page dirty byte array */
1367 mask = ~dirty_flags;
1368 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1369 for(i = 0; i < len; i++)
1370 p[i] &= mask;
1372 /* we modify the TLB cache so that the dirty bit will be set again
1373 when accessing the range */
1374 start1 = start + (unsigned long)phys_ram_base;
1375 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1376 for(i = 0; i < CPU_TLB_SIZE; i++)
1377 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1378 for(i = 0; i < CPU_TLB_SIZE; i++)
1379 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1382 #if !defined(CONFIG_SOFTMMU)
1383 /* XXX: this is expensive */
/* walk the whole virtual page map and write-protect any host mapping
   whose backing physical page lies in the range */
1385 VirtPageDesc *p;
1386 int j;
1387 target_ulong addr;
1389 for(i = 0; i < L1_SIZE; i++) {
1390 p = l1_virt_map[i];
1391 if (p) {
1392 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1393 for(j = 0; j < L2_SIZE; j++) {
1394 if (p->valid_tag == virt_valid_tag &&
1395 p->phys_addr >= start && p->phys_addr < end &&
1396 (p->prot & PROT_WRITE)) {
1397 if (addr < MMAP_AREA_END) {
1398 mprotect((void *)addr, TARGET_PAGE_SIZE,
1399 p->prot & ~PROT_WRITE);
1402 addr += TARGET_PAGE_SIZE;
1403 p++;
1408 #endif
/* Re-check one RAM write TLB entry against the dirty bitmap: if the page
   is no longer dirty, force writes back onto the notdirty slow path. */
1411 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1413 ram_addr_t ram_addr;
1415 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
/* translate host address back to a ram offset for the dirty bitmap */
1416 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1417 tlb_entry->addend - (unsigned long)phys_ram_base;
1418 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1419 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1424 /* update the TLB according to the current state of the dirty bits */
1425 void cpu_tlb_update_dirty(CPUState *env)
1427 int i;
/* both TLB ways must be scanned */
1428 for(i = 0; i < CPU_TLB_SIZE; i++)
1429 tlb_update_dirty(&env->tlb_table[0][i]);
1430 for(i = 0; i < CPU_TLB_SIZE; i++)
1431 tlb_update_dirty(&env->tlb_table[1][i]);
/* If this entry is a notdirty entry for the host page at 'start',
   promote it back to a fast plain-RAM write entry. */
1434 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1435 unsigned long start)
1437 unsigned long addr;
1438 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1439 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1440 if (addr == start) {
1441 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1446 /* update the TLB corresponding to virtual page vaddr and phys addr
1447 addr so that it is no longer dirty */
1448 static inline void tlb_set_dirty(CPUState *env,
1449 unsigned long addr, target_ulong vaddr)
1451 int i;
1453 addr &= TARGET_PAGE_MASK;
/* index the TLB by the virtual page, pass the host page to match on */
1454 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1455 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1456 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1459 /* add a new TLB entry. At most one entry for a given virtual address
1460 is permitted. Return 0 if OK or 2 if the page could not be mapped
1461 (can only happen in non SOFTMMU mode for I/O pages or pages
1462 conflicting with the host address space). */
1463 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1464 target_phys_addr_t paddr, int prot,
1465 int is_user, int is_softmmu)
1467 PhysPageDesc *p;
1468 unsigned long pd;
1469 unsigned int index;
1470 target_ulong address;
1471 target_phys_addr_t addend;
1472 int ret;
1473 CPUTLBEntry *te;
/* look up the physical page descriptor; unmapped pages behave as
   unassigned I/O */
1475 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1476 if (!p) {
1477 pd = IO_MEM_UNASSIGNED;
1478 } else {
1479 pd = p->phys_offset;
1481 #if defined(DEBUG_TLB)
1482 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1483 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1484 #endif
1486 ret = 0;
1487 #if !defined(CONFIG_SOFTMMU)
1488 if (is_softmmu)
1489 #endif
/* softmmu path: fill a CPUTLBEntry */
1491 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1492 /* IO memory case */
/* embed the I/O index in the low bits of the address; the addend is
   the physical address itself for I/O accesses */
1493 address = vaddr | pd;
1494 addend = paddr;
1495 } else {
1496 /* standard memory */
1497 address = vaddr;
1498 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
/* addend is stored relative to vaddr so that host_addr = vaddr + addend */
1501 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1502 addend -= vaddr;
1503 te = &env->tlb_table[is_user][index];
1504 te->addend = addend;
1505 if (prot & PAGE_READ) {
1506 te->addr_read = address;
1507 } else {
1508 te->addr_read = -1;
1510 if (prot & PAGE_EXEC) {
1511 te->addr_code = address;
1512 } else {
1513 te->addr_code = -1;
1515 if (prot & PAGE_WRITE) {
1516 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1517 /* ROM: access is ignored (same as unassigned) */
1518 te->addr_write = vaddr | IO_MEM_ROM;
1519 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1520 !cpu_physical_memory_is_dirty(pd)) {
/* clean RAM page: route writes through the dirty-tracking handler */
1521 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1522 } else {
1523 te->addr_write = address;
1525 } else {
1526 te->addr_write = -1;
1529 #if !defined(CONFIG_SOFTMMU)
1530 else {
/* non-softmmu path: create a real host mapping with mmap */
1531 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1532 /* IO access: no mapping is done as it will be handled by the
1533 soft MMU */
1534 if (!(env->hflags & HF_SOFTMMU_MASK))
1535 ret = 2;
1536 } else {
1537 void *map_addr;
1539 if (vaddr >= MMAP_AREA_END) {
1540 ret = 2;
1541 } else {
1542 if (prot & PROT_WRITE) {
1543 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1544 #if defined(TARGET_HAS_SMC) || 1
1545 first_tb ||
1546 #endif
1547 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1548 !cpu_physical_memory_is_dirty(pd))) {
1549 /* ROM: we do as if code was inside */
1550 /* if code is present, we only map as read only and save the
1551 original mapping */
1552 VirtPageDesc *vp;
1554 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1555 vp->phys_addr = pd;
1556 vp->prot = prot;
1557 vp->valid_tag = virt_valid_tag;
1558 prot &= ~PAGE_WRITE;
1561 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1562 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1563 if (map_addr == MAP_FAILED) {
1564 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1565 paddr, vaddr);
1570 #endif
1571 return ret;
1574 /* called from signal handler: invalidate the code and unprotect the
1575 page. Return TRUE if the fault was succesfully handled. */
1576 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1578 #if !defined(CONFIG_SOFTMMU)
1579 VirtPageDesc *vp;
1581 #if defined(DEBUG_TLB)
1582 printf("page_unprotect: addr=0x%08x\n", addr);
1583 #endif
1584 addr &= TARGET_PAGE_MASK;
1586 /* if it is not mapped, no need to worry here */
1587 if (addr >= MMAP_AREA_END)
1588 return 0;
1589 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1590 if (!vp)
1591 return 0;
1592 /* NOTE: in this case, validate_tag is _not_ tested as it
1593 validates only the code TLB */
1594 if (vp->valid_tag != virt_valid_tag)
1595 return 0;
1596 if (!(vp->prot & PAGE_WRITE))
1597 return 0;
1598 #if defined(DEBUG_TLB)
1599 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1600 addr, vp->phys_addr, vp->prot);
1601 #endif
/* restore the original (writable) protection saved by tlb_set_page_exec */
1602 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1603 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1604 (unsigned long)addr, vp->prot);
1605 /* set the dirty bit */
1606 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1607 /* flush the code inside */
1608 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1609 return 1;
1610 #else
/* softmmu build: write faults are handled elsewhere */
1611 return 0;
1612 #endif
1615 #else
/* user-mode emulation: no TLB, nothing to flush */
1617 void tlb_flush(CPUState *env, int flush_global)
/* user-mode emulation: no TLB, nothing to flush */
1621 void tlb_flush_page(CPUState *env, target_ulong addr)
/* user-mode emulation: no TLB entries to install; always succeeds */
1625 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1626 target_phys_addr_t paddr, int prot,
1627 int is_user, int is_softmmu)
1629 return 0;
1632 /* dump memory mappings */
1633 void page_dump(FILE *f)
1635 unsigned long start, end;
1636 int i, j, prot, prot1;
1637 PageDesc *p;
1639 fprintf(f, "%-8s %-8s %-8s %s\n",
1640 "start", "end", "size", "prot");
/* start == -1 means "not currently inside a mapped region" */
1641 start = -1;
1642 end = -1;
1643 prot = 0;
/* iterate one slot past L1_SIZE so the final region is flushed */
1644 for(i = 0; i <= L1_SIZE; i++) {
1645 if (i < L1_SIZE)
1646 p = l1_map[i];
1647 else
1648 p = NULL;
1649 for(j = 0;j < L2_SIZE; j++) {
1650 if (!p)
1651 prot1 = 0;
1652 else
1653 prot1 = p[j].flags;
/* emit a line every time the protection changes */
1654 if (prot1 != prot) {
1655 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1656 if (start != -1) {
1657 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1658 start, end, end - start,
1659 prot & PAGE_READ ? 'r' : '-',
1660 prot & PAGE_WRITE ? 'w' : '-',
1661 prot & PAGE_EXEC ? 'x' : '-');
1663 if (prot1 != 0)
1664 start = end;
1665 else
1666 start = -1;
1667 prot = prot1;
1669 if (!p)
1670 break;
/* Return the PAGE_* flags of the page containing 'address', or 0 if the
   page has no descriptor (i.e. is unmapped). */
1675 int page_get_flags(target_ulong address)
1677 PageDesc *p;
1679 p = page_find(address >> TARGET_PAGE_BITS);
1680 if (!p)
1681 return 0;
1682 return p->flags;
1685 /* modify the flags of a page and invalidate the code if
1686 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1687 depending on PAGE_WRITE */
1688 void page_set_flags(target_ulong start, target_ulong end, int flags)
1690 PageDesc *p;
1691 target_ulong addr;
1693 start = start & TARGET_PAGE_MASK;
1694 end = TARGET_PAGE_ALIGN(end);
/* remember that the page was originally writable so page_unprotect can
   restore write access later */
1695 if (flags & PAGE_WRITE)
1696 flags |= PAGE_WRITE_ORG;
1697 spin_lock(&tb_lock);
1698 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1699 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1700 /* if the write protection is set, then we invalidate the code
1701 inside */
1702 if (!(p->flags & PAGE_WRITE) &&
1703 (flags & PAGE_WRITE) &&
1704 p->first_tb) {
1705 tb_invalidate_phys_page(addr, 0, NULL);
1707 p->flags = flags;
1709 spin_unlock(&tb_lock);
1712 /* called from signal handler: invalidate the code and unprotect the
1713 page. Return TRUE if the fault was succesfully handled. */
1714 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1716 unsigned int page_index, prot, pindex;
1717 PageDesc *p, *p1;
1718 target_ulong host_start, host_end, addr;
/* work on whole host pages: one host page may span several target pages */
1720 host_start = address & qemu_host_page_mask;
1721 page_index = host_start >> TARGET_PAGE_BITS;
1722 p1 = page_find(page_index);
1723 if (!p1)
1724 return 0;
1725 host_end = host_start + qemu_host_page_size;
1726 p = p1;
1727 prot = 0;
/* union of the flags of every target page within the host page */
1728 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1729 prot |= p->flags;
1730 p++;
1732 /* if the page was really writable, then we change its
1733 protection back to writable */
1734 if (prot & PAGE_WRITE_ORG) {
1735 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1736 if (!(p1[pindex].flags & PAGE_WRITE)) {
1737 mprotect((void *)g2h(host_start), qemu_host_page_size,
1738 (prot & PAGE_BITS) | PAGE_WRITE);
1739 p1[pindex].flags |= PAGE_WRITE;
1740 /* and since the content will be modified, we must invalidate
1741 the corresponding translated code. */
1742 tb_invalidate_phys_page(address, pc, puc);
1743 #ifdef DEBUG_TB_CHECK
1744 tb_invalidate_check(address);
1745 #endif
1746 return 1;
1749 return 0;
1752 /* call this function when system calls directly modify a memory area */
1753 /* ??? This should be redundant now we have lock_user. */
1754 void page_unprotect_range(target_ulong data, target_ulong data_size)
1756 target_ulong start, end, addr;
1758 start = data;
1759 end = start + data_size;
1760 start &= TARGET_PAGE_MASK;
1761 end = TARGET_PAGE_ALIGN(end);
/* unprotect (and invalidate TBs for) every page in the range */
1762 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1763 page_unprotect(addr, 0, NULL);
/* user-mode emulation: no TLB, so nothing to mark dirty */
1767 static inline void tlb_set_dirty(CPUState *env,
1768 unsigned long addr, target_ulong vaddr)
1771 #endif /* defined(CONFIG_USER_ONLY) */
1773 /* register physical memory. 'size' must be a multiple of the target
1774 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1775 io memory page */
1776 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1777 unsigned long size,
1778 unsigned long phys_offset)
1780 target_phys_addr_t addr, end_addr;
1781 PhysPageDesc *p;
1783 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1784 end_addr = start_addr + size;
1785 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1786 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1787 p->phys_offset = phys_offset;
/* only RAM/ROM pages advance the backing offset; I/O pages all share
   the same handler index */
1788 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1789 phys_offset += TARGET_PAGE_SIZE;
/* Reads from unassigned memory return 0. */
1793 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1795 return 0;
/* Writes to unassigned memory are silently ignored. */
1798 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
/* byte/word/long read handlers for unassigned memory (all return 0) */
1802 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1803 unassigned_mem_readb,
1804 unassigned_mem_readb,
1805 unassigned_mem_readb,
/* byte/word/long write handlers for unassigned memory (all no-ops) */
1808 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1809 unassigned_mem_writeb,
1810 unassigned_mem_writeb,
1811 unassigned_mem_writeb,
/* Slow-path byte write used for clean (not-yet-dirty) RAM pages:
   invalidates any translated code in the page, performs the store,
   updates the dirty bits, and re-enables the fast path once clean. */
1814 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1816 unsigned long ram_addr;
1817 int dirty_flags;
1818 ram_addr = addr - (unsigned long)phys_ram_base;
1819 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1820 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1821 #if !defined(CONFIG_USER_ONLY)
/* invalidate TBs overlapping this 1-byte store */
1822 tb_invalidate_phys_page_fast(ram_addr, 1);
1823 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1824 #endif
1826 stb_p((uint8_t *)(long)addr, val);
1827 #ifdef USE_KQEMU
1828 if (cpu_single_env->kqemu_enabled &&
1829 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1830 kqemu_modify_page(cpu_single_env, ram_addr);
1831 #endif
1832 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1833 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1834 /* we remove the notdirty callback only if the code has been
1835 flushed */
1836 if (dirty_flags == 0xff)
1837 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
/* 16-bit variant of notdirty_mem_writeb (see above). */
1840 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1842 unsigned long ram_addr;
1843 int dirty_flags;
1844 ram_addr = addr - (unsigned long)phys_ram_base;
1845 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1846 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1847 #if !defined(CONFIG_USER_ONLY)
1848 tb_invalidate_phys_page_fast(ram_addr, 2);
1849 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1850 #endif
1852 stw_p((uint8_t *)(long)addr, val);
1853 #ifdef USE_KQEMU
1854 if (cpu_single_env->kqemu_enabled &&
1855 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1856 kqemu_modify_page(cpu_single_env, ram_addr);
1857 #endif
1858 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1859 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1860 /* we remove the notdirty callback only if the code has been
1861 flushed */
1862 if (dirty_flags == 0xff)
1863 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
/* 32-bit variant of notdirty_mem_writeb (see above). */
1866 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1868 unsigned long ram_addr;
1869 int dirty_flags;
1870 ram_addr = addr - (unsigned long)phys_ram_base;
1871 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1872 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1873 #if !defined(CONFIG_USER_ONLY)
1874 tb_invalidate_phys_page_fast(ram_addr, 4);
1875 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1876 #endif
1878 stl_p((uint8_t *)(long)addr, val);
1879 #ifdef USE_KQEMU
1880 if (cpu_single_env->kqemu_enabled &&
1881 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1882 kqemu_modify_page(cpu_single_env, ram_addr);
1883 #endif
1884 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1885 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1886 /* we remove the notdirty callback only if the code has been
1887 flushed */
1888 if (dirty_flags == 0xff)
1889 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
/* placeholder read table for regions (ROM, notdirty) whose reads go
   through the fast RAM path and should never reach an I/O handler */
1892 static CPUReadMemoryFunc *error_mem_read[3] = {
1893 NULL, /* never used */
1894 NULL, /* never used */
1895 NULL, /* never used */
/* byte/word/long write handlers installed for IO_MEM_NOTDIRTY pages */
1898 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1899 notdirty_mem_writeb,
1900 notdirty_mem_writew,
1901 notdirty_mem_writel,
/* Register the three built-in I/O memory regions (ROM, unassigned,
   notdirty) and allocate the per-page dirty-bit array. */
1904 static void io_mem_init(void)
1906 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1907 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1908 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
/* indices below 5 are reserved for the built-in regions above */
1909 io_mem_nb = 5;
1911 /* alloc dirty bits array */
1912 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
/* initially mark everything dirty (0xff = all flag bits set) */
1913 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1916 /* mem_read and mem_write are arrays of functions containing the
1917 function to access byte (index 0), word (index 1) and dword (index
1918 2). All functions must be supplied. If io_index is non zero, the
1919 corresponding io zone is modified. If it is zero, a new io zone is
1920 allocated. The return value can be used with
1921 cpu_register_physical_memory(). (-1) is returned if error. */
1922 int cpu_register_io_memory(int io_index,
1923 CPUReadMemoryFunc **mem_read,
1924 CPUWriteMemoryFunc **mem_write,
1925 void *opaque)
1927 int i;
1929 if (io_index <= 0) {
/* allocate a fresh slot */
1930 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1931 return -1;
1932 io_index = io_mem_nb++;
1933 } else {
/* caller supplied a slot index: just bounds-check it */
1934 if (io_index >= IO_MEM_NB_ENTRIES)
1935 return -1;
/* install the byte/word/dword handlers and the opaque pointer */
1938 for(i = 0;i < 3; i++) {
1939 io_mem_read[io_index][i] = mem_read[i];
1940 io_mem_write[io_index][i] = mem_write[i];
1942 io_mem_opaque[io_index] = opaque;
/* shifted so it can be OR'ed into a phys_offset */
1943 return io_index << IO_MEM_SHIFT;
/* Return the write-handler table for a registered I/O region (argument
   is the shifted value returned by cpu_register_io_memory). */
1946 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1948 return io_mem_write[io_index >> IO_MEM_SHIFT];
/* Return the read-handler table for a registered I/O region (argument
   is the shifted value returned by cpu_register_io_memory). */
1951 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1953 return io_mem_read[io_index >> IO_MEM_SHIFT];
1956 /* physical memory access (slow version, mainly for debug) */
1957 #if defined(CONFIG_USER_ONLY)
/* User-mode variant: copy to/from guest memory page by page, checking
   the PAGE_* access flags. Silently stops at the first invalid page. */
1958 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1959 int len, int is_write)
1961 int l, flags;
1962 target_ulong page;
1963 void * p;
1965 while (len > 0) {
1966 page = addr & TARGET_PAGE_MASK;
/* l = number of bytes remaining in the current page */
1967 l = (page + TARGET_PAGE_SIZE) - addr;
1968 if (l > len)
1969 l = len;
1970 flags = page_get_flags(page);
1971 if (!(flags & PAGE_VALID))
1972 return;
1973 if (is_write) {
1974 if (!(flags & PAGE_WRITE))
1975 return;
/* NOTE(review): the copies below use 'len' rather than the per-page
   'l' — looks suspicious; verify against upstream before relying on it */
1976 p = lock_user(addr, len, 0);
1977 memcpy(p, buf, len);
1978 unlock_user(p, addr, len);
1979 } else {
1980 if (!(flags & PAGE_READ))
1981 return;
1982 p = lock_user(addr, len, 1);
1983 memcpy(buf, p, len);
1984 unlock_user(p, addr, 0);
1986 len -= l;
1987 buf += l;
1988 addr += l;
1992 #else
/* System-mode variant: copy to/from guest physical memory, dispatching
   each page either to RAM (memcpy) or to the registered I/O handlers,
   splitting I/O accesses into the widest aligned 32/16/8-bit units. */
1993 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1994 int len, int is_write)
1996 int l, io_index;
1997 uint8_t *ptr;
1998 uint32_t val;
1999 target_phys_addr_t page;
2000 unsigned long pd;
2001 PhysPageDesc *p;
2003 while (len > 0) {
2004 page = addr & TARGET_PAGE_MASK;
2005 l = (page + TARGET_PAGE_SIZE) - addr;
2006 if (l > len)
2007 l = len;
2008 p = phys_page_find(page >> TARGET_PAGE_BITS);
2009 if (!p) {
2010 pd = IO_MEM_UNASSIGNED;
2011 } else {
2012 pd = p->phys_offset;
2015 if (is_write) {
2016 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2017 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2018 /* XXX: could force cpu_single_env to NULL to avoid
2019 potential bugs */
2020 if (l >= 4 && ((addr & 3) == 0)) {
2021 /* 32 bit write access */
2022 val = ldl_p(buf);
2023 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2024 l = 4;
2025 } else if (l >= 2 && ((addr & 1) == 0)) {
2026 /* 16 bit write access */
2027 val = lduw_p(buf);
2028 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2029 l = 2;
2030 } else {
2031 /* 8 bit write access */
2032 val = ldub_p(buf);
2033 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2034 l = 1;
2036 } else {
2037 unsigned long addr1;
2038 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2039 /* RAM case */
2040 ptr = phys_ram_base + addr1;
2041 memcpy(ptr, buf, l);
2042 if (!cpu_physical_memory_is_dirty(addr1)) {
2043 /* invalidate code */
2044 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2045 /* set dirty bit */
2046 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2047 (0xff & ~CODE_DIRTY_FLAG);
2050 } else {
2051 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2052 /* I/O case */
2053 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2054 if (l >= 4 && ((addr & 3) == 0)) {
2055 /* 32 bit read access */
2056 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2057 stl_p(buf, val);
2058 l = 4;
2059 } else if (l >= 2 && ((addr & 1) == 0)) {
2060 /* 16 bit read access */
2061 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2062 stw_p(buf, val);
2063 l = 2;
2064 } else {
2065 /* 8 bit read access */
2066 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2067 stb_p(buf, val);
2068 l = 1;
2070 } else {
2071 /* RAM case */
2072 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2073 (addr & ~TARGET_PAGE_MASK);
2074 memcpy(buf, ptr, l);
2077 len -= l;
2078 buf += l;
2079 addr += l;
2083 /* used for ROM loading : can write in RAM and ROM */
2084 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2085 const uint8_t *buf, int len)
2087 int l;
2088 uint8_t *ptr;
2089 target_phys_addr_t page;
2090 unsigned long pd;
2091 PhysPageDesc *p;
2093 while (len > 0) {
2094 page = addr & TARGET_PAGE_MASK;
2095 l = (page + TARGET_PAGE_SIZE) - addr;
2096 if (l > len)
2097 l = len;
2098 p = phys_page_find(page >> TARGET_PAGE_BITS);
2099 if (!p) {
2100 pd = IO_MEM_UNASSIGNED;
2101 } else {
2102 pd = p->phys_offset;
/* only RAM and ROM pages are written; I/O and unassigned are skipped */
2105 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2106 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
2107 /* do nothing */
2108 } else {
2109 unsigned long addr1;
2110 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2111 /* ROM/RAM case */
2112 ptr = phys_ram_base + addr1;
2113 memcpy(ptr, buf, l);
2115 len -= l;
2116 buf += l;
2117 addr += l;
2122 /* warning: addr must be aligned */
2123 uint32_t ldl_phys(target_phys_addr_t addr)
2125 int io_index;
2126 uint8_t *ptr;
2127 uint32_t val;
2128 unsigned long pd;
2129 PhysPageDesc *p;
2131 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2132 if (!p) {
2133 pd = IO_MEM_UNASSIGNED;
2134 } else {
2135 pd = p->phys_offset;
2138 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2139 /* I/O case */
2140 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2141 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2142 } else {
2143 /* RAM case */
2144 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2145 (addr & ~TARGET_PAGE_MASK);
2146 val = ldl_p(ptr);
2148 return val;
2151 /* warning: addr must be aligned */
2152 uint64_t ldq_phys(target_phys_addr_t addr)
2154 int io_index;
2155 uint8_t *ptr;
2156 uint64_t val;
2157 unsigned long pd;
2158 PhysPageDesc *p;
2160 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2161 if (!p) {
2162 pd = IO_MEM_UNASSIGNED;
2163 } else {
2164 pd = p->phys_offset;
2167 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2168 /* I/O case */
/* a 64-bit I/O read is split into two 32-bit reads; target byte order
   decides which half comes first */
2169 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2170 #ifdef TARGET_WORDS_BIGENDIAN
2171 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2172 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2173 #else
2174 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2175 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2176 #endif
2177 } else {
2178 /* RAM case */
2179 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2180 (addr & ~TARGET_PAGE_MASK);
2181 val = ldq_p(ptr);
2183 return val;
2186 /* XXX: optimize */
/* Read one byte of guest physical memory via the generic rw path. */
2187 uint32_t ldub_phys(target_phys_addr_t addr)
2189 uint8_t val;
2190 cpu_physical_memory_read(addr, &val, 1);
2191 return val;
2194 /* XXX: optimize */
/* Read a 16-bit value of guest physical memory; tswap16 converts from
   target byte order to host order. */
2195 uint32_t lduw_phys(target_phys_addr_t addr)
2197 uint16_t val;
2198 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2199 return tswap16(val);
2202 /* warning: addr must be aligned. The ram page is not masked as dirty
2203 and the code inside is not invalidated. It is useful if the dirty
2204 bits are used to track modified PTEs */
2205 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2207 int io_index;
2208 uint8_t *ptr;
2209 unsigned long pd;
2210 PhysPageDesc *p;
2212 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2213 if (!p) {
2214 pd = IO_MEM_UNASSIGNED;
2215 } else {
2216 pd = p->phys_offset;
2219 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2220 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2221 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2222 } else {
/* direct RAM store, deliberately skipping dirty tracking */
2223 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2224 (addr & ~TARGET_PAGE_MASK);
2225 stl_p(ptr, val);
2229 /* warning: addr must be aligned */
2230 void stl_phys(target_phys_addr_t addr, uint32_t val)
2232 int io_index;
2233 uint8_t *ptr;
2234 unsigned long pd;
2235 PhysPageDesc *p;
2237 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2238 if (!p) {
2239 pd = IO_MEM_UNASSIGNED;
2240 } else {
2241 pd = p->phys_offset;
2244 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2245 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2246 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2247 } else {
2248 unsigned long addr1;
2249 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2250 /* RAM case */
2251 ptr = phys_ram_base + addr1;
2252 stl_p(ptr, val);
/* unlike stl_phys_notdirty, keep the dirty bits and TBs coherent */
2253 if (!cpu_physical_memory_is_dirty(addr1)) {
2254 /* invalidate code */
2255 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2256 /* set dirty bit */
2257 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2258 (0xff & ~CODE_DIRTY_FLAG);
2263 /* XXX: optimize */
/* Write one byte of guest physical memory via the generic rw path. */
2264 void stb_phys(target_phys_addr_t addr, uint32_t val)
2266 uint8_t v = val;
2267 cpu_physical_memory_write(addr, &v, 1);
2270 /* XXX: optimize */
/* Write a 16-bit value in target byte order (tswap16 converts host→target). */
2271 void stw_phys(target_phys_addr_t addr, uint32_t val)
2273 uint16_t v = tswap16(val);
2274 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2277 /* XXX: optimize */
/* Write a 64-bit value in target byte order (tswap64 converts host→target). */
2278 void stq_phys(target_phys_addr_t addr, uint64_t val)
2280 val = tswap64(val);
2281 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2284 #endif
2286 /* virtual memory access for debug */
2287 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2288 uint8_t *buf, int len, int is_write)
2290 int l;
2291 target_ulong page, phys_addr;
2293 while (len > 0) {
2294 page = addr & TARGET_PAGE_MASK;
/* translate using the debug MMU walk (no TLB fill side effects) */
2295 phys_addr = cpu_get_phys_page_debug(env, page);
2296 /* if no physical page mapped, return an error */
2297 if (phys_addr == -1)
2298 return -1;
2299 l = (page + TARGET_PAGE_SIZE) - addr;
2300 if (l > len)
2301 l = len;
2302 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2303 buf, l, is_write);
2304 len -= l;
2305 buf += l;
2306 addr += l;
2308 return 0;
/* Print translation-cache statistics (TB counts, sizes, jump-chaining
   and flush counters) through the supplied fprintf-like callback. */
2311 void dump_exec_info(FILE *f,
2312 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2314 int i, target_code_size, max_target_code_size;
2315 int direct_jmp_count, direct_jmp2_count, cross_page;
2316 TranslationBlock *tb;
2318 target_code_size = 0;
2319 max_target_code_size = 0;
2320 cross_page = 0;
2321 direct_jmp_count = 0;
2322 direct_jmp2_count = 0;
2323 for(i = 0; i < nb_tbs; i++) {
2324 tb = &tbs[i];
2325 target_code_size += tb->size;
2326 if (tb->size > max_target_code_size)
2327 max_target_code_size = tb->size;
/* page_addr[1] != -1 means the TB's code straddles two guest pages */
2328 if (tb->page_addr[1] != -1)
2329 cross_page++;
/* 0xffff marks an unpatched jump slot; anything else is chained */
2330 if (tb->tb_next_offset[0] != 0xffff) {
2331 direct_jmp_count++;
2332 if (tb->tb_next_offset[1] != 0xffff) {
2333 direct_jmp2_count++;
2337 /* XXX: avoid using doubles ? */
2338 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2339 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2340 nb_tbs ? target_code_size / nb_tbs : 0,
2341 max_target_code_size);
2342 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2343 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2344 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2345 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2346 cross_page,
2347 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2348 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2349 direct_jmp_count,
2350 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2351 direct_jmp2_count,
2352 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2353 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2354 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2355 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2358 #if !defined(CONFIG_USER_ONLY)
2360 #define MMUSUFFIX _cmmu
2361 #define GETPC() NULL
2362 #define env cpu_single_env
2363 #define SOFTMMU_CODE_ACCESS
2365 #define SHIFT 0
2366 #include "softmmu_template.h"
2368 #define SHIFT 1
2369 #include "softmmu_template.h"
2371 #define SHIFT 2
2372 #include "softmmu_template.h"
2374 #define SHIFT 3
2375 #include "softmmu_template.h"
2377 #undef env
2379 #endif