[qemu/mini2440.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 #if !defined(CONFIG_USER_ONLY)
51 /* TB consistency checks only implemented for usermode emulation. */
52 #undef DEBUG_TB_CHECK
53 #endif
55 /* threshold to flush the translated code buffer */
56 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
58 #define SMC_BITMAP_USE_THRESHOLD 10
60 #define MMAP_AREA_START 0x00000000
61 #define MMAP_AREA_END 0xa8000000
63 #if defined(TARGET_SPARC64)
64 #define TARGET_PHYS_ADDR_SPACE_BITS 41
65 #elif defined(TARGET_PPC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 42
67 #else
68 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69 #define TARGET_PHYS_ADDR_SPACE_BITS 32
70 #endif
72 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
73 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
74 int nb_tbs;
75 /* any access to the tbs or the page table must use this lock */
76 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
78 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
79 uint8_t *code_gen_ptr;
81 int phys_ram_size;
82 int phys_ram_fd;
83 uint8_t *phys_ram_base;
84 uint8_t *phys_ram_dirty;
85 static ram_addr_t phys_ram_alloc_offset = 0;
87 CPUState *first_cpu;
88 /* current CPU in the current thread. It is only valid inside
89 cpu_exec() */
90 CPUState *cpu_single_env;
92 typedef struct PageDesc {
93 /* list of TBs intersecting this ram page */
94 TranslationBlock *first_tb;
95 /* in order to optimize self modifying code, we count the number
96 of lookups we do to a given page to use a bitmap */
97 unsigned int code_write_count;
98 uint8_t *code_bitmap;
99 #if defined(CONFIG_USER_ONLY)
100 unsigned long flags;
101 #endif
102 } PageDesc;
104 typedef struct PhysPageDesc {
105 /* offset in host memory of the page + io_index in the low 12 bits */
106 uint32_t phys_offset;
107 } PhysPageDesc;
109 #define L2_BITS 10
110 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
112 #define L1_SIZE (1 << L1_BITS)
113 #define L2_SIZE (1 << L2_BITS)
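/* Editor's note (illustrative, not part of the original file): the page
   descriptor tables in this file are two-level.  A target page index is
   split as

       l1 = index >> L2_BITS;        /* upper L1_BITS bits, indexes l1_map */
       l2 = index & (L2_SIZE - 1);   /* lower L2_BITS bits, indexes a PageDesc[] block */

   With 4 KB target pages (TARGET_PAGE_BITS == 12) and L2_BITS == 10 this
   gives L1_SIZE == L2_SIZE == 1024; second-level blocks are only allocated
   on first use (see page_find_alloc below). */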
115 static void io_mem_init(void);
117 unsigned long qemu_real_host_page_size;
118 unsigned long qemu_host_page_bits;
119 unsigned long qemu_host_page_size;
120 unsigned long qemu_host_page_mask;
122 /* XXX: for system emulation, it could just be an array */
123 static PageDesc *l1_map[L1_SIZE];
124 PhysPageDesc **l1_phys_map;
126 /* io memory support */
127 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
128 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
129 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
130 static int io_mem_nb;
131 #if defined(CONFIG_SOFTMMU)
132 static int io_mem_watch;
133 #endif
135 /* log support */
136 char *logfilename = "/tmp/qemu.log";
137 FILE *logfile;
138 int loglevel;
140 /* statistics */
141 static int tlb_flush_count;
142 static int tb_flush_count;
143 static int tb_phys_invalidate_count;
145 static void page_init(void)
147 /* NOTE: we can always assume that qemu_host_page_size >=
148 TARGET_PAGE_SIZE */
149 #ifdef _WIN32
151 SYSTEM_INFO system_info;
152 DWORD old_protect;
154 GetSystemInfo(&system_info);
155 qemu_real_host_page_size = system_info.dwPageSize;
157 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
158 PAGE_EXECUTE_READWRITE, &old_protect);
160 #else
161 qemu_real_host_page_size = getpagesize();
163 unsigned long start, end;
165 start = (unsigned long)code_gen_buffer;
166 start &= ~(qemu_real_host_page_size - 1);
168 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
169 end += qemu_real_host_page_size - 1;
170 end &= ~(qemu_real_host_page_size - 1);
172 mprotect((void *)start, end - start,
173 PROT_READ | PROT_WRITE | PROT_EXEC);
175 #endif
177 if (qemu_host_page_size == 0)
178 qemu_host_page_size = qemu_real_host_page_size;
179 if (qemu_host_page_size < TARGET_PAGE_SIZE)
180 qemu_host_page_size = TARGET_PAGE_SIZE;
181 qemu_host_page_bits = 0;
182 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
183 qemu_host_page_bits++;
184 qemu_host_page_mask = ~(qemu_host_page_size - 1);
185 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
186 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
189 static inline PageDesc *page_find_alloc(unsigned int index)
191 PageDesc **lp, *p;
193 lp = &l1_map[index >> L2_BITS];
194 p = *lp;
195 if (!p) {
196 /* allocate if not found */
197 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
198 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
199 *lp = p;
201 return p + (index & (L2_SIZE - 1));
204 static inline PageDesc *page_find(unsigned int index)
206 PageDesc *p;
208 p = l1_map[index >> L2_BITS];
209 if (!p)
210 return 0;
211 return p + (index & (L2_SIZE - 1));
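/* Illustrative usage (added by the editor, not in the original source): the
   lookup pattern used throughout this file is

       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
       if (p && p->first_tb) {
           /* at least one TB was translated from code on this target page */
       }

   page_find() returns 0 when no second-level block was ever allocated for
   that range, while page_find_alloc() allocates one on demand. */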
214 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
216 void **lp, **p;
217 PhysPageDesc *pd;
219 p = (void **)l1_phys_map;
220 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
222 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
223 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
224 #endif
225 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
226 p = *lp;
227 if (!p) {
228 /* allocate if not found */
229 if (!alloc)
230 return NULL;
231 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
232 memset(p, 0, sizeof(void *) * L1_SIZE);
233 *lp = p;
235 #endif
236 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
237 pd = *lp;
238 if (!pd) {
239 int i;
240 /* allocate if not found */
241 if (!alloc)
242 return NULL;
243 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
244 *lp = pd;
245 for (i = 0; i < L2_SIZE; i++)
246 pd[i].phys_offset = IO_MEM_UNASSIGNED;
248 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
251 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
253 return phys_page_find_alloc(index, 0);
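/* Editor's note: as the PhysPageDesc comment above says, phys_offset packs
   the page-aligned offset of the page in host RAM together with an io_index
   tag in the low bits; the code below routinely tests
   (pd & ~TARGET_PAGE_MASK) against IO_MEM_RAM / IO_MEM_ROM /
   IO_MEM_UNASSIGNED to choose between direct RAM access and I/O callbacks
   (see tlb_set_page_exec and cpu_register_physical_memory). */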
256 #if !defined(CONFIG_USER_ONLY)
257 static void tlb_protect_code(ram_addr_t ram_addr);
258 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
259 target_ulong vaddr);
260 #endif
262 void cpu_exec_init(CPUState *env)
264 CPUState **penv;
265 int cpu_index;
267 if (!code_gen_ptr) {
268 code_gen_ptr = code_gen_buffer;
269 page_init();
270 io_mem_init();
272 env->next_cpu = NULL;
273 penv = &first_cpu;
274 cpu_index = 0;
275 while (*penv != NULL) {
276 penv = (CPUState **)&(*penv)->next_cpu;
277 cpu_index++;
279 env->cpu_index = cpu_index;
280 env->nb_watchpoints = 0;
281 *penv = env;
284 static inline void invalidate_page_bitmap(PageDesc *p)
286 if (p->code_bitmap) {
287 qemu_free(p->code_bitmap);
288 p->code_bitmap = NULL;
290 p->code_write_count = 0;
293 /* set to NULL all the 'first_tb' fields in all PageDescs */
294 static void page_flush_tb(void)
296 int i, j;
297 PageDesc *p;
299 for(i = 0; i < L1_SIZE; i++) {
300 p = l1_map[i];
301 if (p) {
302 for(j = 0; j < L2_SIZE; j++) {
303 p->first_tb = NULL;
304 invalidate_page_bitmap(p);
305 p++;
311 /* flush all the translation blocks */
312 /* XXX: tb_flush is currently not thread safe */
313 void tb_flush(CPUState *env1)
315 CPUState *env;
316 #if defined(DEBUG_FLUSH)
317 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
318 code_gen_ptr - code_gen_buffer,
319 nb_tbs,
320 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
321 #endif
322 nb_tbs = 0;
324 for(env = first_cpu; env != NULL; env = env->next_cpu) {
325 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
328 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
329 page_flush_tb();
331 code_gen_ptr = code_gen_buffer;
332 /* XXX: flush processor icache at this point if cache flush is
333 expensive */
334 tb_flush_count++;
337 #ifdef DEBUG_TB_CHECK
339 static void tb_invalidate_check(unsigned long address)
341 TranslationBlock *tb;
342 int i;
343 address &= TARGET_PAGE_MASK;
344 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
345 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
346 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
347 address >= tb->pc + tb->size)) {
348 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
349 address, (long)tb->pc, tb->size);
355 /* verify that all the pages have correct rights for code */
356 static void tb_page_check(void)
358 TranslationBlock *tb;
359 int i, flags1, flags2;
361 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
362 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
363 flags1 = page_get_flags(tb->pc);
364 flags2 = page_get_flags(tb->pc + tb->size - 1);
365 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
366 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
367 (long)tb->pc, tb->size, flags1, flags2);
373 void tb_jmp_check(TranslationBlock *tb)
375 TranslationBlock *tb1;
376 unsigned int n1;
378 /* suppress any remaining jumps to this TB */
379 tb1 = tb->jmp_first;
380 for(;;) {
381 n1 = (long)tb1 & 3;
382 tb1 = (TranslationBlock *)((long)tb1 & ~3);
383 if (n1 == 2)
384 break;
385 tb1 = tb1->jmp_next[n1];
387 /* check end of list */
388 if (tb1 != tb) {
389 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
393 #endif
395 /* invalidate one TB */
396 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
397 int next_offset)
399 TranslationBlock *tb1;
400 for(;;) {
401 tb1 = *ptb;
402 if (tb1 == tb) {
403 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
404 break;
406 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
410 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
412 TranslationBlock *tb1;
413 unsigned int n1;
415 for(;;) {
416 tb1 = *ptb;
417 n1 = (long)tb1 & 3;
418 tb1 = (TranslationBlock *)((long)tb1 & ~3);
419 if (tb1 == tb) {
420 *ptb = tb1->page_next[n1];
421 break;
423 ptb = &tb1->page_next[n1];
427 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
429 TranslationBlock *tb1, **ptb;
430 unsigned int n1;
432 ptb = &tb->jmp_next[n];
433 tb1 = *ptb;
434 if (tb1) {
435 /* find tb(n) in circular list */
436 for(;;) {
437 tb1 = *ptb;
438 n1 = (long)tb1 & 3;
439 tb1 = (TranslationBlock *)((long)tb1 & ~3);
440 if (n1 == n && tb1 == tb)
441 break;
442 if (n1 == 2) {
443 ptb = &tb1->jmp_first;
444 } else {
445 ptb = &tb1->jmp_next[n1];
448 /* now we can suppress tb(n) from the list */
449 *ptb = tb->jmp_next[n];
451 tb->jmp_next[n] = NULL;
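/* Editor's note: the page_next[]/jmp_next[] lists walked above use tagged
   pointers -- the slot number is stored in the two low bits of each
   TranslationBlock pointer, hence the recurring idiom

       n1 = (long)tb1 & 3;
       tb1 = (TranslationBlock *)((long)tb1 & ~3);

   n1 == 0 or 1 names the page/jump slot inside that TB, and n1 == 2 marks
   the head of the circular jmp_first list (see tb_link_phys below). */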
455 /* reset the jump entry 'n' of a TB so that it is not chained to
456 another TB */
457 static inline void tb_reset_jump(TranslationBlock *tb, int n)
459 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
462 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
464 CPUState *env;
465 PageDesc *p;
466 unsigned int h, n1;
467 target_ulong phys_pc;
468 TranslationBlock *tb1, *tb2;
470 /* remove the TB from the hash list */
471 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
472 h = tb_phys_hash_func(phys_pc);
473 tb_remove(&tb_phys_hash[h], tb,
474 offsetof(TranslationBlock, phys_hash_next));
476 /* remove the TB from the page list */
477 if (tb->page_addr[0] != page_addr) {
478 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
479 tb_page_remove(&p->first_tb, tb);
480 invalidate_page_bitmap(p);
482 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
483 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
484 tb_page_remove(&p->first_tb, tb);
485 invalidate_page_bitmap(p);
488 tb_invalidated_flag = 1;
490 /* remove the TB from the hash list */
491 h = tb_jmp_cache_hash_func(tb->pc);
492 for(env = first_cpu; env != NULL; env = env->next_cpu) {
493 if (env->tb_jmp_cache[h] == tb)
494 env->tb_jmp_cache[h] = NULL;
497 /* suppress this TB from the two jump lists */
498 tb_jmp_remove(tb, 0);
499 tb_jmp_remove(tb, 1);
501 /* suppress any remaining jumps to this TB */
502 tb1 = tb->jmp_first;
503 for(;;) {
504 n1 = (long)tb1 & 3;
505 if (n1 == 2)
506 break;
507 tb1 = (TranslationBlock *)((long)tb1 & ~3);
508 tb2 = tb1->jmp_next[n1];
509 tb_reset_jump(tb1, n1);
510 tb1->jmp_next[n1] = NULL;
511 tb1 = tb2;
513 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
515 tb_phys_invalidate_count++;
518 static inline void set_bits(uint8_t *tab, int start, int len)
520 int end, mask, end1;
522 end = start + len;
523 tab += start >> 3;
524 mask = 0xff << (start & 7);
525 if ((start & ~7) == (end & ~7)) {
526 if (start < end) {
527 mask &= ~(0xff << (end & 7));
528 *tab |= mask;
530 } else {
531 *tab++ |= mask;
532 start = (start + 8) & ~7;
533 end1 = end & ~7;
534 while (start < end1) {
535 *tab++ = 0xff;
536 start += 8;
538 if (start < end) {
539 mask = ~(0xff << (end & 7));
540 *tab |= mask;
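/* Worked example (added by the editor): set_bits(tab, 5, 10) marks bits
   5..14.  The first byte receives mask 0xff << 5 == 0xe0 (bits 5..7); start
   is then rounded up to 8, the full-byte loop does nothing since end1 == 8,
   and the final partial byte receives ~(0xff << (15 & 7)) == 0x7f, i.e. bit
   indices 8..14.  build_page_bitmap() below uses this to record which bytes
   of a page are covered by translated code. */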
545 static void build_page_bitmap(PageDesc *p)
547 int n, tb_start, tb_end;
548 TranslationBlock *tb;
550 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
551 if (!p->code_bitmap)
552 return;
553 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
555 tb = p->first_tb;
556 while (tb != NULL) {
557 n = (long)tb & 3;
558 tb = (TranslationBlock *)((long)tb & ~3);
559 /* NOTE: this is subtle as a TB may span two physical pages */
560 if (n == 0) {
561 /* NOTE: tb_end may be after the end of the page, but
562 it is not a problem */
563 tb_start = tb->pc & ~TARGET_PAGE_MASK;
564 tb_end = tb_start + tb->size;
565 if (tb_end > TARGET_PAGE_SIZE)
566 tb_end = TARGET_PAGE_SIZE;
567 } else {
568 tb_start = 0;
569 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
571 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
572 tb = tb->page_next[n];
576 #ifdef TARGET_HAS_PRECISE_SMC
578 static void tb_gen_code(CPUState *env,
579 target_ulong pc, target_ulong cs_base, int flags,
580 int cflags)
582 TranslationBlock *tb;
583 uint8_t *tc_ptr;
584 target_ulong phys_pc, phys_page2, virt_page2;
585 int code_gen_size;
587 phys_pc = get_phys_addr_code(env, pc);
588 tb = tb_alloc(pc);
589 if (!tb) {
590 /* flush must be done */
591 tb_flush(env);
592 /* cannot fail at this point */
593 tb = tb_alloc(pc);
595 tc_ptr = code_gen_ptr;
596 tb->tc_ptr = tc_ptr;
597 tb->cs_base = cs_base;
598 tb->flags = flags;
599 tb->cflags = cflags;
600 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
601 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
603 /* check next page if needed */
604 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
605 phys_page2 = -1;
606 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
607 phys_page2 = get_phys_addr_code(env, virt_page2);
609 tb_link_phys(tb, phys_pc, phys_page2);
611 #endif
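/* Editor's note: a TB whose bytes cross a target page boundary is registered
   on both pages -- phys_page2 above is the physical address of the second
   page, or -1 when the TB fits entirely in one page -- so that a write to
   either page invalidates it (see tb_link_phys and
   tb_invalidate_phys_page_range). */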
613 /* invalidate all TBs which intersect with the target physical page
614 range [start, end). NOTE: start and end must refer to
615 the same physical page. 'is_cpu_write_access' should be true if called
616 from a real cpu write access: the virtual CPU will exit the current
617 TB if code is modified inside this TB. */
618 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
619 int is_cpu_write_access)
621 int n, current_tb_modified, current_tb_not_found, current_flags;
622 CPUState *env = cpu_single_env;
623 PageDesc *p;
624 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
625 target_ulong tb_start, tb_end;
626 target_ulong current_pc, current_cs_base;
628 p = page_find(start >> TARGET_PAGE_BITS);
629 if (!p)
630 return;
631 if (!p->code_bitmap &&
632 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
633 is_cpu_write_access) {
634 /* build code bitmap */
635 build_page_bitmap(p);
638 /* we remove all the TBs in the range [start, end[ */
639 /* XXX: see if in some cases it could be faster to invalidate all the code */
640 current_tb_not_found = is_cpu_write_access;
641 current_tb_modified = 0;
642 current_tb = NULL; /* avoid warning */
643 current_pc = 0; /* avoid warning */
644 current_cs_base = 0; /* avoid warning */
645 current_flags = 0; /* avoid warning */
646 tb = p->first_tb;
647 while (tb != NULL) {
648 n = (long)tb & 3;
649 tb = (TranslationBlock *)((long)tb & ~3);
650 tb_next = tb->page_next[n];
651 /* NOTE: this is subtle as a TB may span two physical pages */
652 if (n == 0) {
653 /* NOTE: tb_end may be after the end of the page, but
654 it is not a problem */
655 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
656 tb_end = tb_start + tb->size;
657 } else {
658 tb_start = tb->page_addr[1];
659 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
661 if (!(tb_end <= start || tb_start >= end)) {
662 #ifdef TARGET_HAS_PRECISE_SMC
663 if (current_tb_not_found) {
664 current_tb_not_found = 0;
665 current_tb = NULL;
666 if (env->mem_write_pc) {
667 /* now we have a real cpu fault */
668 current_tb = tb_find_pc(env->mem_write_pc);
671 if (current_tb == tb &&
672 !(current_tb->cflags & CF_SINGLE_INSN)) {
673 /* If we are modifying the current TB, we must stop
674 its execution. We could be more precise by checking
675 that the modification is after the current PC, but it
676 would require a specialized function to partially
677 restore the CPU state */
679 current_tb_modified = 1;
680 cpu_restore_state(current_tb, env,
681 env->mem_write_pc, NULL);
682 #if defined(TARGET_I386)
683 current_flags = env->hflags;
684 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
685 current_cs_base = (target_ulong)env->segs[R_CS].base;
686 current_pc = current_cs_base + env->eip;
687 #else
688 #error unsupported CPU
689 #endif
691 #endif /* TARGET_HAS_PRECISE_SMC */
692 /* we need to do that to handle the case where a signal
693 occurs while doing tb_phys_invalidate() */
694 saved_tb = NULL;
695 if (env) {
696 saved_tb = env->current_tb;
697 env->current_tb = NULL;
699 tb_phys_invalidate(tb, -1);
700 if (env) {
701 env->current_tb = saved_tb;
702 if (env->interrupt_request && env->current_tb)
703 cpu_interrupt(env, env->interrupt_request);
706 tb = tb_next;
708 #if !defined(CONFIG_USER_ONLY)
709 /* if no code remaining, no need to continue to use slow writes */
710 if (!p->first_tb) {
711 invalidate_page_bitmap(p);
712 if (is_cpu_write_access) {
713 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
716 #endif
717 #ifdef TARGET_HAS_PRECISE_SMC
718 if (current_tb_modified) {
719 /* we generate a block containing just the instruction
720 modifying the memory. It will ensure that it cannot modify
721 itself */
722 env->current_tb = NULL;
723 tb_gen_code(env, current_pc, current_cs_base, current_flags,
724 CF_SINGLE_INSN);
725 cpu_resume_from_signal(env, NULL);
727 #endif
730 /* len must be <= 8 and start must be a multiple of len */
731 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
733 PageDesc *p;
734 int offset, b;
735 #if 0
736 if (1) {
737 if (loglevel) {
738 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
739 cpu_single_env->mem_write_vaddr, len,
740 cpu_single_env->eip,
741 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
744 #endif
745 p = page_find(start >> TARGET_PAGE_BITS);
746 if (!p)
747 return;
748 if (p->code_bitmap) {
749 offset = start & ~TARGET_PAGE_MASK;
750 b = p->code_bitmap[offset >> 3] >> (offset & 7);
751 if (b & ((1 << len) - 1))
752 goto do_invalidate;
753 } else {
754 do_invalidate:
755 tb_invalidate_phys_page_range(start, start + len, 1);
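/* Editor's note: for the lengths actually passed in by the notdirty write
   handlers (1, 2 or 4 bytes, aligned to their own size), the bits of
   interest always fall inside a single byte of code_bitmap, which is why the
   single shift-and-mask test above is sufficient.  The bitmap itself is only
   built once a page has taken SMC_BITMAP_USE_THRESHOLD write faults from the
   CPU (see tb_invalidate_phys_page_range above). */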
759 #if !defined(CONFIG_SOFTMMU)
760 static void tb_invalidate_phys_page(target_ulong addr,
761 unsigned long pc, void *puc)
763 int n, current_flags, current_tb_modified;
764 target_ulong current_pc, current_cs_base;
765 PageDesc *p;
766 TranslationBlock *tb, *current_tb;
767 #ifdef TARGET_HAS_PRECISE_SMC
768 CPUState *env = cpu_single_env;
769 #endif
771 addr &= TARGET_PAGE_MASK;
772 p = page_find(addr >> TARGET_PAGE_BITS);
773 if (!p)
774 return;
775 tb = p->first_tb;
776 current_tb_modified = 0;
777 current_tb = NULL;
778 current_pc = 0; /* avoid warning */
779 current_cs_base = 0; /* avoid warning */
780 current_flags = 0; /* avoid warning */
781 #ifdef TARGET_HAS_PRECISE_SMC
782 if (tb && pc != 0) {
783 current_tb = tb_find_pc(pc);
785 #endif
786 while (tb != NULL) {
787 n = (long)tb & 3;
788 tb = (TranslationBlock *)((long)tb & ~3);
789 #ifdef TARGET_HAS_PRECISE_SMC
790 if (current_tb == tb &&
791 !(current_tb->cflags & CF_SINGLE_INSN)) {
792 /* If we are modifying the current TB, we must stop
793 its execution. We could be more precise by checking
794 that the modification is after the current PC, but it
795 would require a specialized function to partially
796 restore the CPU state */
798 current_tb_modified = 1;
799 cpu_restore_state(current_tb, env, pc, puc);
800 #if defined(TARGET_I386)
801 current_flags = env->hflags;
802 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
803 current_cs_base = (target_ulong)env->segs[R_CS].base;
804 current_pc = current_cs_base + env->eip;
805 #else
806 #error unsupported CPU
807 #endif
809 #endif /* TARGET_HAS_PRECISE_SMC */
810 tb_phys_invalidate(tb, addr);
811 tb = tb->page_next[n];
813 p->first_tb = NULL;
814 #ifdef TARGET_HAS_PRECISE_SMC
815 if (current_tb_modified) {
816 /* we generate a block containing just the instruction
817 modifying the memory. It will ensure that it cannot modify
818 itself */
819 env->current_tb = NULL;
820 tb_gen_code(env, current_pc, current_cs_base, current_flags,
821 CF_SINGLE_INSN);
822 cpu_resume_from_signal(env, puc);
824 #endif
826 #endif
828 /* add the tb in the target page and protect it if necessary */
829 static inline void tb_alloc_page(TranslationBlock *tb,
830 unsigned int n, target_ulong page_addr)
832 PageDesc *p;
833 TranslationBlock *last_first_tb;
835 tb->page_addr[n] = page_addr;
836 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
837 tb->page_next[n] = p->first_tb;
838 last_first_tb = p->first_tb;
839 p->first_tb = (TranslationBlock *)((long)tb | n);
840 invalidate_page_bitmap(p);
842 #if defined(TARGET_HAS_SMC) || 1
844 #if defined(CONFIG_USER_ONLY)
845 if (p->flags & PAGE_WRITE) {
846 target_ulong addr;
847 PageDesc *p2;
848 int prot;
850 /* force the host page to be non-writable (writes will have a
851 page fault + mprotect overhead) */
852 page_addr &= qemu_host_page_mask;
853 prot = 0;
854 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
855 addr += TARGET_PAGE_SIZE) {
857 p2 = page_find (addr >> TARGET_PAGE_BITS);
858 if (!p2)
859 continue;
860 prot |= p2->flags;
861 p2->flags &= ~PAGE_WRITE;
862 page_get_flags(addr);
864 mprotect(g2h(page_addr), qemu_host_page_size,
865 (prot & PAGE_BITS) & ~PAGE_WRITE);
866 #ifdef DEBUG_TB_INVALIDATE
867 printf("protecting code page: 0x%08lx\n",
868 page_addr);
869 #endif
871 #else
872 /* if some code is already present, then the pages are already
873 protected. So we handle the case where only the first TB is
874 allocated in a physical page */
875 if (!last_first_tb) {
876 tlb_protect_code(page_addr);
878 #endif
880 #endif /* TARGET_HAS_SMC */
883 /* Allocate a new translation block. Flush the translation buffer if
884 too many translation blocks or too much generated code. */
885 TranslationBlock *tb_alloc(target_ulong pc)
887 TranslationBlock *tb;
889 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
890 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
891 return NULL;
892 tb = &tbs[nb_tbs++];
893 tb->pc = pc;
894 tb->cflags = 0;
895 return tb;
898 /* add a new TB and link it to the physical page tables. phys_page2 is
899 (-1) to indicate that only one page contains the TB. */
900 void tb_link_phys(TranslationBlock *tb,
901 target_ulong phys_pc, target_ulong phys_page2)
903 unsigned int h;
904 TranslationBlock **ptb;
906 /* add in the physical hash table */
907 h = tb_phys_hash_func(phys_pc);
908 ptb = &tb_phys_hash[h];
909 tb->phys_hash_next = *ptb;
910 *ptb = tb;
912 /* add in the page list */
913 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
914 if (phys_page2 != -1)
915 tb_alloc_page(tb, 1, phys_page2);
916 else
917 tb->page_addr[1] = -1;
919 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
920 tb->jmp_next[0] = NULL;
921 tb->jmp_next[1] = NULL;
922 #ifdef USE_CODE_COPY
923 tb->cflags &= ~CF_FP_USED;
924 if (tb->cflags & CF_TB_FP_USED)
925 tb->cflags |= CF_FP_USED;
926 #endif
928 /* init original jump addresses */
929 if (tb->tb_next_offset[0] != 0xffff)
930 tb_reset_jump(tb, 0);
931 if (tb->tb_next_offset[1] != 0xffff)
932 tb_reset_jump(tb, 1);
934 #ifdef DEBUG_TB_CHECK
935 tb_page_check();
936 #endif
939 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
940 tb[1].tc_ptr. Return NULL if not found */
941 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
943 int m_min, m_max, m;
944 unsigned long v;
945 TranslationBlock *tb;
947 if (nb_tbs <= 0)
948 return NULL;
949 if (tc_ptr < (unsigned long)code_gen_buffer ||
950 tc_ptr >= (unsigned long)code_gen_ptr)
951 return NULL;
952 /* binary search (cf Knuth) */
953 m_min = 0;
954 m_max = nb_tbs - 1;
955 while (m_min <= m_max) {
956 m = (m_min + m_max) >> 1;
957 tb = &tbs[m];
958 v = (unsigned long)tb->tc_ptr;
959 if (v == tc_ptr)
960 return tb;
961 else if (tc_ptr < v) {
962 m_max = m - 1;
963 } else {
964 m_min = m + 1;
967 return &tbs[m_max];
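/* Editor's note: the binary search above relies on tbs[] being filled in
   allocation order while code_gen_ptr only ever advances, so tc_ptr is
   monotonically increasing with the index; &tbs[m_max] is therefore the last
   TB whose tc_ptr is <= the searched host address. */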
970 static void tb_reset_jump_recursive(TranslationBlock *tb);
972 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
974 TranslationBlock *tb1, *tb_next, **ptb;
975 unsigned int n1;
977 tb1 = tb->jmp_next[n];
978 if (tb1 != NULL) {
979 /* find head of list */
980 for(;;) {
981 n1 = (long)tb1 & 3;
982 tb1 = (TranslationBlock *)((long)tb1 & ~3);
983 if (n1 == 2)
984 break;
985 tb1 = tb1->jmp_next[n1];
987 /* we are now sure that tb jumps to tb1 */
988 tb_next = tb1;
990 /* remove tb from the jmp_first list */
991 ptb = &tb_next->jmp_first;
992 for(;;) {
993 tb1 = *ptb;
994 n1 = (long)tb1 & 3;
995 tb1 = (TranslationBlock *)((long)tb1 & ~3);
996 if (n1 == n && tb1 == tb)
997 break;
998 ptb = &tb1->jmp_next[n1];
1000 *ptb = tb->jmp_next[n];
1001 tb->jmp_next[n] = NULL;
1003 /* suppress the jump to next tb in generated code */
1004 tb_reset_jump(tb, n);
1006 /* suppress jumps in the tb on which we could have jumped */
1007 tb_reset_jump_recursive(tb_next);
1011 static void tb_reset_jump_recursive(TranslationBlock *tb)
1013 tb_reset_jump_recursive2(tb, 0);
1014 tb_reset_jump_recursive2(tb, 1);
1017 #if defined(TARGET_HAS_ICE)
1018 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1020 target_ulong addr, pd;
1021 ram_addr_t ram_addr;
1022 PhysPageDesc *p;
1024 addr = cpu_get_phys_page_debug(env, pc);
1025 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1026 if (!p) {
1027 pd = IO_MEM_UNASSIGNED;
1028 } else {
1029 pd = p->phys_offset;
1031 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1032 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1034 #endif
1036 /* Add a watchpoint. */
1037 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1039 int i;
1041 for (i = 0; i < env->nb_watchpoints; i++) {
1042 if (addr == env->watchpoint[i].vaddr)
1043 return 0;
1045 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1046 return -1;
1048 i = env->nb_watchpoints++;
1049 env->watchpoint[i].vaddr = addr;
1050 tlb_flush_page(env, addr);
1051 /* FIXME: This flush is needed because of the hack to make memory ops
1052 terminate the TB. It can be removed once the proper IO trap and
1053 re-execute bits are in. */
1054 tb_flush(env);
1055 return i;
1058 /* Remove a watchpoint. */
1059 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1061 int i;
1063 for (i = 0; i < env->nb_watchpoints; i++) {
1064 if (addr == env->watchpoint[i].vaddr) {
1065 env->nb_watchpoints--;
1066 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1067 tlb_flush_page(env, addr);
1068 return 0;
1071 return -1;
1074 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1075 breakpoint is reached */
1076 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1078 #if defined(TARGET_HAS_ICE)
1079 int i;
1081 for(i = 0; i < env->nb_breakpoints; i++) {
1082 if (env->breakpoints[i] == pc)
1083 return 0;
1086 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1087 return -1;
1088 env->breakpoints[env->nb_breakpoints++] = pc;
1090 breakpoint_invalidate(env, pc);
1091 return 0;
1092 #else
1093 return -1;
1094 #endif
1097 /* remove a breakpoint */
1098 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1100 #if defined(TARGET_HAS_ICE)
1101 int i;
1102 for(i = 0; i < env->nb_breakpoints; i++) {
1103 if (env->breakpoints[i] == pc)
1104 goto found;
1106 return -1;
1107 found:
1108 env->nb_breakpoints--;
1109 if (i < env->nb_breakpoints)
1110 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1112 breakpoint_invalidate(env, pc);
1113 return 0;
1114 #else
1115 return -1;
1116 #endif
1119 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1120 CPU loop after each instruction */
1121 void cpu_single_step(CPUState *env, int enabled)
1123 #if defined(TARGET_HAS_ICE)
1124 if (env->singlestep_enabled != enabled) {
1125 env->singlestep_enabled = enabled;
1126 /* must flush all the translated code to avoid inconsistencies */
1127 /* XXX: only flush what is necessary */
1128 tb_flush(env);
1130 #endif
1133 /* enable or disable low levels log */
1134 void cpu_set_log(int log_flags)
1136 loglevel = log_flags;
1137 if (loglevel && !logfile) {
1138 logfile = fopen(logfilename, "w");
1139 if (!logfile) {
1140 perror(logfilename);
1141 _exit(1);
1143 #if !defined(CONFIG_SOFTMMU)
1144 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1146 static uint8_t logfile_buf[4096];
1147 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1149 #else
1150 setvbuf(logfile, NULL, _IOLBF, 0);
1151 #endif
1155 void cpu_set_log_filename(const char *filename)
1157 logfilename = strdup(filename);
1160 /* mask must never be zero, except for A20 change call */
1161 void cpu_interrupt(CPUState *env, int mask)
1163 TranslationBlock *tb;
1164 static int interrupt_lock;
1166 env->interrupt_request |= mask;
1167 /* if the cpu is currently executing code, we must unlink it and
1168 all the potentially executing TB */
1169 tb = env->current_tb;
1170 if (tb && !testandset(&interrupt_lock)) {
1171 env->current_tb = NULL;
1172 tb_reset_jump_recursive(tb);
1173 interrupt_lock = 0;
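/* Editor's note: resetting the jump chains of the TB that is currently
   executing is what forces control back into the main cpu_exec() loop, where
   interrupt_request is examined; without it, directly chained TBs could keep
   running without ever noticing the pending interrupt.  The testandset() on
   interrupt_lock keeps two contexts from unlinking chains at the same
   time. */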
1177 void cpu_reset_interrupt(CPUState *env, int mask)
1179 env->interrupt_request &= ~mask;
1182 CPULogItem cpu_log_items[] = {
1183 { CPU_LOG_TB_OUT_ASM, "out_asm",
1184 "show generated host assembly code for each compiled TB" },
1185 { CPU_LOG_TB_IN_ASM, "in_asm",
1186 "show target assembly code for each compiled TB" },
1187 { CPU_LOG_TB_OP, "op",
1188 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1189 #ifdef TARGET_I386
1190 { CPU_LOG_TB_OP_OPT, "op_opt",
1191 "show micro ops after optimization for each compiled TB" },
1192 #endif
1193 { CPU_LOG_INT, "int",
1194 "show interrupts/exceptions in short format" },
1195 { CPU_LOG_EXEC, "exec",
1196 "show trace before each executed TB (lots of logs)" },
1197 { CPU_LOG_TB_CPU, "cpu",
1198 "show CPU state before bloc translation" },
1199 #ifdef TARGET_I386
1200 { CPU_LOG_PCALL, "pcall",
1201 "show protected mode far calls/returns/exceptions" },
1202 #endif
1203 #ifdef DEBUG_IOPORT
1204 { CPU_LOG_IOPORT, "ioport",
1205 "show all i/o ports accesses" },
1206 #endif
1207 { 0, NULL, NULL },
1210 static int cmp1(const char *s1, int n, const char *s2)
1212 if (strlen(s2) != n)
1213 return 0;
1214 return memcmp(s1, s2, n) == 0;
1217 /* takes a comma separated list of log masks. Return 0 if error. */
1218 int cpu_str_to_log_mask(const char *str)
1220 CPULogItem *item;
1221 int mask;
1222 const char *p, *p1;
1224 p = str;
1225 mask = 0;
1226 for(;;) {
1227 p1 = strchr(p, ',');
1228 if (!p1)
1229 p1 = p + strlen(p);
1230 if(cmp1(p,p1-p,"all")) {
1231 for(item = cpu_log_items; item->mask != 0; item++) {
1232 mask |= item->mask;
1234 } else {
1235 for(item = cpu_log_items; item->mask != 0; item++) {
1236 if (cmp1(p, p1 - p, item->name))
1237 goto found;
1239 return 0;
1241 found:
1242 mask |= item->mask;
1243 if (*p1 != ',')
1244 break;
1245 p = p1 + 1;
1247 return mask;
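/* Illustrative usage (added by the editor; the flag string is just an
   example): the log-selection string handed to QEMU is parsed here, e.g.

       int mask = cpu_str_to_log_mask("in_asm,op,exec");
       if (mask)
           cpu_set_log(mask);

   "all" selects every item, and any unknown name makes the parser return 0. */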
1250 void cpu_abort(CPUState *env, const char *fmt, ...)
1252 va_list ap;
1254 va_start(ap, fmt);
1255 fprintf(stderr, "qemu: fatal: ");
1256 vfprintf(stderr, fmt, ap);
1257 fprintf(stderr, "\n");
1258 #ifdef TARGET_I386
1259 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1260 #else
1261 cpu_dump_state(env, stderr, fprintf, 0);
1262 #endif
1263 va_end(ap);
1264 abort();
1267 CPUState *cpu_copy(CPUState *env)
1269 CPUState *new_env = cpu_init();
1270 /* preserve chaining and index */
1271 CPUState *next_cpu = new_env->next_cpu;
1272 int cpu_index = new_env->cpu_index;
1273 memcpy(new_env, env, sizeof(CPUState));
1274 new_env->next_cpu = next_cpu;
1275 new_env->cpu_index = cpu_index;
1276 return new_env;
1279 #if !defined(CONFIG_USER_ONLY)
1281 /* NOTE: if flush_global is true, also flush global entries (not
1282 implemented yet) */
1283 void tlb_flush(CPUState *env, int flush_global)
1285 int i;
1287 #if defined(DEBUG_TLB)
1288 printf("tlb_flush:\n");
1289 #endif
1290 /* must reset current TB so that interrupts cannot modify the
1291 links while we are modifying them */
1292 env->current_tb = NULL;
1294 for(i = 0; i < CPU_TLB_SIZE; i++) {
1295 env->tlb_table[0][i].addr_read = -1;
1296 env->tlb_table[0][i].addr_write = -1;
1297 env->tlb_table[0][i].addr_code = -1;
1298 env->tlb_table[1][i].addr_read = -1;
1299 env->tlb_table[1][i].addr_write = -1;
1300 env->tlb_table[1][i].addr_code = -1;
1303 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1305 #if !defined(CONFIG_SOFTMMU)
1306 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1307 #endif
1308 #ifdef USE_KQEMU
1309 if (env->kqemu_enabled) {
1310 kqemu_flush(env, flush_global);
1312 #endif
1313 tlb_flush_count++;
1316 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1318 if (addr == (tlb_entry->addr_read &
1319 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1320 addr == (tlb_entry->addr_write &
1321 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1322 addr == (tlb_entry->addr_code &
1323 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1324 tlb_entry->addr_read = -1;
1325 tlb_entry->addr_write = -1;
1326 tlb_entry->addr_code = -1;
1330 void tlb_flush_page(CPUState *env, target_ulong addr)
1332 int i;
1333 TranslationBlock *tb;
1335 #if defined(DEBUG_TLB)
1336 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1337 #endif
1338 /* must reset current TB so that interrupts cannot modify the
1339 links while we are modifying them */
1340 env->current_tb = NULL;
1342 addr &= TARGET_PAGE_MASK;
1343 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1344 tlb_flush_entry(&env->tlb_table[0][i], addr);
1345 tlb_flush_entry(&env->tlb_table[1][i], addr);
1347 /* Discard jump cache entries for any tb which might potentially
1348 overlap the flushed page. */
1349 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1350 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1352 i = tb_jmp_cache_hash_page(addr);
1353 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1355 #if !defined(CONFIG_SOFTMMU)
1356 if (addr < MMAP_AREA_END)
1357 munmap((void *)addr, TARGET_PAGE_SIZE);
1358 #endif
1359 #ifdef USE_KQEMU
1360 if (env->kqemu_enabled) {
1361 kqemu_flush_page(env, addr);
1363 #endif
1366 /* update the TLBs so that writes to code in the virtual page 'addr'
1367 can be detected */
1368 static void tlb_protect_code(ram_addr_t ram_addr)
1370 cpu_physical_memory_reset_dirty(ram_addr,
1371 ram_addr + TARGET_PAGE_SIZE,
1372 CODE_DIRTY_FLAG);
1375 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1376 tested for self modifying code */
1377 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1378 target_ulong vaddr)
1380 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1383 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1384 unsigned long start, unsigned long length)
1386 unsigned long addr;
1387 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1388 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1389 if ((addr - start) < length) {
1390 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1395 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1396 int dirty_flags)
1398 CPUState *env;
1399 unsigned long length, start1;
1400 int i, mask, len;
1401 uint8_t *p;
1403 start &= TARGET_PAGE_MASK;
1404 end = TARGET_PAGE_ALIGN(end);
1406 length = end - start;
1407 if (length == 0)
1408 return;
1409 len = length >> TARGET_PAGE_BITS;
1410 #ifdef USE_KQEMU
1411 /* XXX: should not depend on cpu context */
1412 env = first_cpu;
1413 if (env->kqemu_enabled) {
1414 ram_addr_t addr;
1415 addr = start;
1416 for(i = 0; i < len; i++) {
1417 kqemu_set_notdirty(env, addr);
1418 addr += TARGET_PAGE_SIZE;
1421 #endif
1422 mask = ~dirty_flags;
1423 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1424 for(i = 0; i < len; i++)
1425 p[i] &= mask;
1427 /* we modify the TLB cache so that the dirty bit will be set again
1428 when accessing the range */
1429 start1 = start + (unsigned long)phys_ram_base;
1430 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1431 for(i = 0; i < CPU_TLB_SIZE; i++)
1432 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1433 for(i = 0; i < CPU_TLB_SIZE; i++)
1434 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1437 #if !defined(CONFIG_SOFTMMU)
1438 /* XXX: this is expensive */
1440 VirtPageDesc *p;
1441 int j;
1442 target_ulong addr;
1444 for(i = 0; i < L1_SIZE; i++) {
1445 p = l1_virt_map[i];
1446 if (p) {
1447 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1448 for(j = 0; j < L2_SIZE; j++) {
1449 if (p->valid_tag == virt_valid_tag &&
1450 p->phys_addr >= start && p->phys_addr < end &&
1451 (p->prot & PROT_WRITE)) {
1452 if (addr < MMAP_AREA_END) {
1453 mprotect((void *)addr, TARGET_PAGE_SIZE,
1454 p->prot & ~PROT_WRITE);
1457 addr += TARGET_PAGE_SIZE;
1458 p++;
1463 #endif
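/* Editor's note: dirty-page tracking works by downgrading the TLB write
   entry for a clean RAM page from IO_MEM_RAM to IO_MEM_NOTDIRTY
   (tlb_reset_dirty_range above).  The next guest write is then routed
   through the notdirty_mem_write* handlers further down, which invalidate
   any translated code on the page, set the dirty bits again and, once the
   page is fully dirty (0xff), restore the fast direct-RAM mapping via
   tlb_set_dirty(). */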
1466 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1468 ram_addr_t ram_addr;
1470 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1471 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1472 tlb_entry->addend - (unsigned long)phys_ram_base;
1473 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1474 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1479 /* update the TLB according to the current state of the dirty bits */
1480 void cpu_tlb_update_dirty(CPUState *env)
1482 int i;
1483 for(i = 0; i < CPU_TLB_SIZE; i++)
1484 tlb_update_dirty(&env->tlb_table[0][i]);
1485 for(i = 0; i < CPU_TLB_SIZE; i++)
1486 tlb_update_dirty(&env->tlb_table[1][i]);
1489 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1490 unsigned long start)
1492 unsigned long addr;
1493 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1494 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1495 if (addr == start) {
1496 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1501 /* update the TLB corresponding to virtual page vaddr and phys addr
1502 addr so that it is no longer dirty */
1503 static inline void tlb_set_dirty(CPUState *env,
1504 unsigned long addr, target_ulong vaddr)
1506 int i;
1508 addr &= TARGET_PAGE_MASK;
1509 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1510 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1511 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1514 /* add a new TLB entry. At most one entry for a given virtual address
1515 is permitted. Return 0 if OK or 2 if the page could not be mapped
1516 (can only happen in non SOFTMMU mode for I/O pages or pages
1517 conflicting with the host address space). */
1518 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1519 target_phys_addr_t paddr, int prot,
1520 int is_user, int is_softmmu)
1522 PhysPageDesc *p;
1523 unsigned long pd;
1524 unsigned int index;
1525 target_ulong address;
1526 target_phys_addr_t addend;
1527 int ret;
1528 CPUTLBEntry *te;
1529 int i;
1531 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1532 if (!p) {
1533 pd = IO_MEM_UNASSIGNED;
1534 } else {
1535 pd = p->phys_offset;
1537 #if defined(DEBUG_TLB)
1538 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1539 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1540 #endif
1542 ret = 0;
1543 #if !defined(CONFIG_SOFTMMU)
1544 if (is_softmmu)
1545 #endif
1547 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1548 /* IO memory case */
1549 address = vaddr | pd;
1550 addend = paddr;
1551 } else {
1552 /* standard memory */
1553 address = vaddr;
1554 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1557 /* Make accesses to pages with watchpoints go via the
1558 watchpoint trap routines. */
1559 for (i = 0; i < env->nb_watchpoints; i++) {
1560 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1561 if (address & ~TARGET_PAGE_MASK) {
1562 env->watchpoint[i].is_ram = 0;
1563 address = vaddr | io_mem_watch;
1564 } else {
1565 env->watchpoint[i].is_ram = 1;
1566 /* TODO: Figure out how to make read watchpoints coexist
1567 with code. */
1568 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1573 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1574 addend -= vaddr;
1575 te = &env->tlb_table[is_user][index];
1576 te->addend = addend;
1577 if (prot & PAGE_READ) {
1578 te->addr_read = address;
1579 } else {
1580 te->addr_read = -1;
1582 if (prot & PAGE_EXEC) {
1583 te->addr_code = address;
1584 } else {
1585 te->addr_code = -1;
1587 if (prot & PAGE_WRITE) {
1588 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1589 (pd & IO_MEM_ROMD)) {
1590 /* write access calls the I/O callback */
1591 te->addr_write = vaddr |
1592 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1593 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1594 !cpu_physical_memory_is_dirty(pd)) {
1595 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1596 } else {
1597 te->addr_write = address;
1599 } else {
1600 te->addr_write = -1;
1603 #if !defined(CONFIG_SOFTMMU)
1604 else {
1605 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1606 /* IO access: no mapping is done as it will be handled by the
1607 soft MMU */
1608 if (!(env->hflags & HF_SOFTMMU_MASK))
1609 ret = 2;
1610 } else {
1611 void *map_addr;
1613 if (vaddr >= MMAP_AREA_END) {
1614 ret = 2;
1615 } else {
1616 if (prot & PROT_WRITE) {
1617 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1618 #if defined(TARGET_HAS_SMC) || 1
1619 first_tb ||
1620 #endif
1621 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1622 !cpu_physical_memory_is_dirty(pd))) {
1623 /* ROM: we behave as if code was present inside */
1624 /* if code is present, we only map as read only and save the
1625 original mapping */
1626 VirtPageDesc *vp;
1628 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1629 vp->phys_addr = pd;
1630 vp->prot = prot;
1631 vp->valid_tag = virt_valid_tag;
1632 prot &= ~PAGE_WRITE;
1635 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1636 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1637 if (map_addr == MAP_FAILED) {
1638 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1639 paddr, vaddr);
1644 #endif
1645 return ret;
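/* Editor's note: a softmmu TLB entry encodes its kind in the low bits of the
   stored address: a plain page-aligned target address means direct RAM
   access through 'addend'; vaddr | IO_MEM_NOTDIRTY sends writes through the
   dirty-tracking handlers; vaddr | io_index (the pd low bits) sends the
   access to the registered I/O callbacks; and -1 means the permission is
   absent. */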
1648 /* called from signal handler: invalidate the code and unprotect the
1649 page. Return TRUE if the fault was successfully handled. */
1650 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1652 #if !defined(CONFIG_SOFTMMU)
1653 VirtPageDesc *vp;
1655 #if defined(DEBUG_TLB)
1656 printf("page_unprotect: addr=0x%08x\n", addr);
1657 #endif
1658 addr &= TARGET_PAGE_MASK;
1660 /* if it is not mapped, no need to worry here */
1661 if (addr >= MMAP_AREA_END)
1662 return 0;
1663 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1664 if (!vp)
1665 return 0;
1666 /* NOTE: in this case, valid_tag is _not_ tested as it
1667 validates only the code TLB */
1668 if (vp->valid_tag != virt_valid_tag)
1669 return 0;
1670 if (!(vp->prot & PAGE_WRITE))
1671 return 0;
1672 #if defined(DEBUG_TLB)
1673 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1674 addr, vp->phys_addr, vp->prot);
1675 #endif
1676 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1677 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1678 (unsigned long)addr, vp->prot);
1679 /* set the dirty bit */
1680 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1681 /* flush the code inside */
1682 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1683 return 1;
1684 #else
1685 return 0;
1686 #endif
1689 #else
1691 void tlb_flush(CPUState *env, int flush_global)
1695 void tlb_flush_page(CPUState *env, target_ulong addr)
1699 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1700 target_phys_addr_t paddr, int prot,
1701 int is_user, int is_softmmu)
1703 return 0;
1706 /* dump memory mappings */
1707 void page_dump(FILE *f)
1709 unsigned long start, end;
1710 int i, j, prot, prot1;
1711 PageDesc *p;
1713 fprintf(f, "%-8s %-8s %-8s %s\n",
1714 "start", "end", "size", "prot");
1715 start = -1;
1716 end = -1;
1717 prot = 0;
1718 for(i = 0; i <= L1_SIZE; i++) {
1719 if (i < L1_SIZE)
1720 p = l1_map[i];
1721 else
1722 p = NULL;
1723 for(j = 0;j < L2_SIZE; j++) {
1724 if (!p)
1725 prot1 = 0;
1726 else
1727 prot1 = p[j].flags;
1728 if (prot1 != prot) {
1729 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1730 if (start != -1) {
1731 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1732 start, end, end - start,
1733 prot & PAGE_READ ? 'r' : '-',
1734 prot & PAGE_WRITE ? 'w' : '-',
1735 prot & PAGE_EXEC ? 'x' : '-');
1737 if (prot1 != 0)
1738 start = end;
1739 else
1740 start = -1;
1741 prot = prot1;
1743 if (!p)
1744 break;
1749 int page_get_flags(target_ulong address)
1751 PageDesc *p;
1753 p = page_find(address >> TARGET_PAGE_BITS);
1754 if (!p)
1755 return 0;
1756 return p->flags;
1759 /* modify the flags of a page and invalidate the code if
1760 necessary. The flag PAGE_WRITE_ORG is set automatically
1761 depending on PAGE_WRITE */
1762 void page_set_flags(target_ulong start, target_ulong end, int flags)
1764 PageDesc *p;
1765 target_ulong addr;
1767 start = start & TARGET_PAGE_MASK;
1768 end = TARGET_PAGE_ALIGN(end);
1769 if (flags & PAGE_WRITE)
1770 flags |= PAGE_WRITE_ORG;
1771 spin_lock(&tb_lock);
1772 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1773 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1774 /* if the write protection is set, then we invalidate the code
1775 inside */
1776 if (!(p->flags & PAGE_WRITE) &&
1777 (flags & PAGE_WRITE) &&
1778 p->first_tb) {
1779 tb_invalidate_phys_page(addr, 0, NULL);
1781 p->flags = flags;
1783 spin_unlock(&tb_lock);
1786 /* called from signal handler: invalidate the code and unprotect the
1787 page. Return TRUE if the fault was successfully handled. */
1788 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1790 unsigned int page_index, prot, pindex;
1791 PageDesc *p, *p1;
1792 target_ulong host_start, host_end, addr;
1794 host_start = address & qemu_host_page_mask;
1795 page_index = host_start >> TARGET_PAGE_BITS;
1796 p1 = page_find(page_index);
1797 if (!p1)
1798 return 0;
1799 host_end = host_start + qemu_host_page_size;
1800 p = p1;
1801 prot = 0;
1802 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1803 prot |= p->flags;
1804 p++;
1806 /* if the page was really writable, then we change its
1807 protection back to writable */
1808 if (prot & PAGE_WRITE_ORG) {
1809 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1810 if (!(p1[pindex].flags & PAGE_WRITE)) {
1811 mprotect((void *)g2h(host_start), qemu_host_page_size,
1812 (prot & PAGE_BITS) | PAGE_WRITE);
1813 p1[pindex].flags |= PAGE_WRITE;
1814 /* and since the content will be modified, we must invalidate
1815 the corresponding translated code. */
1816 tb_invalidate_phys_page(address, pc, puc);
1817 #ifdef DEBUG_TB_CHECK
1818 tb_invalidate_check(address);
1819 #endif
1820 return 1;
1823 return 0;
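/* Editor's note: this is the user-mode counterpart of tb_alloc_page() above.
   Pages holding translated code are mprotect'ed read-only there, so a guest
   write raises a host fault whose handler calls page_unprotect(); the write
   permission is restored and the translations covering the page are thrown
   away via tb_invalidate_phys_page(). */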
1826 /* call this function when system calls directly modify a memory area */
1827 /* ??? This should be redundant now that we have lock_user. */
1828 void page_unprotect_range(target_ulong data, target_ulong data_size)
1830 target_ulong start, end, addr;
1832 start = data;
1833 end = start + data_size;
1834 start &= TARGET_PAGE_MASK;
1835 end = TARGET_PAGE_ALIGN(end);
1836 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1837 page_unprotect(addr, 0, NULL);
1841 static inline void tlb_set_dirty(CPUState *env,
1842 unsigned long addr, target_ulong vaddr)
1845 #endif /* defined(CONFIG_USER_ONLY) */
1847 /* register physical memory. 'size' must be a multiple of the target
1848 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1849 io memory page */
1850 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1851 unsigned long size,
1852 unsigned long phys_offset)
1854 target_phys_addr_t addr, end_addr;
1855 PhysPageDesc *p;
1856 CPUState *env;
1858 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1859 end_addr = start_addr + size;
1860 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1861 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1862 p->phys_offset = phys_offset;
1863 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1864 (phys_offset & IO_MEM_ROMD))
1865 phys_offset += TARGET_PAGE_SIZE;
1868 /* since each CPU stores ram addresses in its TLB cache, we must
1869 reset the modified entries */
1870 /* XXX: slow ! */
1871 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1872 tlb_flush(env, 1);
1876 /* XXX: temporary until new memory mapping API */
1877 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1879 PhysPageDesc *p;
1881 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1882 if (!p)
1883 return IO_MEM_UNASSIGNED;
1884 return p->phys_offset;
1887 /* XXX: better than nothing */
1888 ram_addr_t qemu_ram_alloc(unsigned int size)
1890 ram_addr_t addr;
1891 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1892 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1893 size, phys_ram_size);
1894 abort();
1896 addr = phys_ram_alloc_offset;
1897 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1898 return addr;
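/* Illustrative usage (added by the editor; the size and guest address are
   made up): board code carves guest RAM out of the preallocated pool and
   then maps it, typically along the lines of

       ram_addr_t off = qemu_ram_alloc(0x00800000);                  /* 8 MB */
       cpu_register_physical_memory(0x00000000, 0x00800000, off | IO_MEM_RAM);
*/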
1901 void qemu_ram_free(ram_addr_t addr)
1905 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1907 #ifdef DEBUG_UNASSIGNED
1908 printf("Unassigned mem read 0x%08x\n", (int)addr);
1909 #endif
1910 return 0;
1913 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1915 #ifdef DEBUG_UNASSIGNED
1916 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1917 #endif
1920 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1921 unassigned_mem_readb,
1922 unassigned_mem_readb,
1923 unassigned_mem_readb,
1926 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1927 unassigned_mem_writeb,
1928 unassigned_mem_writeb,
1929 unassigned_mem_writeb,
1932 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1934 unsigned long ram_addr;
1935 int dirty_flags;
1936 ram_addr = addr - (unsigned long)phys_ram_base;
1937 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1938 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1939 #if !defined(CONFIG_USER_ONLY)
1940 tb_invalidate_phys_page_fast(ram_addr, 1);
1941 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1942 #endif
1944 stb_p((uint8_t *)(long)addr, val);
1945 #ifdef USE_KQEMU
1946 if (cpu_single_env->kqemu_enabled &&
1947 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1948 kqemu_modify_page(cpu_single_env, ram_addr);
1949 #endif
1950 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1951 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1952 /* we remove the notdirty callback only if the code has been
1953 flushed */
1954 if (dirty_flags == 0xff)
1955 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1958 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1960 unsigned long ram_addr;
1961 int dirty_flags;
1962 ram_addr = addr - (unsigned long)phys_ram_base;
1963 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1964 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1965 #if !defined(CONFIG_USER_ONLY)
1966 tb_invalidate_phys_page_fast(ram_addr, 2);
1967 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1968 #endif
1970 stw_p((uint8_t *)(long)addr, val);
1971 #ifdef USE_KQEMU
1972 if (cpu_single_env->kqemu_enabled &&
1973 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1974 kqemu_modify_page(cpu_single_env, ram_addr);
1975 #endif
1976 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1977 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1978 /* we remove the notdirty callback only if the code has been
1979 flushed */
1980 if (dirty_flags == 0xff)
1981 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1984 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1986 unsigned long ram_addr;
1987 int dirty_flags;
1988 ram_addr = addr - (unsigned long)phys_ram_base;
1989 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1990 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1991 #if !defined(CONFIG_USER_ONLY)
1992 tb_invalidate_phys_page_fast(ram_addr, 4);
1993 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1994 #endif
1996 stl_p((uint8_t *)(long)addr, val);
1997 #ifdef USE_KQEMU
1998 if (cpu_single_env->kqemu_enabled &&
1999 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2000 kqemu_modify_page(cpu_single_env, ram_addr);
2001 #endif
2002 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2003 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2004 /* we remove the notdirty callback only if the code has been
2005 flushed */
2006 if (dirty_flags == 0xff)
2007 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2010 static CPUReadMemoryFunc *error_mem_read[3] = {
2011 NULL, /* never used */
2012 NULL, /* never used */
2013 NULL, /* never used */
2016 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2017 notdirty_mem_writeb,
2018 notdirty_mem_writew,
2019 notdirty_mem_writel,
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

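/* Editorial usage sketch (not part of the original file): how the watchpoint
   machinery above is typically armed.  It assumes the
   cpu_watchpoint_insert(env, vaddr) helper defined earlier in this file and
   a negative return on failure; the guard macro is invented, so the sketch
   is never compiled. */
#ifdef EXEC_C_USAGE_SKETCHES
static void example_watch_guest_address(CPUState *env, target_ulong vaddr)
{
    if (cpu_watchpoint_insert(env, vaddr) < 0) {
        /* watchpoint table full */
        return;
    }
    /* From now on an access to vaddr is routed through the watch_mem_*
       handlers, and check_watchpoint() raises CPU_INTERRUPT_DEBUG and
       records the hit in env->watchpoint_hit. */
}
#endif
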
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding I/O zone is modified. If it is zero (or negative), a
   new I/O zone is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

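/* Editorial usage sketch (not part of the original file): registering a
   hypothetical memory-mapped device with the API documented above and
   mapping it into the guest physical address space.  The
   cpu_register_physical_memory(start, size, phys_offset) call is assumed to
   have its usual signature in this QEMU generation, and the guard macro is
   invented so the sketch is not built. */
#ifdef EXEC_C_USAGE_SKETCHES
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    /* device registers would be decoded from 'addr' and 'opaque' here */
    return 0;
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    /* device state would be updated through 'opaque' here */
}

/* the same callback can serve the byte, word and long slots */
static CPUReadMemoryFunc *example_dev_readfn[3] = {
    example_dev_read,
    example_dev_read,
    example_dev_read,
};

static CPUWriteMemoryFunc *example_dev_writefn[3] = {
    example_dev_write,
    example_dev_write,
    example_dev_write,
};

static void example_dev_map(target_phys_addr_t base, void *opaque)
{
    int iomemtype;

    /* io_index 0 (or negative) allocates a new slot; the return value is
       already shifted and is fed directly to the physical mapping call */
    iomemtype = cpu_register_io_memory(0, example_dev_readfn,
                                       example_dev_writefn, opaque);
    cpu_register_physical_memory(base, 0x1000, iomemtype);
}
#endif
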
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* only lock and copy the part of the buffer that lies in this
               page; the loop advances to the next page afterwards */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

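/* Editorial usage sketch (not part of the original file): this function is
   usually reached through the cpu_physical_memory_read()/..._write()
   wrappers (also used further down in this file), which fix the is_write
   argument.  The guard macro is invented, so the sketch is never built. */
#ifdef EXEC_C_USAGE_SKETCHES
static void example_peek_poke_phys(target_phys_addr_t paddr)
{
    uint8_t buf[64];

    cpu_physical_memory_read(paddr, buf, sizeof(buf));
    buf[0] ^= 1; /* flip one bit of the guest-physical buffer */
    cpu_physical_memory_write(paddr, buf, sizeof(buf));
    /* the write path marks the page dirty and invalidates any TBs derived
       from it, exactly as the RAM branch of the function above shows */
}
#endif
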
/* used for ROM loading: can write to both RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

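/* Editorial usage sketch (not part of the original file): a board init
   function would load its firmware image with the ROM writer above, because
   a plain cpu_physical_memory_write() to a region mapped as IO_MEM_ROM or
   IO_MEM_ROMD is discarded by the I/O path.  Guarded by an invented macro,
   never built by default. */
#ifdef EXEC_C_USAGE_SKETCHES
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(rom_base, image, image_size);
}
#endif
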
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

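/* Editorial usage sketch (not part of the original file) of the PTE pattern
   the comment above alludes to: a target MMU helper sets the accessed/dirty
   bits of a page table entry in guest RAM without marking the page dirty or
   invalidating TBs, so the dirty bitmap keeps tracking guest writes to the
   page table page.  The bit names and the guard macro are made up. */
#ifdef EXEC_C_USAGE_SKETCHES
#define EXAMPLE_PTE_ACCESSED 0x20
#define EXAMPLE_PTE_DIRTY    0x40

static void example_update_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= EXAMPLE_PTE_ACCESSED;
    if (is_write)
        pte |= EXAMPLE_PTE_DIRTY;
    stl_phys_notdirty(pte_addr, pte);
}
#endif
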
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

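/* Editorial usage sketch (not part of the original file): a gdbstub-style
   read of guest virtual memory goes through the debug accessor above, which
   walks the guest page tables via cpu_get_phys_page_debug() instead of the
   TLB.  Guarded by an invented macro, never built by default. */
#ifdef EXEC_C_USAGE_SKETCHES
static int example_read_guest_virtual(CPUState *env, target_ulong vaddr,
                                      uint8_t *buf, int len)
{
    /* returns 0 on success, -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif
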
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

/* Instantiate the softmmu access templates once more with the _cmmu
   suffix and SOFTMMU_CODE_ACCESS defined: these variants are used only
   by the translator to fetch target instructions, at 8/16/32/64 bit
   widths (SHIFT 0..3). */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif