1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
52 #if !defined(CONFIG_USER_ONLY)
53 /* TB consistency checks only implemented for usermode emulation. */
54 #undef DEBUG_TB_CHECK
55 #endif
57 /* threshold to flush the translated code buffer */
58 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
60 #define SMC_BITMAP_USE_THRESHOLD 10
62 #define MMAP_AREA_START 0x00000000
63 #define MMAP_AREA_END 0xa8000000
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_PPC64)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 42
69 #else
70 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
71 #define TARGET_PHYS_ADDR_SPACE_BITS 32
72 #endif
74 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
75 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
76 int nb_tbs;
77 /* any access to the tbs or the page table must use this lock */
78 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
80 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
81 uint8_t *code_gen_ptr;
83 int phys_ram_size;
84 int phys_ram_fd;
85 uint8_t *phys_ram_base;
86 uint8_t *phys_ram_dirty;
87 static ram_addr_t phys_ram_alloc_offset = 0;
89 CPUState *first_cpu;
90 /* current CPU in the current thread. It is only valid inside
91 cpu_exec() */
92 CPUState *cpu_single_env;
94 typedef struct PageDesc {
95 /* list of TBs intersecting this ram page */
96 TranslationBlock *first_tb;
 97     /* to optimize handling of self-modifying code, we count the writes
 98        to a given page; past a threshold we switch to a bitmap */
99 unsigned int code_write_count;
100 uint8_t *code_bitmap;
101 #if defined(CONFIG_USER_ONLY)
102 unsigned long flags;
103 #endif
104 } PageDesc;
106 typedef struct PhysPageDesc {
107 /* offset in host memory of the page + io_index in the low 12 bits */
108 uint32_t phys_offset;
109 } PhysPageDesc;
111 #define L2_BITS 10
112 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
114 #define L1_SIZE (1 << L1_BITS)
115 #define L2_SIZE (1 << L2_BITS)
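/* the page tables below are two level: a page index (address >>
   TARGET_PAGE_BITS) is resolved as
       l1_map[index >> L2_BITS][index & (L2_SIZE - 1)]
   so the upper L1_BITS select a level 1 slot and the lower L2_BITS select
   the descriptor inside a lazily allocated level 2 array.  For physical
   address spaces wider than 32 bits, phys_page_find_alloc() adds one more
   level on top of l1_phys_map. */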
117 static void io_mem_init(void);
119 unsigned long qemu_real_host_page_size;
120 unsigned long qemu_host_page_bits;
121 unsigned long qemu_host_page_size;
122 unsigned long qemu_host_page_mask;
124 /* XXX: for system emulation, it could just be an array */
125 static PageDesc *l1_map[L1_SIZE];
126 PhysPageDesc **l1_phys_map;
128 /* io memory support */
129 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
130 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
131 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
132 static int io_mem_nb;
133 #if defined(CONFIG_SOFTMMU)
134 static int io_mem_watch;
135 #endif
137 /* log support */
138 char *logfilename = "/tmp/qemu.log";
139 FILE *logfile;
140 int loglevel;
142 /* statistics */
143 static int tlb_flush_count;
144 static int tb_flush_count;
145 static int tb_phys_invalidate_count;
147 static void page_init(void)
149 /* NOTE: we can always suppose that qemu_host_page_size >=
150 TARGET_PAGE_SIZE */
151 #ifdef _WIN32
153 SYSTEM_INFO system_info;
154 DWORD old_protect;
156 GetSystemInfo(&system_info);
157 qemu_real_host_page_size = system_info.dwPageSize;
159 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
160 PAGE_EXECUTE_READWRITE, &old_protect);
162 #else
163 qemu_real_host_page_size = getpagesize();
165 unsigned long start, end;
167 start = (unsigned long)code_gen_buffer;
168 start &= ~(qemu_real_host_page_size - 1);
170 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
171 end += qemu_real_host_page_size - 1;
172 end &= ~(qemu_real_host_page_size - 1);
174 mprotect((void *)start, end - start,
175 PROT_READ | PROT_WRITE | PROT_EXEC);
177 #endif
179 if (qemu_host_page_size == 0)
180 qemu_host_page_size = qemu_real_host_page_size;
181 if (qemu_host_page_size < TARGET_PAGE_SIZE)
182 qemu_host_page_size = TARGET_PAGE_SIZE;
183 qemu_host_page_bits = 0;
184 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
185 qemu_host_page_bits++;
186 qemu_host_page_mask = ~(qemu_host_page_size - 1);
187 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
188 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
191 static inline PageDesc *page_find_alloc(unsigned int index)
193 PageDesc **lp, *p;
195 lp = &l1_map[index >> L2_BITS];
196 p = *lp;
197 if (!p) {
198 /* allocate if not found */
199 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
200 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
201 *lp = p;
203 return p + (index & (L2_SIZE - 1));
206 static inline PageDesc *page_find(unsigned int index)
208 PageDesc *p;
210 p = l1_map[index >> L2_BITS];
211 if (!p)
212 return 0;
213 return p + (index & (L2_SIZE - 1));
216 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
218 void **lp, **p;
219 PhysPageDesc *pd;
221 p = (void **)l1_phys_map;
222 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
224 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
225 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
226 #endif
227 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
228 p = *lp;
229 if (!p) {
230 /* allocate if not found */
231 if (!alloc)
232 return NULL;
233 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
234 memset(p, 0, sizeof(void *) * L1_SIZE);
235 *lp = p;
237 #endif
238 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
239 pd = *lp;
240 if (!pd) {
241 int i;
242 /* allocate if not found */
243 if (!alloc)
244 return NULL;
245 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
246 *lp = pd;
247 for (i = 0; i < L2_SIZE; i++)
248 pd[i].phys_offset = IO_MEM_UNASSIGNED;
250 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
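/* non-allocating lookup: returns NULL if the physical page was never
   registered, which callers treat as IO_MEM_UNASSIGNED */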
253 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
255 return phys_page_find_alloc(index, 0);
258 #if !defined(CONFIG_USER_ONLY)
259 static void tlb_protect_code(ram_addr_t ram_addr);
260 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
261 target_ulong vaddr);
262 #endif
264 void cpu_exec_init(CPUState *env)
266 CPUState **penv;
267 int cpu_index;
269 if (!code_gen_ptr) {
270 code_gen_ptr = code_gen_buffer;
271 page_init();
272 io_mem_init();
274 env->next_cpu = NULL;
275 penv = &first_cpu;
276 cpu_index = 0;
277 while (*penv != NULL) {
278 penv = (CPUState **)&(*penv)->next_cpu;
279 cpu_index++;
281 env->cpu_index = cpu_index;
282 env->nb_watchpoints = 0;
283 *penv = env;
286 static inline void invalidate_page_bitmap(PageDesc *p)
288 if (p->code_bitmap) {
289 qemu_free(p->code_bitmap);
290 p->code_bitmap = NULL;
292 p->code_write_count = 0;
295 /* set to NULL all the 'first_tb' fields in all PageDescs */
296 static void page_flush_tb(void)
298 int i, j;
299 PageDesc *p;
301 for(i = 0; i < L1_SIZE; i++) {
302 p = l1_map[i];
303 if (p) {
304 for(j = 0; j < L2_SIZE; j++) {
305 p->first_tb = NULL;
306 invalidate_page_bitmap(p);
307 p++;
313 /* flush all the translation blocks */
314 /* XXX: tb_flush is currently not thread safe */
315 void tb_flush(CPUState *env1)
317 CPUState *env;
318 #if defined(DEBUG_FLUSH)
319 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
320 code_gen_ptr - code_gen_buffer,
321 nb_tbs,
322 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
323 #endif
324 nb_tbs = 0;
326 for(env = first_cpu; env != NULL; env = env->next_cpu) {
327 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
330 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
331 page_flush_tb();
333 code_gen_ptr = code_gen_buffer;
334 /* XXX: flush processor icache at this point if cache flush is
335 expensive */
336 tb_flush_count++;
339 #ifdef DEBUG_TB_CHECK
341 static void tb_invalidate_check(unsigned long address)
343 TranslationBlock *tb;
344 int i;
345 address &= TARGET_PAGE_MASK;
346 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
347 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
348 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
349 address >= tb->pc + tb->size)) {
350 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
351 address, (long)tb->pc, tb->size);
357 /* verify that all the pages have correct rights for code */
358 static void tb_page_check(void)
360 TranslationBlock *tb;
361 int i, flags1, flags2;
363 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
364 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
365 flags1 = page_get_flags(tb->pc);
366 flags2 = page_get_flags(tb->pc + tb->size - 1);
367 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
368 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
369 (long)tb->pc, tb->size, flags1, flags2);
375 void tb_jmp_check(TranslationBlock *tb)
377 TranslationBlock *tb1;
378 unsigned int n1;
380     /* remove any remaining jumps to this TB */
381 tb1 = tb->jmp_first;
382 for(;;) {
383 n1 = (long)tb1 & 3;
384 tb1 = (TranslationBlock *)((long)tb1 & ~3);
385 if (n1 == 2)
386 break;
387 tb1 = tb1->jmp_next[n1];
389 /* check end of list */
390 if (tb1 != tb) {
391 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
395 #endif
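/* list encoding used below: TranslationBlock pointers stored in the per-page
   lists and in the jump lists carry a tag in their two low bits.  For page
   lists the tag is the page slot (0 or 1) the TB occupies in that page; for
   the circular jump lists it is the jump slot, and the value 2 marks
   tb->jmp_first, i.e. the head that terminates the walk. */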
397 /* invalidate one TB */
398 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
399 int next_offset)
401 TranslationBlock *tb1;
402 for(;;) {
403 tb1 = *ptb;
404 if (tb1 == tb) {
405 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
406 break;
408 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
412 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
414 TranslationBlock *tb1;
415 unsigned int n1;
417 for(;;) {
418 tb1 = *ptb;
419 n1 = (long)tb1 & 3;
420 tb1 = (TranslationBlock *)((long)tb1 & ~3);
421 if (tb1 == tb) {
422 *ptb = tb1->page_next[n1];
423 break;
425 ptb = &tb1->page_next[n1];
429 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
431 TranslationBlock *tb1, **ptb;
432 unsigned int n1;
434 ptb = &tb->jmp_next[n];
435 tb1 = *ptb;
436 if (tb1) {
437 /* find tb(n) in circular list */
438 for(;;) {
439 tb1 = *ptb;
440 n1 = (long)tb1 & 3;
441 tb1 = (TranslationBlock *)((long)tb1 & ~3);
442 if (n1 == n && tb1 == tb)
443 break;
444 if (n1 == 2) {
445 ptb = &tb1->jmp_first;
446 } else {
447 ptb = &tb1->jmp_next[n1];
450         /* now we can remove tb(n) from the list */
451 *ptb = tb->jmp_next[n];
453 tb->jmp_next[n] = NULL;
457 /* reset the jump entry 'n' of a TB so that it is not chained to
458 another TB */
459 static inline void tb_reset_jump(TranslationBlock *tb, int n)
461 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
464 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
466 CPUState *env;
467 PageDesc *p;
468 unsigned int h, n1;
469 target_ulong phys_pc;
470 TranslationBlock *tb1, *tb2;
472 /* remove the TB from the hash list */
473 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
474 h = tb_phys_hash_func(phys_pc);
475 tb_remove(&tb_phys_hash[h], tb,
476 offsetof(TranslationBlock, phys_hash_next));
478 /* remove the TB from the page list */
479 if (tb->page_addr[0] != page_addr) {
480 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
481 tb_page_remove(&p->first_tb, tb);
482 invalidate_page_bitmap(p);
484 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
485 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
486 tb_page_remove(&p->first_tb, tb);
487 invalidate_page_bitmap(p);
490 tb_invalidated_flag = 1;
492 /* remove the TB from the hash list */
493 h = tb_jmp_cache_hash_func(tb->pc);
494 for(env = first_cpu; env != NULL; env = env->next_cpu) {
495 if (env->tb_jmp_cache[h] == tb)
496 env->tb_jmp_cache[h] = NULL;
499     /* remove this TB from the two jump lists */
500 tb_jmp_remove(tb, 0);
501 tb_jmp_remove(tb, 1);
503     /* remove any remaining jumps to this TB */
504 tb1 = tb->jmp_first;
505 for(;;) {
506 n1 = (long)tb1 & 3;
507 if (n1 == 2)
508 break;
509 tb1 = (TranslationBlock *)((long)tb1 & ~3);
510 tb2 = tb1->jmp_next[n1];
511 tb_reset_jump(tb1, n1);
512 tb1->jmp_next[n1] = NULL;
513 tb1 = tb2;
515 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
517 tb_phys_invalidate_count++;
520 static inline void set_bits(uint8_t *tab, int start, int len)
522 int end, mask, end1;
524 end = start + len;
525 tab += start >> 3;
526 mask = 0xff << (start & 7);
527 if ((start & ~7) == (end & ~7)) {
528 if (start < end) {
529 mask &= ~(0xff << (end & 7));
530 *tab |= mask;
532 } else {
533 *tab++ |= mask;
534 start = (start + 8) & ~7;
535 end1 = end & ~7;
536 while (start < end1) {
537 *tab++ = 0xff;
538 start += 8;
540 if (start < end) {
541 mask = ~(0xff << (end & 7));
542 *tab |= mask;
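/* example for the helper above: set_bits(tab, 12, 8) marks bits 12..19,
   i.e. the high nibble of tab[1] and the low nibble of tab[2] */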
547 static void build_page_bitmap(PageDesc *p)
549 int n, tb_start, tb_end;
550 TranslationBlock *tb;
552 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
553 if (!p->code_bitmap)
554 return;
555 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
557 tb = p->first_tb;
558 while (tb != NULL) {
559 n = (long)tb & 3;
560 tb = (TranslationBlock *)((long)tb & ~3);
561 /* NOTE: this is subtle as a TB may span two physical pages */
562 if (n == 0) {
563 /* NOTE: tb_end may be after the end of the page, but
564 it is not a problem */
565 tb_start = tb->pc & ~TARGET_PAGE_MASK;
566 tb_end = tb_start + tb->size;
567 if (tb_end > TARGET_PAGE_SIZE)
568 tb_end = TARGET_PAGE_SIZE;
569 } else {
570 tb_start = 0;
571 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
573 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
574 tb = tb->page_next[n];
578 #ifdef TARGET_HAS_PRECISE_SMC
580 static void tb_gen_code(CPUState *env,
581 target_ulong pc, target_ulong cs_base, int flags,
582 int cflags)
584 TranslationBlock *tb;
585 uint8_t *tc_ptr;
586 target_ulong phys_pc, phys_page2, virt_page2;
587 int code_gen_size;
589 phys_pc = get_phys_addr_code(env, pc);
590 tb = tb_alloc(pc);
591 if (!tb) {
592 /* flush must be done */
593 tb_flush(env);
594 /* cannot fail at this point */
595 tb = tb_alloc(pc);
597 tc_ptr = code_gen_ptr;
598 tb->tc_ptr = tc_ptr;
599 tb->cs_base = cs_base;
600 tb->flags = flags;
601 tb->cflags = cflags;
602 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
603 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
605 /* check next page if needed */
606 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
607 phys_page2 = -1;
608 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
609 phys_page2 = get_phys_addr_code(env, virt_page2);
611 tb_link_phys(tb, phys_pc, phys_page2);
613 #endif
615 /* invalidate all TBs which intersect with the target physical page
616    range [start, end). NOTE: start and end must refer to
617 the same physical page. 'is_cpu_write_access' should be true if called
618 from a real cpu write access: the virtual CPU will exit the current
619 TB if code is modified inside this TB. */
620 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
621 int is_cpu_write_access)
623 int n, current_tb_modified, current_tb_not_found, current_flags;
624 CPUState *env = cpu_single_env;
625 PageDesc *p;
626 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
627 target_ulong tb_start, tb_end;
628 target_ulong current_pc, current_cs_base;
630 p = page_find(start >> TARGET_PAGE_BITS);
631 if (!p)
632 return;
633 if (!p->code_bitmap &&
634 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
635 is_cpu_write_access) {
636 /* build code bitmap */
637 build_page_bitmap(p);
640     /* we remove all the TBs in the range [start, end) */
641 /* XXX: see if in some cases it could be faster to invalidate all the code */
642 current_tb_not_found = is_cpu_write_access;
643 current_tb_modified = 0;
644 current_tb = NULL; /* avoid warning */
645 current_pc = 0; /* avoid warning */
646 current_cs_base = 0; /* avoid warning */
647 current_flags = 0; /* avoid warning */
648 tb = p->first_tb;
649 while (tb != NULL) {
650 n = (long)tb & 3;
651 tb = (TranslationBlock *)((long)tb & ~3);
652 tb_next = tb->page_next[n];
653 /* NOTE: this is subtle as a TB may span two physical pages */
654 if (n == 0) {
655 /* NOTE: tb_end may be after the end of the page, but
656 it is not a problem */
657 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
658 tb_end = tb_start + tb->size;
659 } else {
660 tb_start = tb->page_addr[1];
661 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
663 if (!(tb_end <= start || tb_start >= end)) {
664 #ifdef TARGET_HAS_PRECISE_SMC
665 if (current_tb_not_found) {
666 current_tb_not_found = 0;
667 current_tb = NULL;
668 if (env->mem_write_pc) {
669 /* now we have a real cpu fault */
670 current_tb = tb_find_pc(env->mem_write_pc);
673 if (current_tb == tb &&
674 !(current_tb->cflags & CF_SINGLE_INSN)) {
675 /* If we are modifying the current TB, we must stop
676 its execution. We could be more precise by checking
677 that the modification is after the current PC, but it
678 would require a specialized function to partially
679 restore the CPU state */
681 current_tb_modified = 1;
682 cpu_restore_state(current_tb, env,
683 env->mem_write_pc, NULL);
684 #if defined(TARGET_I386)
685 current_flags = env->hflags;
686 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
687 current_cs_base = (target_ulong)env->segs[R_CS].base;
688 current_pc = current_cs_base + env->eip;
689 #else
690 #error unsupported CPU
691 #endif
693 #endif /* TARGET_HAS_PRECISE_SMC */
694 /* we need to do that to handle the case where a signal
695 occurs while doing tb_phys_invalidate() */
696 saved_tb = NULL;
697 if (env) {
698 saved_tb = env->current_tb;
699 env->current_tb = NULL;
701 tb_phys_invalidate(tb, -1);
702 if (env) {
703 env->current_tb = saved_tb;
704 if (env->interrupt_request && env->current_tb)
705 cpu_interrupt(env, env->interrupt_request);
708 tb = tb_next;
710 #if !defined(CONFIG_USER_ONLY)
711 /* if no code remaining, no need to continue to use slow writes */
712 if (!p->first_tb) {
713 invalidate_page_bitmap(p);
714 if (is_cpu_write_access) {
715 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
718 #endif
719 #ifdef TARGET_HAS_PRECISE_SMC
720 if (current_tb_modified) {
721         /* we generate a block containing just the instruction
722            modifying the memory. This ensures that the block cannot
723            modify itself */
724 env->current_tb = NULL;
725 tb_gen_code(env, current_pc, current_cs_base, current_flags,
726 CF_SINGLE_INSN);
727 cpu_resume_from_signal(env, NULL);
729 #endif
732 /* len must be <= 8 and start must be a multiple of len */
733 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
735 PageDesc *p;
736 int offset, b;
737 #if 0
738 if (1) {
739 if (loglevel) {
740 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
741 cpu_single_env->mem_write_vaddr, len,
742 cpu_single_env->eip,
743 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
746 #endif
747 p = page_find(start >> TARGET_PAGE_BITS);
748 if (!p)
749 return;
750 if (p->code_bitmap) {
751 offset = start & ~TARGET_PAGE_MASK;
752 b = p->code_bitmap[offset >> 3] >> (offset & 7);
753 if (b & ((1 << len) - 1))
754 goto do_invalidate;
755 } else {
756 do_invalidate:
757 tb_invalidate_phys_page_range(start, start + len, 1);
761 #if !defined(CONFIG_SOFTMMU)
762 static void tb_invalidate_phys_page(target_ulong addr,
763 unsigned long pc, void *puc)
765 int n, current_flags, current_tb_modified;
766 target_ulong current_pc, current_cs_base;
767 PageDesc *p;
768 TranslationBlock *tb, *current_tb;
769 #ifdef TARGET_HAS_PRECISE_SMC
770 CPUState *env = cpu_single_env;
771 #endif
773 addr &= TARGET_PAGE_MASK;
774 p = page_find(addr >> TARGET_PAGE_BITS);
775 if (!p)
776 return;
777 tb = p->first_tb;
778 current_tb_modified = 0;
779 current_tb = NULL;
780 current_pc = 0; /* avoid warning */
781 current_cs_base = 0; /* avoid warning */
782 current_flags = 0; /* avoid warning */
783 #ifdef TARGET_HAS_PRECISE_SMC
784 if (tb && pc != 0) {
785 current_tb = tb_find_pc(pc);
787 #endif
788 while (tb != NULL) {
789 n = (long)tb & 3;
790 tb = (TranslationBlock *)((long)tb & ~3);
791 #ifdef TARGET_HAS_PRECISE_SMC
792 if (current_tb == tb &&
793 !(current_tb->cflags & CF_SINGLE_INSN)) {
794 /* If we are modifying the current TB, we must stop
795 its execution. We could be more precise by checking
796 that the modification is after the current PC, but it
797 would require a specialized function to partially
798 restore the CPU state */
800 current_tb_modified = 1;
801 cpu_restore_state(current_tb, env, pc, puc);
802 #if defined(TARGET_I386)
803 current_flags = env->hflags;
804 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
805 current_cs_base = (target_ulong)env->segs[R_CS].base;
806 current_pc = current_cs_base + env->eip;
807 #else
808 #error unsupported CPU
809 #endif
811 #endif /* TARGET_HAS_PRECISE_SMC */
812 tb_phys_invalidate(tb, addr);
813 tb = tb->page_next[n];
815 p->first_tb = NULL;
816 #ifdef TARGET_HAS_PRECISE_SMC
817 if (current_tb_modified) {
818         /* we generate a block containing just the instruction
819            modifying the memory. This ensures that the block cannot
820            modify itself */
821 env->current_tb = NULL;
822 tb_gen_code(env, current_pc, current_cs_base, current_flags,
823 CF_SINGLE_INSN);
824 cpu_resume_from_signal(env, puc);
826 #endif
828 #endif
830 /* add the tb in the target page and protect it if necessary */
831 static inline void tb_alloc_page(TranslationBlock *tb,
832 unsigned int n, target_ulong page_addr)
834 PageDesc *p;
835 TranslationBlock *last_first_tb;
837 tb->page_addr[n] = page_addr;
838 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
839 tb->page_next[n] = p->first_tb;
840 last_first_tb = p->first_tb;
841 p->first_tb = (TranslationBlock *)((long)tb | n);
842 invalidate_page_bitmap(p);
844 #if defined(TARGET_HAS_SMC) || 1
846 #if defined(CONFIG_USER_ONLY)
847 if (p->flags & PAGE_WRITE) {
848 target_ulong addr;
849 PageDesc *p2;
850 int prot;
852         /* force the host page to be non-writable (writes will take a
853            page fault + mprotect overhead) */
854 page_addr &= qemu_host_page_mask;
855 prot = 0;
856 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
857 addr += TARGET_PAGE_SIZE) {
859 p2 = page_find (addr >> TARGET_PAGE_BITS);
860 if (!p2)
861 continue;
862 prot |= p2->flags;
863 p2->flags &= ~PAGE_WRITE;
864 page_get_flags(addr);
866 mprotect(g2h(page_addr), qemu_host_page_size,
867 (prot & PAGE_BITS) & ~PAGE_WRITE);
868 #ifdef DEBUG_TB_INVALIDATE
869 printf("protecting code page: 0x%08lx\n",
870 page_addr);
871 #endif
873 #else
874 /* if some code is already present, then the pages are already
875 protected. So we handle the case where only the first TB is
876 allocated in a physical page */
877 if (!last_first_tb) {
878 tlb_protect_code(page_addr);
880 #endif
882 #endif /* TARGET_HAS_SMC */
885 /* Allocate a new translation block. Flush the translation buffer if
886 too many translation blocks or too much generated code. */
887 TranslationBlock *tb_alloc(target_ulong pc)
889 TranslationBlock *tb;
891 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
892 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
893 return NULL;
894 tb = &tbs[nb_tbs++];
895 tb->pc = pc;
896 tb->cflags = 0;
897 return tb;
900 /* add a new TB and link it to the physical page tables. phys_page2 is
901 (-1) to indicate that only one page contains the TB. */
902 void tb_link_phys(TranslationBlock *tb,
903 target_ulong phys_pc, target_ulong phys_page2)
905 unsigned int h;
906 TranslationBlock **ptb;
908 /* add in the physical hash table */
909 h = tb_phys_hash_func(phys_pc);
910 ptb = &tb_phys_hash[h];
911 tb->phys_hash_next = *ptb;
912 *ptb = tb;
914 /* add in the page list */
915 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
916 if (phys_page2 != -1)
917 tb_alloc_page(tb, 1, phys_page2);
918 else
919 tb->page_addr[1] = -1;
921 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
922 tb->jmp_next[0] = NULL;
923 tb->jmp_next[1] = NULL;
924 #ifdef USE_CODE_COPY
925 tb->cflags &= ~CF_FP_USED;
926 if (tb->cflags & CF_TB_FP_USED)
927 tb->cflags |= CF_FP_USED;
928 #endif
930 /* init original jump addresses */
931 if (tb->tb_next_offset[0] != 0xffff)
932 tb_reset_jump(tb, 0);
933 if (tb->tb_next_offset[1] != 0xffff)
934 tb_reset_jump(tb, 1);
936 #ifdef DEBUG_TB_CHECK
937 tb_page_check();
938 #endif
941 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
942 tb[1].tc_ptr. Return NULL if not found */
943 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
945 int m_min, m_max, m;
946 unsigned long v;
947 TranslationBlock *tb;
949 if (nb_tbs <= 0)
950 return NULL;
951 if (tc_ptr < (unsigned long)code_gen_buffer ||
952 tc_ptr >= (unsigned long)code_gen_ptr)
953 return NULL;
954 /* binary search (cf Knuth) */
955 m_min = 0;
956 m_max = nb_tbs - 1;
957 while (m_min <= m_max) {
958 m = (m_min + m_max) >> 1;
959 tb = &tbs[m];
960 v = (unsigned long)tb->tc_ptr;
961 if (v == tc_ptr)
962 return tb;
963 else if (tc_ptr < v) {
964 m_max = m - 1;
965 } else {
966 m_min = m + 1;
969 return &tbs[m_max];
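/* the binary search above relies on tbs[] being ordered by tc_ptr, which
   holds because code_gen_ptr only grows between two tb_flush() calls; when
   tc_ptr points inside a block, the last TB starting at or before it is
   returned */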
972 static void tb_reset_jump_recursive(TranslationBlock *tb);
974 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
976 TranslationBlock *tb1, *tb_next, **ptb;
977 unsigned int n1;
979 tb1 = tb->jmp_next[n];
980 if (tb1 != NULL) {
981 /* find head of list */
982 for(;;) {
983 n1 = (long)tb1 & 3;
984 tb1 = (TranslationBlock *)((long)tb1 & ~3);
985 if (n1 == 2)
986 break;
987 tb1 = tb1->jmp_next[n1];
989         /* we are now sure that tb jumps to tb1 */
990 tb_next = tb1;
992 /* remove tb from the jmp_first list */
993 ptb = &tb_next->jmp_first;
994 for(;;) {
995 tb1 = *ptb;
996 n1 = (long)tb1 & 3;
997 tb1 = (TranslationBlock *)((long)tb1 & ~3);
998 if (n1 == n && tb1 == tb)
999 break;
1000 ptb = &tb1->jmp_next[n1];
1002 *ptb = tb->jmp_next[n];
1003 tb->jmp_next[n] = NULL;
1005         /* remove the jump to the next tb in the generated code */
1006 tb_reset_jump(tb, n);
1008         /* recursively remove the chained jumps from the tb we would have jumped to */
1009 tb_reset_jump_recursive(tb_next);
1013 static void tb_reset_jump_recursive(TranslationBlock *tb)
1015 tb_reset_jump_recursive2(tb, 0);
1016 tb_reset_jump_recursive2(tb, 1);
1019 #if defined(TARGET_HAS_ICE)
1020 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1022 target_ulong addr, pd;
1023 ram_addr_t ram_addr;
1024 PhysPageDesc *p;
1026 addr = cpu_get_phys_page_debug(env, pc);
1027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1028 if (!p) {
1029 pd = IO_MEM_UNASSIGNED;
1030 } else {
1031 pd = p->phys_offset;
1033 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1034 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1036 #endif
1038 /* Add a watchpoint. */
1039 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1041 int i;
1043 for (i = 0; i < env->nb_watchpoints; i++) {
1044 if (addr == env->watchpoint[i].vaddr)
1045 return 0;
1047 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1048 return -1;
1050 i = env->nb_watchpoints++;
1051 env->watchpoint[i].vaddr = addr;
1052 tlb_flush_page(env, addr);
1053 /* FIXME: This flush is needed because of the hack to make memory ops
1054 terminate the TB. It can be removed once the proper IO trap and
1055 re-execute bits are in. */
1056 tb_flush(env);
1057 return i;
1060 /* Remove a watchpoint. */
1061 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1063 int i;
1065 for (i = 0; i < env->nb_watchpoints; i++) {
1066 if (addr == env->watchpoint[i].vaddr) {
1067 env->nb_watchpoints--;
1068 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1069 tlb_flush_page(env, addr);
1070 return 0;
1073 return -1;
1076 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1077 breakpoint is reached */
1078 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1080 #if defined(TARGET_HAS_ICE)
1081 int i;
1083 for(i = 0; i < env->nb_breakpoints; i++) {
1084 if (env->breakpoints[i] == pc)
1085 return 0;
1088 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1089 return -1;
1090 env->breakpoints[env->nb_breakpoints++] = pc;
1092 breakpoint_invalidate(env, pc);
1093 return 0;
1094 #else
1095 return -1;
1096 #endif
1099 /* remove a breakpoint */
1100 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1102 #if defined(TARGET_HAS_ICE)
1103 int i;
1104 for(i = 0; i < env->nb_breakpoints; i++) {
1105 if (env->breakpoints[i] == pc)
1106 goto found;
1108 return -1;
1109 found:
1110 env->nb_breakpoints--;
1111 if (i < env->nb_breakpoints)
1112 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1114 breakpoint_invalidate(env, pc);
1115 return 0;
1116 #else
1117 return -1;
1118 #endif
1121 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1122 CPU loop after each instruction */
1123 void cpu_single_step(CPUState *env, int enabled)
1125 #if defined(TARGET_HAS_ICE)
1126 if (env->singlestep_enabled != enabled) {
1127 env->singlestep_enabled = enabled;
1128         /* must flush all the translated code to avoid inconsistencies */
1129 /* XXX: only flush what is necessary */
1130 tb_flush(env);
1132 #endif
1135 /* enable or disable low-level logging */
1136 void cpu_set_log(int log_flags)
1138 loglevel = log_flags;
1139 if (loglevel && !logfile) {
1140 logfile = fopen(logfilename, "w");
1141 if (!logfile) {
1142 perror(logfilename);
1143 _exit(1);
1145 #if !defined(CONFIG_SOFTMMU)
1146         /* avoid glibc's use of mmap() by setting the stdio buffer "by hand" */
1148 static uint8_t logfile_buf[4096];
1149 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1151 #else
1152 setvbuf(logfile, NULL, _IOLBF, 0);
1153 #endif
1157 void cpu_set_log_filename(const char *filename)
1159 logfilename = strdup(filename);
1162 /* mask must never be zero, except for A20 change call */
1163 void cpu_interrupt(CPUState *env, int mask)
1165 TranslationBlock *tb;
1166 static int interrupt_lock;
1168 env->interrupt_request |= mask;
1169     /* if the cpu is currently executing code, we must unlink it and
1170        all the potentially executing TBs */
1171 tb = env->current_tb;
1172 if (tb && !testandset(&interrupt_lock)) {
1173 env->current_tb = NULL;
1174 tb_reset_jump_recursive(tb);
1175 interrupt_lock = 0;
1179 void cpu_reset_interrupt(CPUState *env, int mask)
1181 env->interrupt_request &= ~mask;
1184 CPULogItem cpu_log_items[] = {
1185 { CPU_LOG_TB_OUT_ASM, "out_asm",
1186 "show generated host assembly code for each compiled TB" },
1187 { CPU_LOG_TB_IN_ASM, "in_asm",
1188 "show target assembly code for each compiled TB" },
1189 { CPU_LOG_TB_OP, "op",
1190 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1191 #ifdef TARGET_I386
1192 { CPU_LOG_TB_OP_OPT, "op_opt",
1193 "show micro ops after optimization for each compiled TB" },
1194 #endif
1195 { CPU_LOG_INT, "int",
1196 "show interrupts/exceptions in short format" },
1197 { CPU_LOG_EXEC, "exec",
1198 "show trace before each executed TB (lots of logs)" },
1199 { CPU_LOG_TB_CPU, "cpu",
1200 "show CPU state before bloc translation" },
1201 #ifdef TARGET_I386
1202 { CPU_LOG_PCALL, "pcall",
1203 "show protected mode far calls/returns/exceptions" },
1204 #endif
1205 #ifdef DEBUG_IOPORT
1206 { CPU_LOG_IOPORT, "ioport",
1207 "show all i/o ports accesses" },
1208 #endif
1209 { 0, NULL, NULL },
1212 static int cmp1(const char *s1, int n, const char *s2)
1214 if (strlen(s2) != n)
1215 return 0;
1216 return memcmp(s1, s2, n) == 0;
1219 /* takes a comma-separated list of log masks. Returns 0 on error. */
1220 int cpu_str_to_log_mask(const char *str)
1222 CPULogItem *item;
1223 int mask;
1224 const char *p, *p1;
1226 p = str;
1227 mask = 0;
1228 for(;;) {
1229 p1 = strchr(p, ',');
1230 if (!p1)
1231 p1 = p + strlen(p);
1232 if(cmp1(p,p1-p,"all")) {
1233 for(item = cpu_log_items; item->mask != 0; item++) {
1234 mask |= item->mask;
1236 } else {
1237 for(item = cpu_log_items; item->mask != 0; item++) {
1238 if (cmp1(p, p1 - p, item->name))
1239 goto found;
1241 return 0;
1243 found:
1244 mask |= item->mask;
1245 if (*p1 != ',')
1246 break;
1247 p = p1 + 1;
1249 return mask;
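/* example: cpu_str_to_log_mask("in_asm,cpu") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, and "all" sets every mask listed in
   cpu_log_items[] */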
1252 void cpu_abort(CPUState *env, const char *fmt, ...)
1254 va_list ap;
1256 va_start(ap, fmt);
1257 fprintf(stderr, "qemu: fatal: ");
1258 vfprintf(stderr, fmt, ap);
1259 fprintf(stderr, "\n");
1260 #ifdef TARGET_I386
1261 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1262 #else
1263 cpu_dump_state(env, stderr, fprintf, 0);
1264 #endif
1265 va_end(ap);
1266 abort();
1269 CPUState *cpu_copy(CPUState *env)
1271 CPUState *new_env = cpu_init();
1272 /* preserve chaining and index */
1273 CPUState *next_cpu = new_env->next_cpu;
1274 int cpu_index = new_env->cpu_index;
1275 memcpy(new_env, env, sizeof(CPUState));
1276 new_env->next_cpu = next_cpu;
1277 new_env->cpu_index = cpu_index;
1278 return new_env;
1281 #if !defined(CONFIG_USER_ONLY)
1283 /* NOTE: if flush_global is true, also flush global entries (not
1284 implemented yet) */
1285 void tlb_flush(CPUState *env, int flush_global)
1287 int i;
1289 #if defined(DEBUG_TLB)
1290 printf("tlb_flush:\n");
1291 #endif
1292 /* must reset current TB so that interrupts cannot modify the
1293 links while we are modifying them */
1294 env->current_tb = NULL;
1296 for(i = 0; i < CPU_TLB_SIZE; i++) {
1297 env->tlb_table[0][i].addr_read = -1;
1298 env->tlb_table[0][i].addr_write = -1;
1299 env->tlb_table[0][i].addr_code = -1;
1300 env->tlb_table[1][i].addr_read = -1;
1301 env->tlb_table[1][i].addr_write = -1;
1302 env->tlb_table[1][i].addr_code = -1;
1305 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1307 #if !defined(CONFIG_SOFTMMU)
1308 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1309 #endif
1310 #ifdef USE_KQEMU
1311 if (env->kqemu_enabled) {
1312 kqemu_flush(env, flush_global);
1314 #endif
1315 tlb_flush_count++;
1318 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1320 if (addr == (tlb_entry->addr_read &
1321 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1322 addr == (tlb_entry->addr_write &
1323 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1324 addr == (tlb_entry->addr_code &
1325 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1326 tlb_entry->addr_read = -1;
1327 tlb_entry->addr_write = -1;
1328 tlb_entry->addr_code = -1;
1332 void tlb_flush_page(CPUState *env, target_ulong addr)
1334 int i;
1335 TranslationBlock *tb;
1337 #if defined(DEBUG_TLB)
1338 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1339 #endif
1340 /* must reset current TB so that interrupts cannot modify the
1341 links while we are modifying them */
1342 env->current_tb = NULL;
1344 addr &= TARGET_PAGE_MASK;
1345 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1346 tlb_flush_entry(&env->tlb_table[0][i], addr);
1347 tlb_flush_entry(&env->tlb_table[1][i], addr);
1349 /* Discard jump cache entries for any tb which might potentially
1350 overlap the flushed page. */
1351 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1352 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1354 i = tb_jmp_cache_hash_page(addr);
1355 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1357 #if !defined(CONFIG_SOFTMMU)
1358 if (addr < MMAP_AREA_END)
1359 munmap((void *)addr, TARGET_PAGE_SIZE);
1360 #endif
1361 #ifdef USE_KQEMU
1362 if (env->kqemu_enabled) {
1363 kqemu_flush_page(env, addr);
1365 #endif
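/* each CPU keeps two software TLBs, selected by 'is_user' in
   tlb_set_page_exec(); an address maps to entry
   (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), which is why the flush
   helpers above clear the same slot in both tables */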
1368 /* update the TLBs so that writes to code in the virtual page 'addr'
1369 can be detected */
1370 static void tlb_protect_code(ram_addr_t ram_addr)
1372 cpu_physical_memory_reset_dirty(ram_addr,
1373 ram_addr + TARGET_PAGE_SIZE,
1374 CODE_DIRTY_FLAG);
1377 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1378 tested for self modifying code */
1379 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1380 target_ulong vaddr)
1382 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
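/* phys_ram_dirty holds one byte of dirty flags per RAM page (0xff means
   fully dirty).  tlb_protect_code() clears CODE_DIRTY_FLAG while a page
   contains translated code, so guest writes to it take the notdirty slow
   path further below, invalidate the affected TBs, and only restore the
   fast RAM mapping once no code remains in the page. */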
1385 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1386 unsigned long start, unsigned long length)
1388 unsigned long addr;
1389 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1390 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1391 if ((addr - start) < length) {
1392 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1397 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1398 int dirty_flags)
1400 CPUState *env;
1401 unsigned long length, start1;
1402 int i, mask, len;
1403 uint8_t *p;
1405 start &= TARGET_PAGE_MASK;
1406 end = TARGET_PAGE_ALIGN(end);
1408 length = end - start;
1409 if (length == 0)
1410 return;
1411 len = length >> TARGET_PAGE_BITS;
1412 #ifdef USE_KQEMU
1413 /* XXX: should not depend on cpu context */
1414 env = first_cpu;
1415 if (env->kqemu_enabled) {
1416 ram_addr_t addr;
1417 addr = start;
1418 for(i = 0; i < len; i++) {
1419 kqemu_set_notdirty(env, addr);
1420 addr += TARGET_PAGE_SIZE;
1423 #endif
1424 mask = ~dirty_flags;
1425 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1426 for(i = 0; i < len; i++)
1427 p[i] &= mask;
1429 /* we modify the TLB cache so that the dirty bit will be set again
1430 when accessing the range */
1431 start1 = start + (unsigned long)phys_ram_base;
1432 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1433 for(i = 0; i < CPU_TLB_SIZE; i++)
1434 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1435 for(i = 0; i < CPU_TLB_SIZE; i++)
1436 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1439 #if !defined(CONFIG_SOFTMMU)
1440 /* XXX: this is expensive */
1442 VirtPageDesc *p;
1443 int j;
1444 target_ulong addr;
1446 for(i = 0; i < L1_SIZE; i++) {
1447 p = l1_virt_map[i];
1448 if (p) {
1449 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1450 for(j = 0; j < L2_SIZE; j++) {
1451 if (p->valid_tag == virt_valid_tag &&
1452 p->phys_addr >= start && p->phys_addr < end &&
1453 (p->prot & PROT_WRITE)) {
1454 if (addr < MMAP_AREA_END) {
1455 mprotect((void *)addr, TARGET_PAGE_SIZE,
1456 p->prot & ~PROT_WRITE);
1459 addr += TARGET_PAGE_SIZE;
1460 p++;
1465 #endif
1468 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1470 ram_addr_t ram_addr;
1472 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1473 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1474 tlb_entry->addend - (unsigned long)phys_ram_base;
1475 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1476 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1481 /* update the TLB according to the current state of the dirty bits */
1482 void cpu_tlb_update_dirty(CPUState *env)
1484 int i;
1485 for(i = 0; i < CPU_TLB_SIZE; i++)
1486 tlb_update_dirty(&env->tlb_table[0][i]);
1487 for(i = 0; i < CPU_TLB_SIZE; i++)
1488 tlb_update_dirty(&env->tlb_table[1][i]);
1491 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1492 unsigned long start)
1494 unsigned long addr;
1495 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1496 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1497 if (addr == start) {
1498 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1503 /* update the TLB corresponding to virtual page vaddr and phys addr
1504 addr so that it is no longer dirty */
1505 static inline void tlb_set_dirty(CPUState *env,
1506 unsigned long addr, target_ulong vaddr)
1508 int i;
1510 addr &= TARGET_PAGE_MASK;
1511 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1512 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1513 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1516 /* add a new TLB entry. At most one entry for a given virtual address
1517 is permitted. Return 0 if OK or 2 if the page could not be mapped
1518 (can only happen in non SOFTMMU mode for I/O pages or pages
1519 conflicting with the host address space). */
1520 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1521 target_phys_addr_t paddr, int prot,
1522 int is_user, int is_softmmu)
1524 PhysPageDesc *p;
1525 unsigned long pd;
1526 unsigned int index;
1527 target_ulong address;
1528 target_phys_addr_t addend;
1529 int ret;
1530 CPUTLBEntry *te;
1531 int i;
1533 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1534 if (!p) {
1535 pd = IO_MEM_UNASSIGNED;
1536 } else {
1537 pd = p->phys_offset;
1539 #if defined(DEBUG_TLB)
1540 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1541 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1542 #endif
1544 ret = 0;
1545 #if !defined(CONFIG_SOFTMMU)
1546 if (is_softmmu)
1547 #endif
1549 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1550 /* IO memory case */
1551 address = vaddr | pd;
1552 addend = paddr;
1553 } else {
1554 /* standard memory */
1555 address = vaddr;
1556 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1559 /* Make accesses to pages with watchpoints go via the
1560 watchpoint trap routines. */
1561 for (i = 0; i < env->nb_watchpoints; i++) {
1562 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1563 if (address & ~TARGET_PAGE_MASK) {
1564 env->watchpoint[i].is_ram = 0;
1565 address = vaddr | io_mem_watch;
1566 } else {
1567 env->watchpoint[i].is_ram = 1;
1568 /* TODO: Figure out how to make read watchpoints coexist
1569 with code. */
1570 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1575 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1576 addend -= vaddr;
1577 te = &env->tlb_table[is_user][index];
1578 te->addend = addend;
1579 if (prot & PAGE_READ) {
1580 te->addr_read = address;
1581 } else {
1582 te->addr_read = -1;
1584 if (prot & PAGE_EXEC) {
1585 te->addr_code = address;
1586 } else {
1587 te->addr_code = -1;
1589 if (prot & PAGE_WRITE) {
1590 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1591 (pd & IO_MEM_ROMD)) {
1592 /* write access calls the I/O callback */
1593 te->addr_write = vaddr |
1594 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1595 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1596 !cpu_physical_memory_is_dirty(pd)) {
1597 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1598 } else {
1599 te->addr_write = address;
1601 } else {
1602 te->addr_write = -1;
1605 #if !defined(CONFIG_SOFTMMU)
1606 else {
1607 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1608 /* IO access: no mapping is done as it will be handled by the
1609 soft MMU */
1610 if (!(env->hflags & HF_SOFTMMU_MASK))
1611 ret = 2;
1612 } else {
1613 void *map_addr;
1615 if (vaddr >= MMAP_AREA_END) {
1616 ret = 2;
1617 } else {
1618 if (prot & PROT_WRITE) {
1619 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1620 #if defined(TARGET_HAS_SMC) || 1
1621 first_tb ||
1622 #endif
1623 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1624 !cpu_physical_memory_is_dirty(pd))) {
1625                     /* ROM: we behave as if code were present */
1626 /* if code is present, we only map as read only and save the
1627 original mapping */
1628 VirtPageDesc *vp;
1630 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1631 vp->phys_addr = pd;
1632 vp->prot = prot;
1633 vp->valid_tag = virt_valid_tag;
1634 prot &= ~PAGE_WRITE;
1637 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1638 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1639 if (map_addr == MAP_FAILED) {
1640                     cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1641                               paddr, vaddr);
1646 #endif
1647 return ret;
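/* summary of the addr_write encoding chosen above: ROM/ROMD pages keep the
   io_index bits so writes hit the I/O callbacks, clean RAM pages get
   IO_MEM_NOTDIRTY so the first write goes through the dirty-tracking slow
   path, and dirty RAM pages get the plain virtual address for direct host
   access through 'addend' */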
1650 /* called from signal handler: invalidate the code and unprotect the
1651    page. Return TRUE if the fault was successfully handled. */
1652 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1654 #if !defined(CONFIG_SOFTMMU)
1655 VirtPageDesc *vp;
1657 #if defined(DEBUG_TLB)
1658 printf("page_unprotect: addr=0x%08x\n", addr);
1659 #endif
1660 addr &= TARGET_PAGE_MASK;
1662 /* if it is not mapped, no need to worry here */
1663 if (addr >= MMAP_AREA_END)
1664 return 0;
1665 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1666 if (!vp)
1667 return 0;
1668 /* NOTE: in this case, validate_tag is _not_ tested as it
1669 validates only the code TLB */
1670 if (vp->valid_tag != virt_valid_tag)
1671 return 0;
1672 if (!(vp->prot & PAGE_WRITE))
1673 return 0;
1674 #if defined(DEBUG_TLB)
1675 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1676 addr, vp->phys_addr, vp->prot);
1677 #endif
1678 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1679 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1680 (unsigned long)addr, vp->prot);
1681 /* set the dirty bit */
1682 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1683 /* flush the code inside */
1684 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1685 return 1;
1686 #else
1687 return 0;
1688 #endif
1691 #else
1693 void tlb_flush(CPUState *env, int flush_global)
1697 void tlb_flush_page(CPUState *env, target_ulong addr)
1701 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1702 target_phys_addr_t paddr, int prot,
1703 int is_user, int is_softmmu)
1705 return 0;
1708 /* dump memory mappings */
1709 void page_dump(FILE *f)
1711 unsigned long start, end;
1712 int i, j, prot, prot1;
1713 PageDesc *p;
1715 fprintf(f, "%-8s %-8s %-8s %s\n",
1716 "start", "end", "size", "prot");
1717 start = -1;
1718 end = -1;
1719 prot = 0;
1720 for(i = 0; i <= L1_SIZE; i++) {
1721 if (i < L1_SIZE)
1722 p = l1_map[i];
1723 else
1724 p = NULL;
1725 for(j = 0;j < L2_SIZE; j++) {
1726 if (!p)
1727 prot1 = 0;
1728 else
1729 prot1 = p[j].flags;
1730 if (prot1 != prot) {
1731 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1732 if (start != -1) {
1733 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1734 start, end, end - start,
1735 prot & PAGE_READ ? 'r' : '-',
1736 prot & PAGE_WRITE ? 'w' : '-',
1737 prot & PAGE_EXEC ? 'x' : '-');
1739 if (prot1 != 0)
1740 start = end;
1741 else
1742 start = -1;
1743 prot = prot1;
1745 if (!p)
1746 break;
1751 int page_get_flags(target_ulong address)
1753 PageDesc *p;
1755 p = page_find(address >> TARGET_PAGE_BITS);
1756 if (!p)
1757 return 0;
1758 return p->flags;
1761 /* modify the flags of a page and invalidate the code if
1762    necessary. The flag PAGE_WRITE_ORG is set automatically
1763 depending on PAGE_WRITE */
1764 void page_set_flags(target_ulong start, target_ulong end, int flags)
1766 PageDesc *p;
1767 target_ulong addr;
1769 start = start & TARGET_PAGE_MASK;
1770 end = TARGET_PAGE_ALIGN(end);
1771 if (flags & PAGE_WRITE)
1772 flags |= PAGE_WRITE_ORG;
1773 spin_lock(&tb_lock);
1774 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1775 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1776 /* if the write protection is set, then we invalidate the code
1777 inside */
1778 if (!(p->flags & PAGE_WRITE) &&
1779 (flags & PAGE_WRITE) &&
1780 p->first_tb) {
1781 tb_invalidate_phys_page(addr, 0, NULL);
1783 p->flags = flags;
1785 spin_unlock(&tb_lock);
1788 /* called from signal handler: invalidate the code and unprotect the
1789    page. Return TRUE if the fault was successfully handled. */
1790 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1792 unsigned int page_index, prot, pindex;
1793 PageDesc *p, *p1;
1794 target_ulong host_start, host_end, addr;
1796 host_start = address & qemu_host_page_mask;
1797 page_index = host_start >> TARGET_PAGE_BITS;
1798 p1 = page_find(page_index);
1799 if (!p1)
1800 return 0;
1801 host_end = host_start + qemu_host_page_size;
1802 p = p1;
1803 prot = 0;
1804 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1805 prot |= p->flags;
1806 p++;
1808 /* if the page was really writable, then we change its
1809 protection back to writable */
1810 if (prot & PAGE_WRITE_ORG) {
1811 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1812 if (!(p1[pindex].flags & PAGE_WRITE)) {
1813 mprotect((void *)g2h(host_start), qemu_host_page_size,
1814 (prot & PAGE_BITS) | PAGE_WRITE);
1815 p1[pindex].flags |= PAGE_WRITE;
1816 /* and since the content will be modified, we must invalidate
1817 the corresponding translated code. */
1818 tb_invalidate_phys_page(address, pc, puc);
1819 #ifdef DEBUG_TB_CHECK
1820 tb_invalidate_check(address);
1821 #endif
1822 return 1;
1825 return 0;
1828 /* call this function when system calls directly modify a memory area */
1829 /* ??? This should be redundant now that we have lock_user. */
1830 void page_unprotect_range(target_ulong data, target_ulong data_size)
1832 target_ulong start, end, addr;
1834 start = data;
1835 end = start + data_size;
1836 start &= TARGET_PAGE_MASK;
1837 end = TARGET_PAGE_ALIGN(end);
1838 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1839 page_unprotect(addr, 0, NULL);
1843 static inline void tlb_set_dirty(CPUState *env,
1844 unsigned long addr, target_ulong vaddr)
1847 #endif /* defined(CONFIG_USER_ONLY) */
1849 /* register physical memory. 'size' must be a multiple of the target
1850 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1851 io memory page */
1852 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1853 unsigned long size,
1854 unsigned long phys_offset)
1856 target_phys_addr_t addr, end_addr;
1857 PhysPageDesc *p;
1858 CPUState *env;
1860 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1861 end_addr = start_addr + size;
1862 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1863 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1864 p->phys_offset = phys_offset;
1865 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1866 (phys_offset & IO_MEM_ROMD))
1867 phys_offset += TARGET_PAGE_SIZE;
1870 /* since each CPU stores ram addresses in its TLB cache, we must
1871 reset the modified entries */
1872 /* XXX: slow ! */
1873 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1874 tlb_flush(env, 1);
1878 /* XXX: temporary until new memory mapping API */
1879 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1881 PhysPageDesc *p;
1883 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1884 if (!p)
1885 return IO_MEM_UNASSIGNED;
1886 return p->phys_offset;
1889 /* XXX: better than nothing */
1890 ram_addr_t qemu_ram_alloc(unsigned int size)
1892 ram_addr_t addr;
1893 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1894 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1895 size, phys_ram_size);
1896 abort();
1898 addr = phys_ram_alloc_offset;
1899 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1900 return addr;
1903 void qemu_ram_free(ram_addr_t addr)
1907 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1909 #ifdef DEBUG_UNASSIGNED
1910 printf("Unassigned mem read 0x%08x\n", (int)addr);
1911 #endif
1912 return 0;
1915 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1917 #ifdef DEBUG_UNASSIGNED
1918 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1919 #endif
1922 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1923 unassigned_mem_readb,
1924 unassigned_mem_readb,
1925 unassigned_mem_readb,
1928 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1929 unassigned_mem_writeb,
1930 unassigned_mem_writeb,
1931 unassigned_mem_writeb,
1934 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1936 unsigned long ram_addr;
1937 int dirty_flags;
1938 ram_addr = addr - (unsigned long)phys_ram_base;
1939 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1940 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1941 #if !defined(CONFIG_USER_ONLY)
1942 tb_invalidate_phys_page_fast(ram_addr, 1);
1943 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1944 #endif
1946 stb_p((uint8_t *)(long)addr, val);
1947 #ifdef USE_KQEMU
1948 if (cpu_single_env->kqemu_enabled &&
1949 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1950 kqemu_modify_page(cpu_single_env, ram_addr);
1951 #endif
1952 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1953 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1954 /* we remove the notdirty callback only if the code has been
1955 flushed */
1956 if (dirty_flags == 0xff)
1957 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1960 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1962 unsigned long ram_addr;
1963 int dirty_flags;
1964 ram_addr = addr - (unsigned long)phys_ram_base;
1965 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1966 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1967 #if !defined(CONFIG_USER_ONLY)
1968 tb_invalidate_phys_page_fast(ram_addr, 2);
1969 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1970 #endif
1972 stw_p((uint8_t *)(long)addr, val);
1973 #ifdef USE_KQEMU
1974 if (cpu_single_env->kqemu_enabled &&
1975 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1976 kqemu_modify_page(cpu_single_env, ram_addr);
1977 #endif
1978 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1979 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1980 /* we remove the notdirty callback only if the code has been
1981 flushed */
1982 if (dirty_flags == 0xff)
1983 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1986 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1988 unsigned long ram_addr;
1989 int dirty_flags;
1990 ram_addr = addr - (unsigned long)phys_ram_base;
1991 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1992 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1993 #if !defined(CONFIG_USER_ONLY)
1994 tb_invalidate_phys_page_fast(ram_addr, 4);
1995 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1996 #endif
1998 stl_p((uint8_t *)(long)addr, val);
1999 #ifdef USE_KQEMU
2000 if (cpu_single_env->kqemu_enabled &&
2001 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2002 kqemu_modify_page(cpu_single_env, ram_addr);
2003 #endif
2004 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2005 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2006 /* we remove the notdirty callback only if the code has been
2007 flushed */
2008 if (dirty_flags == 0xff)
2009 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2012 static CPUReadMemoryFunc *error_mem_read[3] = {
2013 NULL, /* never used */
2014 NULL, /* never used */
2015 NULL, /* never used */
2018 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2019 notdirty_mem_writeb,
2020 notdirty_mem_writew,
2021 notdirty_mem_writel,
2024 #if defined(CONFIG_SOFTMMU)
2025 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2026 so these check for a hit then pass through to the normal out-of-line
2027 phys routines. */
2028 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2030 return ldub_phys(addr);
2033 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2035 return lduw_phys(addr);
2038 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2040 return ldl_phys(addr);
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
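/* Note: the watch_mem_* handlers below are the ones registered as
   io_mem_watch in io_mem_init(); pages containing a watchpoint are
   expected to be redirected to that slot by the TLB code, so that
   every access funnels through check_watchpoint(). */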
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
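/* Register the fixed I/O memory slots (ROM, unassigned, notdirty and,
   with the soft MMU, the watchpoint handler) and allocate the dirty
   bits array covering all of guest RAM. */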
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. If it is zero or negative, a new
   io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
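/* Illustrative sketch only (not part of the original code): a device
   model would typically register its handlers and then map them, e.g.
       iomemtype = cpu_register_io_memory(0, my_dev_read, my_dev_write, s);
       cpu_register_physical_memory(base, 0x1000, iomemtype);
   where my_dev_read/my_dev_write are three-entry byte/word/long handler
   arrays and s, base, iomemtype are hypothetical names. */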
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
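/* System emulation version: each page is looked up in the physical
   page table and the access is dispatched either to RAM (with code
   invalidation and dirty-bit update on writes) or to the registered
   I/O handlers, split into 32/16/8 bit accesses as alignment allows. */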
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
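/* Illustrative sketch only (not part of the original code): callers
   normally go through the cpu_physical_memory_read/write wrappers,
   e.g. a device model doing DMA into guest RAM:
       cpu_physical_memory_write(dma_addr, buf, size);
   which ends up in cpu_physical_memory_rw(dma_addr, buf, size, 1).
   dma_addr, buf and size are hypothetical names. */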
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
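/* The ld*_phys/st*_phys helpers below read and write guest physical
   memory directly, dispatching per page between RAM (phys_ram_base)
   and the registered I/O handlers. */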
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
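/* Print translation cache statistics (TB counts and sizes, cross-page
   TBs, direct jump chaining and flush counters) through the supplied
   cpu_fprintf callback. */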
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
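/* Instantiate the code-access softmmu helpers (MMUSUFFIX _cmmu): the
   template is included once per access size, SHIFT 0/1/2/3 producing
   the 8/16/32/64 bit variants. */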
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif