Rename variables and rearrange code to please gcc -Wshadow checks
[qemu/qemu_0_9_1_stable.git] / exec.c
blob c782e5b6b28e6bcd745b92273f709a3780c9615a
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
51 //#define DEBUG_SUBPAGE
53 #if !defined(CONFIG_USER_ONLY)
54 /* TB consistency checks only implemented for usermode emulation. */
55 #undef DEBUG_TB_CHECK
56 #endif
58 /* threshold to flush the translated code buffer */
59 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #else
76 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
77 #define TARGET_PHYS_ADDR_SPACE_BITS 32
78 #endif
80 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
81 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
82 int nb_tbs;
83 /* any access to the tbs or the page table must use this lock */
84 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
86 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
87 uint8_t *code_gen_ptr;
89 int phys_ram_size;
90 int phys_ram_fd;
91 uint8_t *phys_ram_base;
92 uint8_t *phys_ram_dirty;
93 static ram_addr_t phys_ram_alloc_offset = 0;
95 CPUState *first_cpu;
96 /* current CPU in the current thread. It is only valid inside
97 cpu_exec() */
98 CPUState *cpu_single_env;
100 typedef struct PageDesc {
101 /* list of TBs intersecting this ram page */
102 TranslationBlock *first_tb;
103 /* in order to optimize self modifying code, we count the number
104 of lookups we do to a given page to use a bitmap */
105 unsigned int code_write_count;
106 uint8_t *code_bitmap;
107 #if defined(CONFIG_USER_ONLY)
108 unsigned long flags;
109 #endif
110 } PageDesc;
112 typedef struct PhysPageDesc {
113 /* offset in host memory of the page + io_index in the low 12 bits */
114 uint32_t phys_offset;
115 } PhysPageDesc;
117 #define L2_BITS 10
118 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
119 /* XXX: this is a temporary hack for alpha target.
120 * In the future, this is to be replaced by a multi-level table
121 * to actually be able to handle the complete 64 bits address space.
123 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
124 #else
125 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
126 #endif
128 #define L1_SIZE (1 << L1_BITS)
129 #define L2_SIZE (1 << L2_BITS)
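/* Worked example of the two-level split used by l1_map/l1_phys_map below
   (a sketch assuming TARGET_PAGE_BITS == 12, i.e. 4 KB target pages, and the
   32-bit case, so L1_BITS == 32 - 10 - 12 == 10):

       page_index = addr >> TARGET_PAGE_BITS;      e.g. 0x12345678 -> 0x12345
       l1_index   = page_index >> L2_BITS;         0x12345 >> 10   == 0x48
       l2_index   = page_index & (L2_SIZE - 1);    0x12345 & 0x3ff == 0x345

   so the descriptor lives at l1_map[0x48][0x345]; the second-level tables
   are allocated lazily by page_find_alloc()/phys_page_find_alloc(). */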
131 static void io_mem_init(void);
133 unsigned long qemu_real_host_page_size;
134 unsigned long qemu_host_page_bits;
135 unsigned long qemu_host_page_size;
136 unsigned long qemu_host_page_mask;
138 /* XXX: for system emulation, it could just be an array */
139 static PageDesc *l1_map[L1_SIZE];
140 PhysPageDesc **l1_phys_map;
142 /* io memory support */
143 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
144 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
145 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
146 static int io_mem_nb;
147 #if defined(CONFIG_SOFTMMU)
148 static int io_mem_watch;
149 #endif
151 /* log support */
152 char *logfilename = "/tmp/qemu.log";
153 FILE *logfile;
154 int loglevel;
156 /* statistics */
157 static int tlb_flush_count;
158 static int tb_flush_count;
159 static int tb_phys_invalidate_count;
161 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
162 typedef struct subpage_t {
163 target_phys_addr_t base;
164 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
165 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
166 void *opaque[TARGET_PAGE_SIZE];
167 } subpage_t;
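/* Reading aid: SUBPAGE_IDX() keeps only the offset within a target page, and
   the three arrays above hold one handler slot per byte offset, so a single
   guest page can be shared between several I/O regions; the slots are filled
   by subpage_register() (declared further down), presumably one slot per
   byte of the registered range. */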
169 static void page_init(void)
171 /* NOTE: we can always suppose that qemu_host_page_size >=
172 TARGET_PAGE_SIZE */
173 #ifdef _WIN32
175 SYSTEM_INFO system_info;
176 DWORD old_protect;
178 GetSystemInfo(&system_info);
179 qemu_real_host_page_size = system_info.dwPageSize;
181 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
182 PAGE_EXECUTE_READWRITE, &old_protect);
184 #else
185 qemu_real_host_page_size = getpagesize();
187 unsigned long start, end;
189 start = (unsigned long)code_gen_buffer;
190 start &= ~(qemu_real_host_page_size - 1);
192 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
193 end += qemu_real_host_page_size - 1;
194 end &= ~(qemu_real_host_page_size - 1);
196 mprotect((void *)start, end - start,
197 PROT_READ | PROT_WRITE | PROT_EXEC);
199 #endif
201 if (qemu_host_page_size == 0)
202 qemu_host_page_size = qemu_real_host_page_size;
203 if (qemu_host_page_size < TARGET_PAGE_SIZE)
204 qemu_host_page_size = TARGET_PAGE_SIZE;
205 qemu_host_page_bits = 0;
206 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
207 qemu_host_page_bits++;
208 qemu_host_page_mask = ~(qemu_host_page_size - 1);
209 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
210 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
213 static inline PageDesc *page_find_alloc(unsigned int index)
215 PageDesc **lp, *p;
217 lp = &l1_map[index >> L2_BITS];
218 p = *lp;
219 if (!p) {
220 /* allocate if not found */
221 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
222 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
223 *lp = p;
225 return p + (index & (L2_SIZE - 1));
228 static inline PageDesc *page_find(unsigned int index)
230 PageDesc *p;
232 p = l1_map[index >> L2_BITS];
233 if (!p)
234 return 0;
235 return p + (index & (L2_SIZE - 1));
238 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
240 void **lp, **p;
241 PhysPageDesc *pd;
243 p = (void **)l1_phys_map;
244 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
246 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
247 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
248 #endif
249 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
250 p = *lp;
251 if (!p) {
252 /* allocate if not found */
253 if (!alloc)
254 return NULL;
255 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
256 memset(p, 0, sizeof(void *) * L1_SIZE);
257 *lp = p;
259 #endif
260 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
261 pd = *lp;
262 if (!pd) {
263 int i;
264 /* allocate if not found */
265 if (!alloc)
266 return NULL;
267 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
268 *lp = pd;
269 for (i = 0; i < L2_SIZE; i++)
270 pd[i].phys_offset = IO_MEM_UNASSIGNED;
272 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
275 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
277 return phys_page_find_alloc(index, 0);
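/* Usage sketch (illustrative): callers look up a physical page descriptor
   with

       PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
       uint32_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

   which is the pattern used by tlb_set_page_exec() and
   cpu_get_physical_page_desc() below.  For targets with
   TARGET_PHYS_ADDR_SPACE_BITS > 32 the allocator above adds one extra table
   level indexed by the bits above L1_BITS + L2_BITS. */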
280 #if !defined(CONFIG_USER_ONLY)
281 static void tlb_protect_code(ram_addr_t ram_addr);
282 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
283 target_ulong vaddr);
284 #endif
286 void cpu_exec_init(CPUState *env)
288 CPUState **penv;
289 int cpu_index;
291 if (!code_gen_ptr) {
292 code_gen_ptr = code_gen_buffer;
293 page_init();
294 io_mem_init();
296 env->next_cpu = NULL;
297 penv = &first_cpu;
298 cpu_index = 0;
299 while (*penv != NULL) {
300 penv = (CPUState **)&(*penv)->next_cpu;
301 cpu_index++;
303 env->cpu_index = cpu_index;
304 env->nb_watchpoints = 0;
305 *penv = env;
308 static inline void invalidate_page_bitmap(PageDesc *p)
310 if (p->code_bitmap) {
311 qemu_free(p->code_bitmap);
312 p->code_bitmap = NULL;
314 p->code_write_count = 0;
317 /* set to NULL all the 'first_tb' fields in all PageDescs */
318 static void page_flush_tb(void)
320 int i, j;
321 PageDesc *p;
323 for(i = 0; i < L1_SIZE; i++) {
324 p = l1_map[i];
325 if (p) {
326 for(j = 0; j < L2_SIZE; j++) {
327 p->first_tb = NULL;
328 invalidate_page_bitmap(p);
329 p++;
335 /* flush all the translation blocks */
336 /* XXX: tb_flush is currently not thread safe */
337 void tb_flush(CPUState *env1)
339 CPUState *env;
340 #if defined(DEBUG_FLUSH)
341 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
342 code_gen_ptr - code_gen_buffer,
343 nb_tbs,
344 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
345 #endif
346 nb_tbs = 0;
348 for(env = first_cpu; env != NULL; env = env->next_cpu) {
349 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
352 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
353 page_flush_tb();
355 code_gen_ptr = code_gen_buffer;
356 /* XXX: flush processor icache at this point if cache flush is
357 expensive */
358 tb_flush_count++;
361 #ifdef DEBUG_TB_CHECK
363 static void tb_invalidate_check(target_ulong address)
365 TranslationBlock *tb;
366 int i;
367 address &= TARGET_PAGE_MASK;
368 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
369 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
370 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
371 address >= tb->pc + tb->size)) {
372 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
373 address, (long)tb->pc, tb->size);
379 /* verify that all the pages have correct rights for code */
380 static void tb_page_check(void)
382 TranslationBlock *tb;
383 int i, flags1, flags2;
385 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
386 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
387 flags1 = page_get_flags(tb->pc);
388 flags2 = page_get_flags(tb->pc + tb->size - 1);
389 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
390 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
391 (long)tb->pc, tb->size, flags1, flags2);
397 void tb_jmp_check(TranslationBlock *tb)
399 TranslationBlock *tb1;
400 unsigned int n1;
402 /* suppress any remaining jumps to this TB */
403 tb1 = tb->jmp_first;
404 for(;;) {
405 n1 = (long)tb1 & 3;
406 tb1 = (TranslationBlock *)((long)tb1 & ~3);
407 if (n1 == 2)
408 break;
409 tb1 = tb1->jmp_next[n1];
411 /* check end of list */
412 if (tb1 != tb) {
413 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
417 #endif
419 /* invalidate one TB */
420 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
421 int next_offset)
423 TranslationBlock *tb1;
424 for(;;) {
425 tb1 = *ptb;
426 if (tb1 == tb) {
427 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
428 break;
430 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
434 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
436 TranslationBlock *tb1;
437 unsigned int n1;
439 for(;;) {
440 tb1 = *ptb;
441 n1 = (long)tb1 & 3;
442 tb1 = (TranslationBlock *)((long)tb1 & ~3);
443 if (tb1 == tb) {
444 *ptb = tb1->page_next[n1];
445 break;
447 ptb = &tb1->page_next[n1];
451 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
453 TranslationBlock *tb1, **ptb;
454 unsigned int n1;
456 ptb = &tb->jmp_next[n];
457 tb1 = *ptb;
458 if (tb1) {
459 /* find tb(n) in circular list */
460 for(;;) {
461 tb1 = *ptb;
462 n1 = (long)tb1 & 3;
463 tb1 = (TranslationBlock *)((long)tb1 & ~3);
464 if (n1 == n && tb1 == tb)
465 break;
466 if (n1 == 2) {
467 ptb = &tb1->jmp_first;
468 } else {
469 ptb = &tb1->jmp_next[n1];
472 /* now we can suppress tb(n) from the list */
473 *ptb = tb->jmp_next[n];
475 tb->jmp_next[n] = NULL;
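/* Reading aid for the jump lists handled above and in tb_phys_invalidate():
   the low 2 bits of a jmp_first/jmp_next pointer are a tag, not part of the
   address.  Tags 0 and 1 mean "slot n of that TB's jmp_next[]"; tag 2 marks
   the list head kept in jmp_first, which is why an empty list is initialized
   as (TranslationBlock *)((long)tb | 2) and every walker masks with ~3 and
   stops when (ptr & 3) == 2. */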
479 /* reset the jump entry 'n' of a TB so that it is not chained to
480 another TB */
481 static inline void tb_reset_jump(TranslationBlock *tb, int n)
483 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
486 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
488 CPUState *env;
489 PageDesc *p;
490 unsigned int h, n1;
491 target_ulong phys_pc;
492 TranslationBlock *tb1, *tb2;
494 /* remove the TB from the hash list */
495 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
496 h = tb_phys_hash_func(phys_pc);
497 tb_remove(&tb_phys_hash[h], tb,
498 offsetof(TranslationBlock, phys_hash_next));
500 /* remove the TB from the page list */
501 if (tb->page_addr[0] != page_addr) {
502 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
503 tb_page_remove(&p->first_tb, tb);
504 invalidate_page_bitmap(p);
506 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
507 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
508 tb_page_remove(&p->first_tb, tb);
509 invalidate_page_bitmap(p);
512 tb_invalidated_flag = 1;
514 /* remove the TB from the hash list */
515 h = tb_jmp_cache_hash_func(tb->pc);
516 for(env = first_cpu; env != NULL; env = env->next_cpu) {
517 if (env->tb_jmp_cache[h] == tb)
518 env->tb_jmp_cache[h] = NULL;
521 /* suppress this TB from the two jump lists */
522 tb_jmp_remove(tb, 0);
523 tb_jmp_remove(tb, 1);
525 /* suppress any remaining jumps to this TB */
526 tb1 = tb->jmp_first;
527 for(;;) {
528 n1 = (long)tb1 & 3;
529 if (n1 == 2)
530 break;
531 tb1 = (TranslationBlock *)((long)tb1 & ~3);
532 tb2 = tb1->jmp_next[n1];
533 tb_reset_jump(tb1, n1);
534 tb1->jmp_next[n1] = NULL;
535 tb1 = tb2;
537 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
539 tb_phys_invalidate_count++;
542 static inline void set_bits(uint8_t *tab, int start, int len)
544 int end, mask, end1;
546 end = start + len;
547 tab += start >> 3;
548 mask = 0xff << (start & 7);
549 if ((start & ~7) == (end & ~7)) {
550 if (start < end) {
551 mask &= ~(0xff << (end & 7));
552 *tab |= mask;
554 } else {
555 *tab++ |= mask;
556 start = (start + 8) & ~7;
557 end1 = end & ~7;
558 while (start < end1) {
559 *tab++ = 0xff;
560 start += 8;
562 if (start < end) {
563 mask = ~(0xff << (end & 7));
564 *tab |= mask;
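/* Worked example (illustrative): set_bits(bitmap, 13, 7) marks bits 13..19,
   i.e. it ORs 0xe0 into bitmap[1] (bits 13..15) and 0x0f into bitmap[2]
   (bits 16..19); bit i of a page bitmap is (tab[i >> 3] >> (i & 7)) & 1. */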
569 static void build_page_bitmap(PageDesc *p)
571 int n, tb_start, tb_end;
572 TranslationBlock *tb;
574 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
575 if (!p->code_bitmap)
576 return;
577 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
579 tb = p->first_tb;
580 while (tb != NULL) {
581 n = (long)tb & 3;
582 tb = (TranslationBlock *)((long)tb & ~3);
583 /* NOTE: this is subtle as a TB may span two physical pages */
584 if (n == 0) {
585 /* NOTE: tb_end may be after the end of the page, but
586 it is not a problem */
587 tb_start = tb->pc & ~TARGET_PAGE_MASK;
588 tb_end = tb_start + tb->size;
589 if (tb_end > TARGET_PAGE_SIZE)
590 tb_end = TARGET_PAGE_SIZE;
591 } else {
592 tb_start = 0;
593 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
595 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
596 tb = tb->page_next[n];
600 #ifdef TARGET_HAS_PRECISE_SMC
602 static void tb_gen_code(CPUState *env,
603 target_ulong pc, target_ulong cs_base, int flags,
604 int cflags)
606 TranslationBlock *tb;
607 uint8_t *tc_ptr;
608 target_ulong phys_pc, phys_page2, virt_page2;
609 int code_gen_size;
611 phys_pc = get_phys_addr_code(env, pc);
612 tb = tb_alloc(pc);
613 if (!tb) {
614 /* flush must be done */
615 tb_flush(env);
616 /* cannot fail at this point */
617 tb = tb_alloc(pc);
619 tc_ptr = code_gen_ptr;
620 tb->tc_ptr = tc_ptr;
621 tb->cs_base = cs_base;
622 tb->flags = flags;
623 tb->cflags = cflags;
624 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
625 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
627 /* check next page if needed */
628 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
629 phys_page2 = -1;
630 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
631 phys_page2 = get_phys_addr_code(env, virt_page2);
633 tb_link_phys(tb, phys_pc, phys_page2);
635 #endif
637 /* invalidate all TBs which intersect with the target physical page
638 starting in range [start;end[. NOTE: start and end must refer to
639 the same physical page. 'is_cpu_write_access' should be true if called
640 from a real cpu write access: the virtual CPU will exit the current
641 TB if code is modified inside this TB. */
642 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
643 int is_cpu_write_access)
645 int n, current_tb_modified, current_tb_not_found, current_flags;
646 CPUState *env = cpu_single_env;
647 PageDesc *p;
648 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
649 target_ulong tb_start, tb_end;
650 target_ulong current_pc, current_cs_base;
652 p = page_find(start >> TARGET_PAGE_BITS);
653 if (!p)
654 return;
655 if (!p->code_bitmap &&
656 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
657 is_cpu_write_access) {
658 /* build code bitmap */
659 build_page_bitmap(p);
662 /* we remove all the TBs in the range [start, end[ */
663 /* XXX: see if in some cases it could be faster to invalidate all the code */
664 current_tb_not_found = is_cpu_write_access;
665 current_tb_modified = 0;
666 current_tb = NULL; /* avoid warning */
667 current_pc = 0; /* avoid warning */
668 current_cs_base = 0; /* avoid warning */
669 current_flags = 0; /* avoid warning */
670 tb = p->first_tb;
671 while (tb != NULL) {
672 n = (long)tb & 3;
673 tb = (TranslationBlock *)((long)tb & ~3);
674 tb_next = tb->page_next[n];
675 /* NOTE: this is subtle as a TB may span two physical pages */
676 if (n == 0) {
677 /* NOTE: tb_end may be after the end of the page, but
678 it is not a problem */
679 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
680 tb_end = tb_start + tb->size;
681 } else {
682 tb_start = tb->page_addr[1];
683 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
685 if (!(tb_end <= start || tb_start >= end)) {
686 #ifdef TARGET_HAS_PRECISE_SMC
687 if (current_tb_not_found) {
688 current_tb_not_found = 0;
689 current_tb = NULL;
690 if (env->mem_write_pc) {
691 /* now we have a real cpu fault */
692 current_tb = tb_find_pc(env->mem_write_pc);
695 if (current_tb == tb &&
696 !(current_tb->cflags & CF_SINGLE_INSN)) {
697 /* If we are modifying the current TB, we must stop
698 its execution. We could be more precise by checking
699 that the modification is after the current PC, but it
700 would require a specialized function to partially
701 restore the CPU state */
703 current_tb_modified = 1;
704 cpu_restore_state(current_tb, env,
705 env->mem_write_pc, NULL);
706 #if defined(TARGET_I386)
707 current_flags = env->hflags;
708 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
709 current_cs_base = (target_ulong)env->segs[R_CS].base;
710 current_pc = current_cs_base + env->eip;
711 #else
712 #error unsupported CPU
713 #endif
715 #endif /* TARGET_HAS_PRECISE_SMC */
716 /* we need to do that to handle the case where a signal
717 occurs while doing tb_phys_invalidate() */
718 saved_tb = NULL;
719 if (env) {
720 saved_tb = env->current_tb;
721 env->current_tb = NULL;
723 tb_phys_invalidate(tb, -1);
724 if (env) {
725 env->current_tb = saved_tb;
726 if (env->interrupt_request && env->current_tb)
727 cpu_interrupt(env, env->interrupt_request);
730 tb = tb_next;
732 #if !defined(CONFIG_USER_ONLY)
733 /* if no code remaining, no need to continue to use slow writes */
734 if (!p->first_tb) {
735 invalidate_page_bitmap(p);
736 if (is_cpu_write_access) {
737 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
740 #endif
741 #ifdef TARGET_HAS_PRECISE_SMC
742 if (current_tb_modified) {
743 /* we generate a block containing just the instruction
744 modifying the memory. It will ensure that it cannot modify
745 itself */
746 env->current_tb = NULL;
747 tb_gen_code(env, current_pc, current_cs_base, current_flags,
748 CF_SINGLE_INSN);
749 cpu_resume_from_signal(env, NULL);
751 #endif
754 /* len must be <= 8 and start must be a multiple of len */
755 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
757 PageDesc *p;
758 int offset, b;
759 #if 0
760 if (1) {
761 if (loglevel) {
762 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
763 cpu_single_env->mem_write_vaddr, len,
764 cpu_single_env->eip,
765 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
768 #endif
769 p = page_find(start >> TARGET_PAGE_BITS);
770 if (!p)
771 return;
772 if (p->code_bitmap) {
773 offset = start & ~TARGET_PAGE_MASK;
774 b = p->code_bitmap[offset >> 3] >> (offset & 7);
775 if (b & ((1 << len) - 1))
776 goto do_invalidate;
777 } else {
778 do_invalidate:
779 tb_invalidate_phys_page_range(start, start + len, 1);
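/* Illustrative example of the fast path above: for a 4-byte store at page
   offset 0x124, offset >> 3 == 0x24 and offset & 7 == 4, so the code reads
   bitmap byte 0x24, shifts it right by 4 and tests the low len == 4 bits;
   only if one of the byte offsets 0x124..0x127 lies inside a translated
   block does it fall back to tb_invalidate_phys_page_range(). */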
783 #if !defined(CONFIG_SOFTMMU)
784 static void tb_invalidate_phys_page(target_ulong addr,
785 unsigned long pc, void *puc)
787 int n, current_flags, current_tb_modified;
788 target_ulong current_pc, current_cs_base;
789 PageDesc *p;
790 TranslationBlock *tb, *current_tb;
791 #ifdef TARGET_HAS_PRECISE_SMC
792 CPUState *env = cpu_single_env;
793 #endif
795 addr &= TARGET_PAGE_MASK;
796 p = page_find(addr >> TARGET_PAGE_BITS);
797 if (!p)
798 return;
799 tb = p->first_tb;
800 current_tb_modified = 0;
801 current_tb = NULL;
802 current_pc = 0; /* avoid warning */
803 current_cs_base = 0; /* avoid warning */
804 current_flags = 0; /* avoid warning */
805 #ifdef TARGET_HAS_PRECISE_SMC
806 if (tb && pc != 0) {
807 current_tb = tb_find_pc(pc);
809 #endif
810 while (tb != NULL) {
811 n = (long)tb & 3;
812 tb = (TranslationBlock *)((long)tb & ~3);
813 #ifdef TARGET_HAS_PRECISE_SMC
814 if (current_tb == tb &&
815 !(current_tb->cflags & CF_SINGLE_INSN)) {
816 /* If we are modifying the current TB, we must stop
817 its execution. We could be more precise by checking
818 that the modification is after the current PC, but it
819 would require a specialized function to partially
820 restore the CPU state */
822 current_tb_modified = 1;
823 cpu_restore_state(current_tb, env, pc, puc);
824 #if defined(TARGET_I386)
825 current_flags = env->hflags;
826 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
827 current_cs_base = (target_ulong)env->segs[R_CS].base;
828 current_pc = current_cs_base + env->eip;
829 #else
830 #error unsupported CPU
831 #endif
833 #endif /* TARGET_HAS_PRECISE_SMC */
834 tb_phys_invalidate(tb, addr);
835 tb = tb->page_next[n];
837 p->first_tb = NULL;
838 #ifdef TARGET_HAS_PRECISE_SMC
839 if (current_tb_modified) {
840 /* we generate a block containing just the instruction
841 modifying the memory. It will ensure that it cannot modify
842 itself */
843 env->current_tb = NULL;
844 tb_gen_code(env, current_pc, current_cs_base, current_flags,
845 CF_SINGLE_INSN);
846 cpu_resume_from_signal(env, puc);
848 #endif
850 #endif
852 /* add the tb in the target page and protect it if necessary */
853 static inline void tb_alloc_page(TranslationBlock *tb,
854 unsigned int n, target_ulong page_addr)
856 PageDesc *p;
857 TranslationBlock *last_first_tb;
859 tb->page_addr[n] = page_addr;
860 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
861 tb->page_next[n] = p->first_tb;
862 last_first_tb = p->first_tb;
863 p->first_tb = (TranslationBlock *)((long)tb | n);
864 invalidate_page_bitmap(p);
866 #if defined(TARGET_HAS_SMC) || 1
868 #if defined(CONFIG_USER_ONLY)
869 if (p->flags & PAGE_WRITE) {
870 target_ulong addr;
871 PageDesc *p2;
872 int prot;
874 /* force the host page as non writable (writes will have a
875 page fault + mprotect overhead) */
876 page_addr &= qemu_host_page_mask;
877 prot = 0;
878 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
879 addr += TARGET_PAGE_SIZE) {
881 p2 = page_find (addr >> TARGET_PAGE_BITS);
882 if (!p2)
883 continue;
884 prot |= p2->flags;
885 p2->flags &= ~PAGE_WRITE;
886 page_get_flags(addr);
888 mprotect(g2h(page_addr), qemu_host_page_size,
889 (prot & PAGE_BITS) & ~PAGE_WRITE);
890 #ifdef DEBUG_TB_INVALIDATE
891 printf("protecting code page: 0x%08lx\n",
892 page_addr);
893 #endif
895 #else
896 /* if some code is already present, then the pages are already
897 protected. So we handle the case where only the first TB is
898 allocated in a physical page */
899 if (!last_first_tb) {
900 tlb_protect_code(page_addr);
902 #endif
904 #endif /* TARGET_HAS_SMC */
907 /* Allocate a new translation block. Flush the translation buffer if
908 too many translation blocks or too much generated code. */
909 TranslationBlock *tb_alloc(target_ulong pc)
911 TranslationBlock *tb;
913 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
914 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
915 return NULL;
916 tb = &tbs[nb_tbs++];
917 tb->pc = pc;
918 tb->cflags = 0;
919 return tb;
922 /* add a new TB and link it to the physical page tables. phys_page2 is
923 (-1) to indicate that only one page contains the TB. */
924 void tb_link_phys(TranslationBlock *tb,
925 target_ulong phys_pc, target_ulong phys_page2)
927 unsigned int h;
928 TranslationBlock **ptb;
930 /* add in the physical hash table */
931 h = tb_phys_hash_func(phys_pc);
932 ptb = &tb_phys_hash[h];
933 tb->phys_hash_next = *ptb;
934 *ptb = tb;
936 /* add in the page list */
937 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
938 if (phys_page2 != -1)
939 tb_alloc_page(tb, 1, phys_page2);
940 else
941 tb->page_addr[1] = -1;
943 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
944 tb->jmp_next[0] = NULL;
945 tb->jmp_next[1] = NULL;
946 #ifdef USE_CODE_COPY
947 tb->cflags &= ~CF_FP_USED;
948 if (tb->cflags & CF_TB_FP_USED)
949 tb->cflags |= CF_FP_USED;
950 #endif
952 /* init original jump addresses */
953 if (tb->tb_next_offset[0] != 0xffff)
954 tb_reset_jump(tb, 0);
955 if (tb->tb_next_offset[1] != 0xffff)
956 tb_reset_jump(tb, 1);
958 #ifdef DEBUG_TB_CHECK
959 tb_page_check();
960 #endif
963 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
964 tb[1].tc_ptr. Return NULL if not found */
965 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
967 int m_min, m_max, m;
968 unsigned long v;
969 TranslationBlock *tb;
971 if (nb_tbs <= 0)
972 return NULL;
973 if (tc_ptr < (unsigned long)code_gen_buffer ||
974 tc_ptr >= (unsigned long)code_gen_ptr)
975 return NULL;
976 /* binary search (cf Knuth) */
977 m_min = 0;
978 m_max = nb_tbs - 1;
979 while (m_min <= m_max) {
980 m = (m_min + m_max) >> 1;
981 tb = &tbs[m];
982 v = (unsigned long)tb->tc_ptr;
983 if (v == tc_ptr)
984 return tb;
985 else if (tc_ptr < v) {
986 m_max = m - 1;
987 } else {
988 m_min = m + 1;
991 return &tbs[m_max];
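/* Note on the search above: tbs[] entries are handed out in the same order
   as code_gen_buffer space, so their tc_ptr values are non-decreasing by
   construction and a plain binary search is enough.  When tc_ptr falls
   inside a block rather than on its first byte, the loop exits with m_max
   naming the block that starts at or before tc_ptr, hence the final
   return &tbs[m_max]. */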
994 static void tb_reset_jump_recursive(TranslationBlock *tb);
996 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
998 TranslationBlock *tb1, *tb_next, **ptb;
999 unsigned int n1;
1001 tb1 = tb->jmp_next[n];
1002 if (tb1 != NULL) {
1003 /* find head of list */
1004 for(;;) {
1005 n1 = (long)tb1 & 3;
1006 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1007 if (n1 == 2)
1008 break;
1009 tb1 = tb1->jmp_next[n1];
1011 /* we are now sure that tb jumps to tb1 */
1012 tb_next = tb1;
1014 /* remove tb from the jmp_first list */
1015 ptb = &tb_next->jmp_first;
1016 for(;;) {
1017 tb1 = *ptb;
1018 n1 = (long)tb1 & 3;
1019 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1020 if (n1 == n && tb1 == tb)
1021 break;
1022 ptb = &tb1->jmp_next[n1];
1024 *ptb = tb->jmp_next[n];
1025 tb->jmp_next[n] = NULL;
1027 /* suppress the jump to next tb in generated code */
1028 tb_reset_jump(tb, n);
1030 /* suppress jumps in the tb on which we could have jumped */
1031 tb_reset_jump_recursive(tb_next);
1035 static void tb_reset_jump_recursive(TranslationBlock *tb)
1037 tb_reset_jump_recursive2(tb, 0);
1038 tb_reset_jump_recursive2(tb, 1);
1041 #if defined(TARGET_HAS_ICE)
1042 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1044 target_phys_addr_t addr;
1045 target_ulong pd;
1046 ram_addr_t ram_addr;
1047 PhysPageDesc *p;
1049 addr = cpu_get_phys_page_debug(env, pc);
1050 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1051 if (!p) {
1052 pd = IO_MEM_UNASSIGNED;
1053 } else {
1054 pd = p->phys_offset;
1056 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1057 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1059 #endif
1061 /* Add a watchpoint. */
1062 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1064 int i;
1066 for (i = 0; i < env->nb_watchpoints; i++) {
1067 if (addr == env->watchpoint[i].vaddr)
1068 return 0;
1070 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1071 return -1;
1073 i = env->nb_watchpoints++;
1074 env->watchpoint[i].vaddr = addr;
1075 tlb_flush_page(env, addr);
1076 /* FIXME: This flush is needed because of the hack to make memory ops
1077 terminate the TB. It can be removed once the proper IO trap and
1078 re-execute bits are in. */
1079 tb_flush(env);
1080 return i;
1083 /* Remove a watchpoint. */
1084 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1086 int i;
1088 for (i = 0; i < env->nb_watchpoints; i++) {
1089 if (addr == env->watchpoint[i].vaddr) {
1090 env->nb_watchpoints--;
1091 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1092 tlb_flush_page(env, addr);
1093 return 0;
1096 return -1;
1099 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1100 breakpoint is reached */
1101 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1103 #if defined(TARGET_HAS_ICE)
1104 int i;
1106 for(i = 0; i < env->nb_breakpoints; i++) {
1107 if (env->breakpoints[i] == pc)
1108 return 0;
1111 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1112 return -1;
1113 env->breakpoints[env->nb_breakpoints++] = pc;
1115 breakpoint_invalidate(env, pc);
1116 return 0;
1117 #else
1118 return -1;
1119 #endif
1122 /* remove a breakpoint */
1123 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1125 #if defined(TARGET_HAS_ICE)
1126 int i;
1127 for(i = 0; i < env->nb_breakpoints; i++) {
1128 if (env->breakpoints[i] == pc)
1129 goto found;
1131 return -1;
1132 found:
1133 env->nb_breakpoints--;
1134 if (i < env->nb_breakpoints)
1135 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1137 breakpoint_invalidate(env, pc);
1138 return 0;
1139 #else
1140 return -1;
1141 #endif
1144 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1145 CPU loop after each instruction */
1146 void cpu_single_step(CPUState *env, int enabled)
1148 #if defined(TARGET_HAS_ICE)
1149 if (env->singlestep_enabled != enabled) {
1150 env->singlestep_enabled = enabled;
1151 /* must flush all the translated code to avoid inconsistencies */
1152 /* XXX: only flush what is necessary */
1153 tb_flush(env);
1155 #endif
1158 /* enable or disable low level logging */
1159 void cpu_set_log(int log_flags)
1161 loglevel = log_flags;
1162 if (loglevel && !logfile) {
1163 logfile = fopen(logfilename, "w");
1164 if (!logfile) {
1165 perror(logfilename);
1166 _exit(1);
1168 #if !defined(CONFIG_SOFTMMU)
1169 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1171 static uint8_t logfile_buf[4096];
1172 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1174 #else
1175 setvbuf(logfile, NULL, _IOLBF, 0);
1176 #endif
1180 void cpu_set_log_filename(const char *filename)
1182 logfilename = strdup(filename);
1185 /* mask must never be zero, except for A20 change call */
1186 void cpu_interrupt(CPUState *env, int mask)
1188 TranslationBlock *tb;
1189 static int interrupt_lock;
1191 env->interrupt_request |= mask;
1192 /* if the cpu is currently executing code, we must unlink it and
1193 all the potentially executing TB */
1194 tb = env->current_tb;
1195 if (tb && !testandset(&interrupt_lock)) {
1196 env->current_tb = NULL;
1197 tb_reset_jump_recursive(tb);
1198 interrupt_lock = 0;
1202 void cpu_reset_interrupt(CPUState *env, int mask)
1204 env->interrupt_request &= ~mask;
1207 CPULogItem cpu_log_items[] = {
1208 { CPU_LOG_TB_OUT_ASM, "out_asm",
1209 "show generated host assembly code for each compiled TB" },
1210 { CPU_LOG_TB_IN_ASM, "in_asm",
1211 "show target assembly code for each compiled TB" },
1212 { CPU_LOG_TB_OP, "op",
1213 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1214 #ifdef TARGET_I386
1215 { CPU_LOG_TB_OP_OPT, "op_opt",
1216 "show micro ops after optimization for each compiled TB" },
1217 #endif
1218 { CPU_LOG_INT, "int",
1219 "show interrupts/exceptions in short format" },
1220 { CPU_LOG_EXEC, "exec",
1221 "show trace before each executed TB (lots of logs)" },
1222 { CPU_LOG_TB_CPU, "cpu",
1223 "show CPU state before block translation" },
1224 #ifdef TARGET_I386
1225 { CPU_LOG_PCALL, "pcall",
1226 "show protected mode far calls/returns/exceptions" },
1227 #endif
1228 #ifdef DEBUG_IOPORT
1229 { CPU_LOG_IOPORT, "ioport",
1230 "show all i/o ports accesses" },
1231 #endif
1232 { 0, NULL, NULL },
1235 static int cmp1(const char *s1, int n, const char *s2)
1237 if (strlen(s2) != n)
1238 return 0;
1239 return memcmp(s1, s2, n) == 0;
1242 /* takes a comma-separated list of log masks. Returns 0 on error. */
1243 int cpu_str_to_log_mask(const char *str)
1245 CPULogItem *item;
1246 int mask;
1247 const char *p, *p1;
1249 p = str;
1250 mask = 0;
1251 for(;;) {
1252 p1 = strchr(p, ',');
1253 if (!p1)
1254 p1 = p + strlen(p);
1255 if(cmp1(p,p1-p,"all")) {
1256 for(item = cpu_log_items; item->mask != 0; item++) {
1257 mask |= item->mask;
1259 } else {
1260 for(item = cpu_log_items; item->mask != 0; item++) {
1261 if (cmp1(p, p1 - p, item->name))
1262 goto found;
1264 return 0;
1266 found:
1267 mask |= item->mask;
1268 if (*p1 != ',')
1269 break;
1270 p = p1 + 1;
1272 return mask;
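/* Usage sketch (illustrative): cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, cpu_str_to_log_mask("all") ORs every
   entry of cpu_log_items[] together, and an unknown name makes the whole
   call return 0. */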
1275 void cpu_abort(CPUState *env, const char *fmt, ...)
1277 va_list ap;
1279 va_start(ap, fmt);
1280 fprintf(stderr, "qemu: fatal: ");
1281 vfprintf(stderr, fmt, ap);
1282 fprintf(stderr, "\n");
1283 #ifdef TARGET_I386
1284 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1285 #else
1286 cpu_dump_state(env, stderr, fprintf, 0);
1287 #endif
1288 va_end(ap);
1289 if (logfile) {
1290 fflush(logfile);
1291 fclose(logfile);
1293 abort();
1296 CPUState *cpu_copy(CPUState *env)
1298 CPUState *new_env = cpu_init();
1299 /* preserve chaining and index */
1300 CPUState *next_cpu = new_env->next_cpu;
1301 int cpu_index = new_env->cpu_index;
1302 memcpy(new_env, env, sizeof(CPUState));
1303 new_env->next_cpu = next_cpu;
1304 new_env->cpu_index = cpu_index;
1305 return new_env;
1308 #if !defined(CONFIG_USER_ONLY)
1310 /* NOTE: if flush_global is true, also flush global entries (not
1311 implemented yet) */
1312 void tlb_flush(CPUState *env, int flush_global)
1314 int i;
1316 #if defined(DEBUG_TLB)
1317 printf("tlb_flush:\n");
1318 #endif
1319 /* must reset current TB so that interrupts cannot modify the
1320 links while we are modifying them */
1321 env->current_tb = NULL;
1323 for(i = 0; i < CPU_TLB_SIZE; i++) {
1324 env->tlb_table[0][i].addr_read = -1;
1325 env->tlb_table[0][i].addr_write = -1;
1326 env->tlb_table[0][i].addr_code = -1;
1327 env->tlb_table[1][i].addr_read = -1;
1328 env->tlb_table[1][i].addr_write = -1;
1329 env->tlb_table[1][i].addr_code = -1;
1330 #if (NB_MMU_MODES >= 3)
1331 env->tlb_table[2][i].addr_read = -1;
1332 env->tlb_table[2][i].addr_write = -1;
1333 env->tlb_table[2][i].addr_code = -1;
1334 #if (NB_MMU_MODES == 4)
1335 env->tlb_table[3][i].addr_read = -1;
1336 env->tlb_table[3][i].addr_write = -1;
1337 env->tlb_table[3][i].addr_code = -1;
1338 #endif
1339 #endif
1342 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1344 #if !defined(CONFIG_SOFTMMU)
1345 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1346 #endif
1347 #ifdef USE_KQEMU
1348 if (env->kqemu_enabled) {
1349 kqemu_flush(env, flush_global);
1351 #endif
1352 tlb_flush_count++;
1355 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1357 if (addr == (tlb_entry->addr_read &
1358 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1359 addr == (tlb_entry->addr_write &
1360 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1361 addr == (tlb_entry->addr_code &
1362 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1363 tlb_entry->addr_read = -1;
1364 tlb_entry->addr_write = -1;
1365 tlb_entry->addr_code = -1;
1369 void tlb_flush_page(CPUState *env, target_ulong addr)
1371 int i;
1372 TranslationBlock *tb;
1374 #if defined(DEBUG_TLB)
1375 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1376 #endif
1377 /* must reset current TB so that interrupts cannot modify the
1378 links while we are modifying them */
1379 env->current_tb = NULL;
1381 addr &= TARGET_PAGE_MASK;
1382 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1383 tlb_flush_entry(&env->tlb_table[0][i], addr);
1384 tlb_flush_entry(&env->tlb_table[1][i], addr);
1385 #if (NB_MMU_MODES >= 3)
1386 tlb_flush_entry(&env->tlb_table[2][i], addr);
1387 #if (NB_MMU_MODES == 4)
1388 tlb_flush_entry(&env->tlb_table[3][i], addr);
1389 #endif
1390 #endif
1392 /* Discard jump cache entries for any tb which might potentially
1393 overlap the flushed page. */
1394 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1395 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1397 i = tb_jmp_cache_hash_page(addr);
1398 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1400 #if !defined(CONFIG_SOFTMMU)
1401 if (addr < MMAP_AREA_END)
1402 munmap((void *)addr, TARGET_PAGE_SIZE);
1403 #endif
1404 #ifdef USE_KQEMU
1405 if (env->kqemu_enabled) {
1406 kqemu_flush_page(env, addr);
1408 #endif
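/* Worked example of the TLB indexing used above (a sketch assuming
   TARGET_PAGE_BITS == 12 and CPU_TLB_SIZE == 256): for addr == 0x08049123
   the page address is 0x08049000 and

       i = (0x08049000 >> 12) & 255 == 0x8049 & 0xff == 0x49,

   so only slot 0x49 of each tlb_table[] mode needs to be cleared, plus the
   tb_jmp_cache entries for this page and the one before it, since a TB may
   span two pages. */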
1411 /* update the TLBs so that writes to code in the virtual page 'addr'
1412 can be detected */
1413 static void tlb_protect_code(ram_addr_t ram_addr)
1415 cpu_physical_memory_reset_dirty(ram_addr,
1416 ram_addr + TARGET_PAGE_SIZE,
1417 CODE_DIRTY_FLAG);
1420 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1421 tested for self modifying code */
1422 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1423 target_ulong vaddr)
1425 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1428 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1429 unsigned long start, unsigned long length)
1431 unsigned long addr;
1432 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1433 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1434 if ((addr - start) < length) {
1435 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1440 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1441 int dirty_flags)
1443 CPUState *env;
1444 unsigned long length, start1;
1445 int i, mask, len;
1446 uint8_t *p;
1448 start &= TARGET_PAGE_MASK;
1449 end = TARGET_PAGE_ALIGN(end);
1451 length = end - start;
1452 if (length == 0)
1453 return;
1454 len = length >> TARGET_PAGE_BITS;
1455 #ifdef USE_KQEMU
1456 /* XXX: should not depend on cpu context */
1457 env = first_cpu;
1458 if (env->kqemu_enabled) {
1459 ram_addr_t addr;
1460 addr = start;
1461 for(i = 0; i < len; i++) {
1462 kqemu_set_notdirty(env, addr);
1463 addr += TARGET_PAGE_SIZE;
1466 #endif
1467 mask = ~dirty_flags;
1468 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1469 for(i = 0; i < len; i++)
1470 p[i] &= mask;
1472 /* we modify the TLB cache so that the dirty bit will be set again
1473 when accessing the range */
1474 start1 = start + (unsigned long)phys_ram_base;
1475 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1476 for(i = 0; i < CPU_TLB_SIZE; i++)
1477 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1478 for(i = 0; i < CPU_TLB_SIZE; i++)
1479 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1480 #if (NB_MMU_MODES >= 3)
1481 for(i = 0; i < CPU_TLB_SIZE; i++)
1482 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1483 #if (NB_MMU_MODES == 4)
1484 for(i = 0; i < CPU_TLB_SIZE; i++)
1485 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1486 #endif
1487 #endif
1490 #if !defined(CONFIG_SOFTMMU)
1491 /* XXX: this is expensive */
1493 VirtPageDesc *p;
1494 int j;
1495 target_ulong addr;
1497 for(i = 0; i < L1_SIZE; i++) {
1498 p = l1_virt_map[i];
1499 if (p) {
1500 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1501 for(j = 0; j < L2_SIZE; j++) {
1502 if (p->valid_tag == virt_valid_tag &&
1503 p->phys_addr >= start && p->phys_addr < end &&
1504 (p->prot & PROT_WRITE)) {
1505 if (addr < MMAP_AREA_END) {
1506 mprotect((void *)addr, TARGET_PAGE_SIZE,
1507 p->prot & ~PROT_WRITE);
1510 addr += TARGET_PAGE_SIZE;
1511 p++;
1516 #endif
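/* Summary of the dirty tracking above: each RAM page owns one byte in
   phys_ram_dirty[].  Clearing the bits named by dirty_flags and switching
   the matching TLB write entries to IO_MEM_NOTDIRTY forces the next guest
   store to that page through the notdirty_mem_write* handlers further down,
   which is where the bits get set again and where any translated code on
   the page is invalidated. */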
1519 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1521 ram_addr_t ram_addr;
1523 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1524 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1525 tlb_entry->addend - (unsigned long)phys_ram_base;
1526 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1527 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1532 /* update the TLB according to the current state of the dirty bits */
1533 void cpu_tlb_update_dirty(CPUState *env)
1535 int i;
1536 for(i = 0; i < CPU_TLB_SIZE; i++)
1537 tlb_update_dirty(&env->tlb_table[0][i]);
1538 for(i = 0; i < CPU_TLB_SIZE; i++)
1539 tlb_update_dirty(&env->tlb_table[1][i]);
1540 #if (NB_MMU_MODES >= 3)
1541 for(i = 0; i < CPU_TLB_SIZE; i++)
1542 tlb_update_dirty(&env->tlb_table[2][i]);
1543 #if (NB_MMU_MODES == 4)
1544 for(i = 0; i < CPU_TLB_SIZE; i++)
1545 tlb_update_dirty(&env->tlb_table[3][i]);
1546 #endif
1547 #endif
1550 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1551 unsigned long start)
1553 unsigned long addr;
1554 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1555 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1556 if (addr == start) {
1557 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1562 /* update the TLB corresponding to virtual page vaddr and phys addr
1563 addr so that it is no longer dirty */
1564 static inline void tlb_set_dirty(CPUState *env,
1565 unsigned long addr, target_ulong vaddr)
1567 int i;
1569 addr &= TARGET_PAGE_MASK;
1570 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1571 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1572 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1573 #if (NB_MMU_MODES >= 3)
1574 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1575 #if (NB_MMU_MODES == 4)
1576 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1577 #endif
1578 #endif
1581 /* add a new TLB entry. At most one entry for a given virtual address
1582 is permitted. Return 0 if OK or 2 if the page could not be mapped
1583 (can only happen in non SOFTMMU mode for I/O pages or pages
1584 conflicting with the host address space). */
1585 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1586 target_phys_addr_t paddr, int prot,
1587 int is_user, int is_softmmu)
1589 PhysPageDesc *p;
1590 unsigned long pd;
1591 unsigned int index;
1592 target_ulong address;
1593 target_phys_addr_t addend;
1594 int ret;
1595 CPUTLBEntry *te;
1596 int i;
1598 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1599 if (!p) {
1600 pd = IO_MEM_UNASSIGNED;
1601 } else {
1602 pd = p->phys_offset;
1604 #if defined(DEBUG_TLB)
1605 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1606 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1607 #endif
1609 ret = 0;
1610 #if !defined(CONFIG_SOFTMMU)
1611 if (is_softmmu)
1612 #endif
1614 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1615 /* IO memory case */
1616 address = vaddr | pd;
1617 addend = paddr;
1618 } else {
1619 /* standard memory */
1620 address = vaddr;
1621 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1624 /* Make accesses to pages with watchpoints go via the
1625 watchpoint trap routines. */
1626 for (i = 0; i < env->nb_watchpoints; i++) {
1627 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1628 if (address & ~TARGET_PAGE_MASK) {
1629 env->watchpoint[i].is_ram = 0;
1630 address = vaddr | io_mem_watch;
1631 } else {
1632 env->watchpoint[i].is_ram = 1;
1633 /* TODO: Figure out how to make read watchpoints coexist
1634 with code. */
1635 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1640 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1641 addend -= vaddr;
1642 te = &env->tlb_table[is_user][index];
1643 te->addend = addend;
1644 if (prot & PAGE_READ) {
1645 te->addr_read = address;
1646 } else {
1647 te->addr_read = -1;
1649 if (prot & PAGE_EXEC) {
1650 te->addr_code = address;
1651 } else {
1652 te->addr_code = -1;
1654 if (prot & PAGE_WRITE) {
1655 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1656 (pd & IO_MEM_ROMD)) {
1657 /* write access calls the I/O callback */
1658 te->addr_write = vaddr |
1659 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1660 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1661 !cpu_physical_memory_is_dirty(pd)) {
1662 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1663 } else {
1664 te->addr_write = address;
1666 } else {
1667 te->addr_write = -1;
1670 #if !defined(CONFIG_SOFTMMU)
1671 else {
1672 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1673 /* IO access: no mapping is done as it will be handled by the
1674 soft MMU */
1675 if (!(env->hflags & HF_SOFTMMU_MASK))
1676 ret = 2;
1677 } else {
1678 void *map_addr;
1680 if (vaddr >= MMAP_AREA_END) {
1681 ret = 2;
1682 } else {
1683 if (prot & PROT_WRITE) {
1684 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1685 #if defined(TARGET_HAS_SMC) || 1
1686 first_tb ||
1687 #endif
1688 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1689 !cpu_physical_memory_is_dirty(pd))) {
1690 /* ROM: we do as if code was inside */
1691 /* if code is present, we only map as read only and save the
1692 original mapping */
1693 VirtPageDesc *vp;
1695 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1696 vp->phys_addr = pd;
1697 vp->prot = prot;
1698 vp->valid_tag = virt_valid_tag;
1699 prot &= ~PAGE_WRITE;
1702 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1703 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1704 if (map_addr == MAP_FAILED) {
1705 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1706 paddr, vaddr);
1711 #endif
1712 return ret;
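/* Reading aid for the softmmu entry built above: a TLB line stores one
   address per access kind (addr_read/addr_write/addr_code) plus an addend
   chosen so that, for plain RAM, the host address is simply
   guest_vaddr + te->addend (addend is phys_ram_base plus the physical page
   offset, minus vaddr).  For I/O pages the low bits of the stored address
   carry the io_mem index instead, which is what routes the access to the
   handlers registered in io_mem_read/io_mem_write. */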
1715 /* called from signal handler: invalidate the code and unprotect the
1716 page. Return TRUE if the fault was successfully handled. */
1717 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1719 #if !defined(CONFIG_SOFTMMU)
1720 VirtPageDesc *vp;
1722 #if defined(DEBUG_TLB)
1723 printf("page_unprotect: addr=0x%08x\n", addr);
1724 #endif
1725 addr &= TARGET_PAGE_MASK;
1727 /* if it is not mapped, no need to worry here */
1728 if (addr >= MMAP_AREA_END)
1729 return 0;
1730 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1731 if (!vp)
1732 return 0;
1733 /* NOTE: in this case, valid_tag is _not_ tested as it
1734 validates only the code TLB */
1735 if (vp->valid_tag != virt_valid_tag)
1736 return 0;
1737 if (!(vp->prot & PAGE_WRITE))
1738 return 0;
1739 #if defined(DEBUG_TLB)
1740 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1741 addr, vp->phys_addr, vp->prot);
1742 #endif
1743 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1744 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1745 (unsigned long)addr, vp->prot);
1746 /* set the dirty bit */
1747 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1748 /* flush the code inside */
1749 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1750 return 1;
1751 #else
1752 return 0;
1753 #endif
1756 #else
1758 void tlb_flush(CPUState *env, int flush_global)
1762 void tlb_flush_page(CPUState *env, target_ulong addr)
1766 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1767 target_phys_addr_t paddr, int prot,
1768 int is_user, int is_softmmu)
1770 return 0;
1773 /* dump memory mappings */
1774 void page_dump(FILE *f)
1776 unsigned long start, end;
1777 int i, j, prot, prot1;
1778 PageDesc *p;
1780 fprintf(f, "%-8s %-8s %-8s %s\n",
1781 "start", "end", "size", "prot");
1782 start = -1;
1783 end = -1;
1784 prot = 0;
1785 for(i = 0; i <= L1_SIZE; i++) {
1786 if (i < L1_SIZE)
1787 p = l1_map[i];
1788 else
1789 p = NULL;
1790 for(j = 0;j < L2_SIZE; j++) {
1791 if (!p)
1792 prot1 = 0;
1793 else
1794 prot1 = p[j].flags;
1795 if (prot1 != prot) {
1796 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1797 if (start != -1) {
1798 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1799 start, end, end - start,
1800 prot & PAGE_READ ? 'r' : '-',
1801 prot & PAGE_WRITE ? 'w' : '-',
1802 prot & PAGE_EXEC ? 'x' : '-');
1804 if (prot1 != 0)
1805 start = end;
1806 else
1807 start = -1;
1808 prot = prot1;
1810 if (!p)
1811 break;
1816 int page_get_flags(target_ulong address)
1818 PageDesc *p;
1820 p = page_find(address >> TARGET_PAGE_BITS);
1821 if (!p)
1822 return 0;
1823 return p->flags;
1826 /* modify the flags of a page and invalidate the code if
1827 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1828 depending on PAGE_WRITE */
1829 void page_set_flags(target_ulong start, target_ulong end, int flags)
1831 PageDesc *p;
1832 target_ulong addr;
1834 start = start & TARGET_PAGE_MASK;
1835 end = TARGET_PAGE_ALIGN(end);
1836 if (flags & PAGE_WRITE)
1837 flags |= PAGE_WRITE_ORG;
1838 spin_lock(&tb_lock);
1839 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1840 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1841 /* if the write protection is set, then we invalidate the code
1842 inside */
1843 if (!(p->flags & PAGE_WRITE) &&
1844 (flags & PAGE_WRITE) &&
1845 p->first_tb) {
1846 tb_invalidate_phys_page(addr, 0, NULL);
1848 p->flags = flags;
1850 spin_unlock(&tb_lock);
1853 /* called from signal handler: invalidate the code and unprotect the
1854 page. Return TRUE if the fault was successfully handled. */
1855 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1857 unsigned int page_index, prot, pindex;
1858 PageDesc *p, *p1;
1859 target_ulong host_start, host_end, addr;
1861 host_start = address & qemu_host_page_mask;
1862 page_index = host_start >> TARGET_PAGE_BITS;
1863 p1 = page_find(page_index);
1864 if (!p1)
1865 return 0;
1866 host_end = host_start + qemu_host_page_size;
1867 p = p1;
1868 prot = 0;
1869 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1870 prot |= p->flags;
1871 p++;
1873 /* if the page was really writable, then we change its
1874 protection back to writable */
1875 if (prot & PAGE_WRITE_ORG) {
1876 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1877 if (!(p1[pindex].flags & PAGE_WRITE)) {
1878 mprotect((void *)g2h(host_start), qemu_host_page_size,
1879 (prot & PAGE_BITS) | PAGE_WRITE);
1880 p1[pindex].flags |= PAGE_WRITE;
1881 /* and since the content will be modified, we must invalidate
1882 the corresponding translated code. */
1883 tb_invalidate_phys_page(address, pc, puc);
1884 #ifdef DEBUG_TB_CHECK
1885 tb_invalidate_check(address);
1886 #endif
1887 return 1;
1890 return 0;
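/* Summary of the user-mode SMC protection implemented above: when a TB is
   built from a writable page, tb_alloc_page() clears PAGE_WRITE and
   mprotect()s the host page read-only, while page_set_flags() keeps the
   original permission in PAGE_WRITE_ORG.  A later guest store then faults,
   the signal handler calls page_unprotect(), which restores PAGE_WRITE,
   invalidates the TBs on that page and returns 1 so the store can be
   replayed. */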
1893 /* call this function when system calls directly modify a memory area */
1894 /* ??? This should be redundant now we have lock_user. */
1895 void page_unprotect_range(target_ulong data, target_ulong data_size)
1897 target_ulong start, end, addr;
1899 start = data;
1900 end = start + data_size;
1901 start &= TARGET_PAGE_MASK;
1902 end = TARGET_PAGE_ALIGN(end);
1903 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1904 page_unprotect(addr, 0, NULL);
1908 static inline void tlb_set_dirty(CPUState *env,
1909 unsigned long addr, target_ulong vaddr)
1912 #endif /* defined(CONFIG_USER_ONLY) */
1914 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1915 int memory);
1916 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1917 int orig_memory);
1918 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1919 need_subpage) \
1920 do { \
1921 if (addr > start_addr) \
1922 start_addr2 = 0; \
1923 else { \
1924 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1925 if (start_addr2 > 0) \
1926 need_subpage = 1; \
1929 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
1930 end_addr2 = TARGET_PAGE_SIZE - 1; \
1931 else { \
1932 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1933 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1934 need_subpage = 1; \
1936 } while (0)
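/* Worked example (a sketch assuming 4 KB target pages): registering a
   0x200-byte region at start_addr 0x10000100 reaches this macro with
   addr == start_addr, so start_addr2 == 0x100 and
   end_addr2 == (0x10000100 + 0x1ff) & 0xfff == 0x2ff; both lie strictly
   inside the page, need_subpage is set, and cpu_register_physical_memory()
   below hands byte offsets 0x100..0x2ff of that page to subpage_register(). */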
1938 /* register physical memory. 'size' must be a multiple of the target
1939 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1940 io memory page */
1941 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1942 unsigned long size,
1943 unsigned long phys_offset)
1945 target_phys_addr_t addr, end_addr;
1946 PhysPageDesc *p;
1947 CPUState *env;
1948 unsigned long orig_size = size;
1949 void *subpage;
1951 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1952 end_addr = start_addr + (target_phys_addr_t)size;
1953 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1954 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1955 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
1956 unsigned long orig_memory = p->phys_offset;
1957 target_phys_addr_t start_addr2, end_addr2;
1958 int need_subpage = 0;
1960 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
1961 need_subpage);
1962 if (need_subpage) {
1963 if (!(orig_memory & IO_MEM_SUBPAGE)) {
1964 subpage = subpage_init((addr & TARGET_PAGE_MASK),
1965 &p->phys_offset, orig_memory);
1966 } else {
1967 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
1968 >> IO_MEM_SHIFT];
1970 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
1971 } else {
1972 p->phys_offset = phys_offset;
1973 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1974 (phys_offset & IO_MEM_ROMD))
1975 phys_offset += TARGET_PAGE_SIZE;
1977 } else {
1978 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1979 p->phys_offset = phys_offset;
1980 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1981 (phys_offset & IO_MEM_ROMD))
1982 phys_offset += TARGET_PAGE_SIZE;
1983 else {
1984 target_phys_addr_t start_addr2, end_addr2;
1985 int need_subpage = 0;
1987 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
1988 end_addr2, need_subpage);
1990 if (need_subpage) {
1991 subpage = subpage_init((addr & TARGET_PAGE_MASK),
1992 &p->phys_offset, IO_MEM_UNASSIGNED);
1993 subpage_register(subpage, start_addr2, end_addr2,
1994 phys_offset);
2000 /* since each CPU stores ram addresses in its TLB cache, we must
2001 reset the modified entries */
2002 /* XXX: slow ! */
2003 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2004 tlb_flush(env, 1);
2008 /* XXX: temporary until new memory mapping API */
2009 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2011 PhysPageDesc *p;
2013 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2014 if (!p)
2015 return IO_MEM_UNASSIGNED;
2016 return p->phys_offset;
2019 /* XXX: better than nothing */
2020 ram_addr_t qemu_ram_alloc(unsigned int size)
2022 ram_addr_t addr;
2023 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2024 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2025 size, phys_ram_size);
2026 abort();
2028 addr = phys_ram_alloc_offset;
2029 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2030 return addr;
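/* Usage sketch (illustrative; the 1 MB size and 0x100000 base are made-up
   values): a board model would typically do

       ram_addr_t off = qemu_ram_alloc(0x100000);
       cpu_register_physical_memory(0x100000, 0x100000, off);

   i.e. carve a chunk out of phys_ram_base with the bump allocator above and
   then map it into the guest physical address space. */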
2033 void qemu_ram_free(ram_addr_t addr)
2037 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2039 #ifdef DEBUG_UNASSIGNED
2040 printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
2041 #endif
2042 #ifdef TARGET_SPARC
2043 do_unassigned_access(addr, 0, 0, 0);
2044 #endif
2045 return 0;
2048 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2050 #ifdef DEBUG_UNASSIGNED
2051 printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
2052 #endif
2053 #ifdef TARGET_SPARC
2054 do_unassigned_access(addr, 1, 0, 0);
2055 #endif
2058 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2059 unassigned_mem_readb,
2060 unassigned_mem_readb,
2061 unassigned_mem_readb,
2064 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2065 unassigned_mem_writeb,
2066 unassigned_mem_writeb,
2067 unassigned_mem_writeb,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
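
/* Illustrative sketch (compiled out): consumers of the dirty bitmap, such as
   a display adapter model, normally test and clear dirty bits through the
   helpers declared in cpu-all.h rather than touching phys_ram_dirty directly.
   The helper names and VGA_DIRTY_FLAG below are assumed from that header;
   the addresses are made-up example values. */
#if 0
static void example_scan_dirty(ram_addr_t fb_start, ram_addr_t fb_end)
{
    ram_addr_t a;

    for (a = fb_start; a < fb_end; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    /* clear only the VGA dirty bits; CODE_DIRTY_FLAG tracking is untouched */
    cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
}
#endif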
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
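
/* Illustrative sketch (compiled out): the watchpoint handlers above are
   normally reached indirectly.  A debugger front end (e.g. the gdb stub)
   inserts a watchpoint on a guest virtual address, which retargets the TLB
   entry for that page at io_mem_watch, so any store through the page funnels
   into check_watchpoint().  The helper calls below assume the
   cpu_watchpoint_insert()/cpu_watchpoint_remove() API defined earlier in
   this file. */
#if 0
static void example_watch(CPUState *env, target_ulong guest_vaddr)
{
    if (cpu_watchpoint_insert(env, guest_vaddr) < 0) {
        /* out of watchpoint slots */
        return;
    }
    /* ... run the guest; a hit raises CPU_INTERRUPT_DEBUG ... */
    cpu_watchpoint_remove(env, guest_vaddr);
}
#endif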
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
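
/* Illustrative sketch (compiled out): device models never call the subpage
   machinery directly.  It is triggered by cpu_register_physical_memory()
   whenever a region smaller than TARGET_PAGE_SIZE is registered, so several
   I/O handlers can share one guest page.  The io index arguments below stand
   in for handles previously returned by cpu_register_io_memory(); the
   addresses and sizes are made-up example values. */
#if 0
static void example_split_page(int io_uart, int io_timer)
{
    /* two 256-byte device windows inside the same guest page: the second
       registration finds the page already assigned, so subpage_register()
       routes each range to its own handlers */
    cpu_register_physical_memory(0x10000000, 0x100, io_uart);
    cpu_register_physical_memory(0x10000100, 0x100, io_timer);
}
#endif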
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
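
/* Illustrative sketch (compiled out): a typical MMIO device supplies one
   handler per access size, registers them here, and maps the returned index
   into the guest physical address space.  The device name, callbacks and
   addresses are made up for the example. */
#if 0
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void example_register_mmio(void *opaque)
{
    int io;

    /* io_index 0 asks for a fresh slot; the result already carries the
       IO_MEM_SHIFT encoding, so it can be passed straight to
       cpu_register_physical_memory() */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(0x20000000, TARGET_PAGE_SIZE, io);
}
#endif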
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading: can write to both RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
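
/* Illustrative sketch (compiled out): most callers go through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers from
   cpu-all.h, which expand to cpu_physical_memory_rw(); firmware loading uses
   cpu_physical_memory_write_rom() instead so that ROM pages stay writable
   during setup.  Addresses and sizes are example values. */
#if 0
static void example_copy_blob(const uint8_t *blob, int blob_size)
{
    uint8_t check[16];

    /* place a firmware image at 1 MB even if that area is mapped as ROM */
    cpu_physical_memory_write_rom(0x00100000, blob, blob_size);
    /* ordinary guest-physical read back through the slow path */
    cpu_physical_memory_read(0x00100000, check, sizeof(check));
}
#endif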
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
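
/* Illustrative sketch (compiled out): the ld*_phys/st*_phys helpers above
   give device and MMU emulation code word-sized access to guest physical
   memory without going through a CPU TLB.  A page-table walker, for
   instance, might patch an accessed/dirty bit roughly like this (the
   function name and offsets are made-up example values). */
#if 0
static void example_set_pte_bit(target_phys_addr_t pte_addr, uint32_t bit)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    if (!(pte & bit)) {
        /* stl_phys_notdirty() avoids marking the page dirty, which is what
           PTE updates performed by the MMU emulation want */
        stl_phys_notdirty(pte_addr, pte | bit);
    }
}
#endif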
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
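
/* Illustrative sketch (compiled out): cpu_memory_rw_debug() is the entry
   point for debugger-style accesses (e.g. the gdb stub or monitor memory
   dumps), since it walks the guest page tables itself via
   cpu_get_phys_page_debug().  The wrapper below is a made-up example. */
#if 0
static int example_peek_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int size)
{
    /* is_write = 0: read guest virtual memory without needing a TLB entry */
    return cpu_memory_rw_debug(env, vaddr, out, size, 0);
}
#endif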
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif