1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
51 //#define DEBUG_SUBPAGE
53 #if !defined(CONFIG_USER_ONLY)
54 /* TB consistency checks only implemented for usermode emulation. */
55 #undef DEBUG_TB_CHECK
56 #endif
58 /* threshold to flush the translated code buffer */
59 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #else
76 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
77 #define TARGET_PHYS_ADDR_SPACE_BITS 32
78 #endif
80 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
81 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
82 int nb_tbs;
83 /* any access to the tbs or the page table must use this lock */
84 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
86 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
87 uint8_t *code_gen_ptr;
89 int phys_ram_size;
90 int phys_ram_fd;
91 uint8_t *phys_ram_base;
92 uint8_t *phys_ram_dirty;
93 static ram_addr_t phys_ram_alloc_offset = 0;
95 CPUState *first_cpu;
96 /* current CPU in the current thread. It is only valid inside
97 cpu_exec() */
98 CPUState *cpu_single_env;
100 typedef struct PageDesc {
101 /* list of TBs intersecting this ram page */
102 TranslationBlock *first_tb;
103     /* in order to optimize self-modifying code, we count the number
104        of writes to a given page and switch to a bitmap once it exceeds a threshold */
105 unsigned int code_write_count;
106 uint8_t *code_bitmap;
107 #if defined(CONFIG_USER_ONLY)
108 unsigned long flags;
109 #endif
110 } PageDesc;
112 typedef struct PhysPageDesc {
113 /* offset in host memory of the page + io_index in the low 12 bits */
114 uint32_t phys_offset;
115 } PhysPageDesc;
117 #define L2_BITS 10
118 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
119 /* XXX: this is a temporary hack for alpha target.
120 * In the future, this is to be replaced by a multi-level table
121  * to actually be able to handle the complete 64-bit address space.
123 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
124 #else
125 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
126 #endif
128 #define L1_SIZE (1 << L1_BITS)
129 #define L2_SIZE (1 << L2_BITS)
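/*
 * Note: the page descriptors use a two-level lookup.  A linear page
 * index is split so that the high L1_BITS select a slot in the first
 * level table (l1_map / l1_phys_map) and the low L2_BITS index into the
 * PageDesc / PhysPageDesc array that slot points to, roughly:
 *
 *     l1_map[index >> L2_BITS][index & (L2_SIZE - 1)]
 *
 * Second level arrays are only allocated the first time they are
 * touched (see page_find_alloc() and phys_page_find_alloc() below).
 */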
131 static void io_mem_init(void);
133 unsigned long qemu_real_host_page_size;
134 unsigned long qemu_host_page_bits;
135 unsigned long qemu_host_page_size;
136 unsigned long qemu_host_page_mask;
138 /* XXX: for system emulation, it could just be an array */
139 static PageDesc *l1_map[L1_SIZE];
140 PhysPageDesc **l1_phys_map;
142 /* io memory support */
143 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
144 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
145 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
146 static int io_mem_nb;
147 #if defined(CONFIG_SOFTMMU)
148 static int io_mem_watch;
149 #endif
151 /* log support */
152 char *logfilename = "/tmp/qemu.log";
153 FILE *logfile;
154 int loglevel;
155 static int log_append = 0;
157 /* statistics */
158 static int tlb_flush_count;
159 static int tb_flush_count;
160 static int tb_phys_invalidate_count;
162 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
163 typedef struct subpage_t {
164 target_phys_addr_t base;
165 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
166 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
167 void *opaque[TARGET_PAGE_SIZE];
168 } subpage_t;
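/*
 * Note: a subpage_t is used when several memory regions with different
 * handlers share a single target page.  SUBPAGE_IDX() extracts the byte
 * offset of an access inside the page, and the mem_read/mem_write/opaque
 * arrays hold one handler set per byte offset, so dispatch reduces to
 * indexing them with that offset (see subpage_register() and
 * subpage_init(), declared further down).
 */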
170 static void page_init(void)
172     /* NOTE: we can always assume that qemu_host_page_size >=
173 TARGET_PAGE_SIZE */
174 #ifdef _WIN32
176 SYSTEM_INFO system_info;
177 DWORD old_protect;
179 GetSystemInfo(&system_info);
180 qemu_real_host_page_size = system_info.dwPageSize;
182 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
183 PAGE_EXECUTE_READWRITE, &old_protect);
185 #else
186 qemu_real_host_page_size = getpagesize();
188 unsigned long start, end;
190 start = (unsigned long)code_gen_buffer;
191 start &= ~(qemu_real_host_page_size - 1);
193 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
194 end += qemu_real_host_page_size - 1;
195 end &= ~(qemu_real_host_page_size - 1);
197 mprotect((void *)start, end - start,
198 PROT_READ | PROT_WRITE | PROT_EXEC);
200 #endif
202 if (qemu_host_page_size == 0)
203 qemu_host_page_size = qemu_real_host_page_size;
204 if (qemu_host_page_size < TARGET_PAGE_SIZE)
205 qemu_host_page_size = TARGET_PAGE_SIZE;
206 qemu_host_page_bits = 0;
207 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
208 qemu_host_page_bits++;
209 qemu_host_page_mask = ~(qemu_host_page_size - 1);
210 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
211 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
214 static inline PageDesc *page_find_alloc(unsigned int index)
216 PageDesc **lp, *p;
218 lp = &l1_map[index >> L2_BITS];
219 p = *lp;
220 if (!p) {
221 /* allocate if not found */
222 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
223 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
224 *lp = p;
226 return p + (index & (L2_SIZE - 1));
229 static inline PageDesc *page_find(unsigned int index)
231 PageDesc *p;
233 p = l1_map[index >> L2_BITS];
234 if (!p)
235 return 0;
236 return p + (index & (L2_SIZE - 1));
239 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
241 void **lp, **p;
242 PhysPageDesc *pd;
244 p = (void **)l1_phys_map;
245 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
247 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
248 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
249 #endif
250 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
251 p = *lp;
252 if (!p) {
253 /* allocate if not found */
254 if (!alloc)
255 return NULL;
256 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
257 memset(p, 0, sizeof(void *) * L1_SIZE);
258 *lp = p;
260 #endif
261 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
262 pd = *lp;
263 if (!pd) {
264 int i;
265 /* allocate if not found */
266 if (!alloc)
267 return NULL;
268 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
269 *lp = pd;
270 for (i = 0; i < L2_SIZE; i++)
271 pd[i].phys_offset = IO_MEM_UNASSIGNED;
273 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
276 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
278 return phys_page_find_alloc(index, 0);
281 #if !defined(CONFIG_USER_ONLY)
282 static void tlb_protect_code(ram_addr_t ram_addr);
283 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
284 target_ulong vaddr);
285 #endif
287 void cpu_exec_init(CPUState *env)
289 CPUState **penv;
290 int cpu_index;
292 if (!code_gen_ptr) {
293 code_gen_ptr = code_gen_buffer;
294 page_init();
295 io_mem_init();
297 env->next_cpu = NULL;
298 penv = &first_cpu;
299 cpu_index = 0;
300 while (*penv != NULL) {
301 penv = (CPUState **)&(*penv)->next_cpu;
302 cpu_index++;
304 env->cpu_index = cpu_index;
305 env->nb_watchpoints = 0;
306 *penv = env;
309 static inline void invalidate_page_bitmap(PageDesc *p)
311 if (p->code_bitmap) {
312 qemu_free(p->code_bitmap);
313 p->code_bitmap = NULL;
315 p->code_write_count = 0;
318 /* set to NULL all the 'first_tb' fields in all PageDescs */
319 static void page_flush_tb(void)
321 int i, j;
322 PageDesc *p;
324 for(i = 0; i < L1_SIZE; i++) {
325 p = l1_map[i];
326 if (p) {
327 for(j = 0; j < L2_SIZE; j++) {
328 p->first_tb = NULL;
329 invalidate_page_bitmap(p);
330 p++;
336 /* flush all the translation blocks */
337 /* XXX: tb_flush is currently not thread safe */
338 void tb_flush(CPUState *env1)
340 CPUState *env;
341 #if defined(DEBUG_FLUSH)
342 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
343 (unsigned long)(code_gen_ptr - code_gen_buffer),
344 nb_tbs, nb_tbs > 0 ?
345 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
346 #endif
347 nb_tbs = 0;
349 for(env = first_cpu; env != NULL; env = env->next_cpu) {
350 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
353 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
354 page_flush_tb();
356 code_gen_ptr = code_gen_buffer;
357 /* XXX: flush processor icache at this point if cache flush is
358 expensive */
359 tb_flush_count++;
362 #ifdef DEBUG_TB_CHECK
364 static void tb_invalidate_check(target_ulong address)
366 TranslationBlock *tb;
367 int i;
368 address &= TARGET_PAGE_MASK;
369 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
370 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
371 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
372 address >= tb->pc + tb->size)) {
373 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
374 address, (long)tb->pc, tb->size);
380 /* verify that all the pages have correct rights for code */
381 static void tb_page_check(void)
383 TranslationBlock *tb;
384 int i, flags1, flags2;
386 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
387 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
388 flags1 = page_get_flags(tb->pc);
389 flags2 = page_get_flags(tb->pc + tb->size - 1);
390 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
391 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
392 (long)tb->pc, tb->size, flags1, flags2);
398 void tb_jmp_check(TranslationBlock *tb)
400 TranslationBlock *tb1;
401 unsigned int n1;
403 /* suppress any remaining jumps to this TB */
404 tb1 = tb->jmp_first;
405 for(;;) {
406 n1 = (long)tb1 & 3;
407 tb1 = (TranslationBlock *)((long)tb1 & ~3);
408 if (n1 == 2)
409 break;
410 tb1 = tb1->jmp_next[n1];
412 /* check end of list */
413 if (tb1 != tb) {
414 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
418 #endif
420 /* invalidate one TB */
421 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
422 int next_offset)
424 TranslationBlock *tb1;
425 for(;;) {
426 tb1 = *ptb;
427 if (tb1 == tb) {
428 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
429 break;
431 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
435 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
437 TranslationBlock *tb1;
438 unsigned int n1;
440 for(;;) {
441 tb1 = *ptb;
442 n1 = (long)tb1 & 3;
443 tb1 = (TranslationBlock *)((long)tb1 & ~3);
444 if (tb1 == tb) {
445 *ptb = tb1->page_next[n1];
446 break;
448 ptb = &tb1->page_next[n1];
452 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
454 TranslationBlock *tb1, **ptb;
455 unsigned int n1;
457 ptb = &tb->jmp_next[n];
458 tb1 = *ptb;
459 if (tb1) {
460 /* find tb(n) in circular list */
461 for(;;) {
462 tb1 = *ptb;
463 n1 = (long)tb1 & 3;
464 tb1 = (TranslationBlock *)((long)tb1 & ~3);
465 if (n1 == n && tb1 == tb)
466 break;
467 if (n1 == 2) {
468 ptb = &tb1->jmp_first;
469 } else {
470 ptb = &tb1->jmp_next[n1];
473 /* now we can suppress tb(n) from the list */
474 *ptb = tb->jmp_next[n];
476 tb->jmp_next[n] = NULL;
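/*
 * Note on the TB lists handled above: pointers stored in page_next[] and
 * in the jmp_first/jmp_next[] chains carry a tag in their two low bits.
 * The tag selects which slot (0 or 1) of the pointed-to TB the chain
 * continues through, and the value 2 marks the list head/terminator
 * (e.g. tb->jmp_first = (TranslationBlock *)((long)tb | 2) below), which
 * is why the code keeps masking with "& 3" and "& ~3".
 */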
480 /* reset the jump entry 'n' of a TB so that it is not chained to
481 another TB */
482 static inline void tb_reset_jump(TranslationBlock *tb, int n)
484 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
487 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
489 CPUState *env;
490 PageDesc *p;
491 unsigned int h, n1;
492 target_ulong phys_pc;
493 TranslationBlock *tb1, *tb2;
495 /* remove the TB from the hash list */
496 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
497 h = tb_phys_hash_func(phys_pc);
498 tb_remove(&tb_phys_hash[h], tb,
499 offsetof(TranslationBlock, phys_hash_next));
501 /* remove the TB from the page list */
502 if (tb->page_addr[0] != page_addr) {
503 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
504 tb_page_remove(&p->first_tb, tb);
505 invalidate_page_bitmap(p);
507 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
508 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
509 tb_page_remove(&p->first_tb, tb);
510 invalidate_page_bitmap(p);
513 tb_invalidated_flag = 1;
515 /* remove the TB from the hash list */
516 h = tb_jmp_cache_hash_func(tb->pc);
517 for(env = first_cpu; env != NULL; env = env->next_cpu) {
518 if (env->tb_jmp_cache[h] == tb)
519 env->tb_jmp_cache[h] = NULL;
522 /* suppress this TB from the two jump lists */
523 tb_jmp_remove(tb, 0);
524 tb_jmp_remove(tb, 1);
526 /* suppress any remaining jumps to this TB */
527 tb1 = tb->jmp_first;
528 for(;;) {
529 n1 = (long)tb1 & 3;
530 if (n1 == 2)
531 break;
532 tb1 = (TranslationBlock *)((long)tb1 & ~3);
533 tb2 = tb1->jmp_next[n1];
534 tb_reset_jump(tb1, n1);
535 tb1->jmp_next[n1] = NULL;
536 tb1 = tb2;
538 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
540 tb_phys_invalidate_count++;
543 static inline void set_bits(uint8_t *tab, int start, int len)
545 int end, mask, end1;
547 end = start + len;
548 tab += start >> 3;
549 mask = 0xff << (start & 7);
550 if ((start & ~7) == (end & ~7)) {
551 if (start < end) {
552 mask &= ~(0xff << (end & 7));
553 *tab |= mask;
555 } else {
556 *tab++ |= mask;
557 start = (start + 8) & ~7;
558 end1 = end & ~7;
559 while (start < end1) {
560 *tab++ = 0xff;
561 start += 8;
563 if (start < end) {
564 mask = ~(0xff << (end & 7));
565 *tab |= mask;
570 static void build_page_bitmap(PageDesc *p)
572 int n, tb_start, tb_end;
573 TranslationBlock *tb;
575 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
576 if (!p->code_bitmap)
577 return;
578 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
580 tb = p->first_tb;
581 while (tb != NULL) {
582 n = (long)tb & 3;
583 tb = (TranslationBlock *)((long)tb & ~3);
584 /* NOTE: this is subtle as a TB may span two physical pages */
585 if (n == 0) {
586 /* NOTE: tb_end may be after the end of the page, but
587 it is not a problem */
588 tb_start = tb->pc & ~TARGET_PAGE_MASK;
589 tb_end = tb_start + tb->size;
590 if (tb_end > TARGET_PAGE_SIZE)
591 tb_end = TARGET_PAGE_SIZE;
592 } else {
593 tb_start = 0;
594 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
596 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
597 tb = tb->page_next[n];
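/*
 * Note: the code bitmap built above has one bit per byte of the target
 * page (TARGET_PAGE_SIZE / 8 bytes of bitmap).  A set bit means that
 * byte is covered by at least one translated block, which lets
 * tb_invalidate_phys_page_fast() skip the expensive invalidation path
 * for writes that do not touch translated code.
 */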
601 #ifdef TARGET_HAS_PRECISE_SMC
603 static void tb_gen_code(CPUState *env,
604 target_ulong pc, target_ulong cs_base, int flags,
605 int cflags)
607 TranslationBlock *tb;
608 uint8_t *tc_ptr;
609 target_ulong phys_pc, phys_page2, virt_page2;
610 int code_gen_size;
612 phys_pc = get_phys_addr_code(env, pc);
613 tb = tb_alloc(pc);
614 if (!tb) {
615 /* flush must be done */
616 tb_flush(env);
617 /* cannot fail at this point */
618 tb = tb_alloc(pc);
620 tc_ptr = code_gen_ptr;
621 tb->tc_ptr = tc_ptr;
622 tb->cs_base = cs_base;
623 tb->flags = flags;
624 tb->cflags = cflags;
625 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
626 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
628 /* check next page if needed */
629 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
630 phys_page2 = -1;
631 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
632 phys_page2 = get_phys_addr_code(env, virt_page2);
634 tb_link_phys(tb, phys_pc, phys_page2);
636 #endif
638 /* invalidate all TBs which intersect with the target physical page
639 starting in range [start;end[. NOTE: start and end must refer to
640 the same physical page. 'is_cpu_write_access' should be true if called
641 from a real cpu write access: the virtual CPU will exit the current
642 TB if code is modified inside this TB. */
643 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
644 int is_cpu_write_access)
646 int n, current_tb_modified, current_tb_not_found, current_flags;
647 CPUState *env = cpu_single_env;
648 PageDesc *p;
649 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
650 target_ulong tb_start, tb_end;
651 target_ulong current_pc, current_cs_base;
653 p = page_find(start >> TARGET_PAGE_BITS);
654 if (!p)
655 return;
656 if (!p->code_bitmap &&
657 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
658 is_cpu_write_access) {
659 /* build code bitmap */
660 build_page_bitmap(p);
663 /* we remove all the TBs in the range [start, end[ */
664 /* XXX: see if in some cases it could be faster to invalidate all the code */
665 current_tb_not_found = is_cpu_write_access;
666 current_tb_modified = 0;
667 current_tb = NULL; /* avoid warning */
668 current_pc = 0; /* avoid warning */
669 current_cs_base = 0; /* avoid warning */
670 current_flags = 0; /* avoid warning */
671 tb = p->first_tb;
672 while (tb != NULL) {
673 n = (long)tb & 3;
674 tb = (TranslationBlock *)((long)tb & ~3);
675 tb_next = tb->page_next[n];
676 /* NOTE: this is subtle as a TB may span two physical pages */
677 if (n == 0) {
678 /* NOTE: tb_end may be after the end of the page, but
679 it is not a problem */
680 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
681 tb_end = tb_start + tb->size;
682 } else {
683 tb_start = tb->page_addr[1];
684 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
686 if (!(tb_end <= start || tb_start >= end)) {
687 #ifdef TARGET_HAS_PRECISE_SMC
688 if (current_tb_not_found) {
689 current_tb_not_found = 0;
690 current_tb = NULL;
691 if (env->mem_write_pc) {
692 /* now we have a real cpu fault */
693 current_tb = tb_find_pc(env->mem_write_pc);
696 if (current_tb == tb &&
697 !(current_tb->cflags & CF_SINGLE_INSN)) {
698 /* If we are modifying the current TB, we must stop
699 its execution. We could be more precise by checking
700 that the modification is after the current PC, but it
701 would require a specialized function to partially
702 restore the CPU state */
704 current_tb_modified = 1;
705 cpu_restore_state(current_tb, env,
706 env->mem_write_pc, NULL);
707 #if defined(TARGET_I386)
708 current_flags = env->hflags;
709 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
710 current_cs_base = (target_ulong)env->segs[R_CS].base;
711 current_pc = current_cs_base + env->eip;
712 #else
713 #error unsupported CPU
714 #endif
716 #endif /* TARGET_HAS_PRECISE_SMC */
717 /* we need to do that to handle the case where a signal
718 occurs while doing tb_phys_invalidate() */
719 saved_tb = NULL;
720 if (env) {
721 saved_tb = env->current_tb;
722 env->current_tb = NULL;
724 tb_phys_invalidate(tb, -1);
725 if (env) {
726 env->current_tb = saved_tb;
727 if (env->interrupt_request && env->current_tb)
728 cpu_interrupt(env, env->interrupt_request);
731 tb = tb_next;
733 #if !defined(CONFIG_USER_ONLY)
734 /* if no code remaining, no need to continue to use slow writes */
735 if (!p->first_tb) {
736 invalidate_page_bitmap(p);
737 if (is_cpu_write_access) {
738 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
741 #endif
742 #ifdef TARGET_HAS_PRECISE_SMC
743 if (current_tb_modified) {
744 /* we generate a block containing just the instruction
745 modifying the memory. It will ensure that it cannot modify
746 itself */
747 env->current_tb = NULL;
748 tb_gen_code(env, current_pc, current_cs_base, current_flags,
749 CF_SINGLE_INSN);
750 cpu_resume_from_signal(env, NULL);
752 #endif
755 /* len must be <= 8 and start must be a multiple of len */
756 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
758 PageDesc *p;
759 int offset, b;
760 #if 0
761 if (1) {
762 if (loglevel) {
763 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
764 cpu_single_env->mem_write_vaddr, len,
765 cpu_single_env->eip,
766 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
769 #endif
770 p = page_find(start >> TARGET_PAGE_BITS);
771 if (!p)
772 return;
773 if (p->code_bitmap) {
774 offset = start & ~TARGET_PAGE_MASK;
775 b = p->code_bitmap[offset >> 3] >> (offset & 7);
776 if (b & ((1 << len) - 1))
777 goto do_invalidate;
778 } else {
779 do_invalidate:
780 tb_invalidate_phys_page_range(start, start + len, 1);
784 #if !defined(CONFIG_SOFTMMU)
785 static void tb_invalidate_phys_page(target_ulong addr,
786 unsigned long pc, void *puc)
788 int n, current_flags, current_tb_modified;
789 target_ulong current_pc, current_cs_base;
790 PageDesc *p;
791 TranslationBlock *tb, *current_tb;
792 #ifdef TARGET_HAS_PRECISE_SMC
793 CPUState *env = cpu_single_env;
794 #endif
796 addr &= TARGET_PAGE_MASK;
797 p = page_find(addr >> TARGET_PAGE_BITS);
798 if (!p)
799 return;
800 tb = p->first_tb;
801 current_tb_modified = 0;
802 current_tb = NULL;
803 current_pc = 0; /* avoid warning */
804 current_cs_base = 0; /* avoid warning */
805 current_flags = 0; /* avoid warning */
806 #ifdef TARGET_HAS_PRECISE_SMC
807 if (tb && pc != 0) {
808 current_tb = tb_find_pc(pc);
810 #endif
811 while (tb != NULL) {
812 n = (long)tb & 3;
813 tb = (TranslationBlock *)((long)tb & ~3);
814 #ifdef TARGET_HAS_PRECISE_SMC
815 if (current_tb == tb &&
816 !(current_tb->cflags & CF_SINGLE_INSN)) {
817 /* If we are modifying the current TB, we must stop
818 its execution. We could be more precise by checking
819 that the modification is after the current PC, but it
820 would require a specialized function to partially
821 restore the CPU state */
823 current_tb_modified = 1;
824 cpu_restore_state(current_tb, env, pc, puc);
825 #if defined(TARGET_I386)
826 current_flags = env->hflags;
827 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
828 current_cs_base = (target_ulong)env->segs[R_CS].base;
829 current_pc = current_cs_base + env->eip;
830 #else
831 #error unsupported CPU
832 #endif
834 #endif /* TARGET_HAS_PRECISE_SMC */
835 tb_phys_invalidate(tb, addr);
836 tb = tb->page_next[n];
838 p->first_tb = NULL;
839 #ifdef TARGET_HAS_PRECISE_SMC
840 if (current_tb_modified) {
841 /* we generate a block containing just the instruction
842 modifying the memory. It will ensure that it cannot modify
843 itself */
844 env->current_tb = NULL;
845 tb_gen_code(env, current_pc, current_cs_base, current_flags,
846 CF_SINGLE_INSN);
847 cpu_resume_from_signal(env, puc);
849 #endif
851 #endif
853 /* add the tb in the target page and protect it if necessary */
854 static inline void tb_alloc_page(TranslationBlock *tb,
855 unsigned int n, target_ulong page_addr)
857 PageDesc *p;
858 TranslationBlock *last_first_tb;
860 tb->page_addr[n] = page_addr;
861 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
862 tb->page_next[n] = p->first_tb;
863 last_first_tb = p->first_tb;
864 p->first_tb = (TranslationBlock *)((long)tb | n);
865 invalidate_page_bitmap(p);
867 #if defined(TARGET_HAS_SMC) || 1
869 #if defined(CONFIG_USER_ONLY)
870 if (p->flags & PAGE_WRITE) {
871 target_ulong addr;
872 PageDesc *p2;
873 int prot;
875 /* force the host page as non writable (writes will have a
876 page fault + mprotect overhead) */
877 page_addr &= qemu_host_page_mask;
878 prot = 0;
879 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
880 addr += TARGET_PAGE_SIZE) {
882 p2 = page_find (addr >> TARGET_PAGE_BITS);
883 if (!p2)
884 continue;
885 prot |= p2->flags;
886 p2->flags &= ~PAGE_WRITE;
887 page_get_flags(addr);
889 mprotect(g2h(page_addr), qemu_host_page_size,
890 (prot & PAGE_BITS) & ~PAGE_WRITE);
891 #ifdef DEBUG_TB_INVALIDATE
892 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
893 page_addr);
894 #endif
896 #else
897 /* if some code is already present, then the pages are already
898 protected. So we handle the case where only the first TB is
899 allocated in a physical page */
900 if (!last_first_tb) {
901 tlb_protect_code(page_addr);
903 #endif
905 #endif /* TARGET_HAS_SMC */
908 /* Allocate a new translation block. Flush the translation buffer if
909 too many translation blocks or too much generated code. */
910 TranslationBlock *tb_alloc(target_ulong pc)
912 TranslationBlock *tb;
914 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
915 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
916 return NULL;
917 tb = &tbs[nb_tbs++];
918 tb->pc = pc;
919 tb->cflags = 0;
920 return tb;
923 /* add a new TB and link it to the physical page tables. phys_page2 is
924 (-1) to indicate that only one page contains the TB. */
925 void tb_link_phys(TranslationBlock *tb,
926 target_ulong phys_pc, target_ulong phys_page2)
928 unsigned int h;
929 TranslationBlock **ptb;
931 /* add in the physical hash table */
932 h = tb_phys_hash_func(phys_pc);
933 ptb = &tb_phys_hash[h];
934 tb->phys_hash_next = *ptb;
935 *ptb = tb;
937 /* add in the page list */
938 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
939 if (phys_page2 != -1)
940 tb_alloc_page(tb, 1, phys_page2);
941 else
942 tb->page_addr[1] = -1;
944 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
945 tb->jmp_next[0] = NULL;
946 tb->jmp_next[1] = NULL;
948 /* init original jump addresses */
949 if (tb->tb_next_offset[0] != 0xffff)
950 tb_reset_jump(tb, 0);
951 if (tb->tb_next_offset[1] != 0xffff)
952 tb_reset_jump(tb, 1);
954 #ifdef DEBUG_TB_CHECK
955 tb_page_check();
956 #endif
959 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
960 tb[1].tc_ptr. Return NULL if not found */
961 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
963 int m_min, m_max, m;
964 unsigned long v;
965 TranslationBlock *tb;
967 if (nb_tbs <= 0)
968 return NULL;
969 if (tc_ptr < (unsigned long)code_gen_buffer ||
970 tc_ptr >= (unsigned long)code_gen_ptr)
971 return NULL;
972 /* binary search (cf Knuth) */
973 m_min = 0;
974 m_max = nb_tbs - 1;
975 while (m_min <= m_max) {
976 m = (m_min + m_max) >> 1;
977 tb = &tbs[m];
978 v = (unsigned long)tb->tc_ptr;
979 if (v == tc_ptr)
980 return tb;
981 else if (tc_ptr < v) {
982 m_max = m - 1;
983 } else {
984 m_min = m + 1;
987 return &tbs[m_max];
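/*
 * Note: the binary search above relies on tbs[] being filled in the same
 * order as code is emitted into code_gen_buffer, so tc_ptr values grow
 * monotonically with the array index and the array is implicitly sorted.
 * Both are reset together in tb_flush().
 */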
990 static void tb_reset_jump_recursive(TranslationBlock *tb);
992 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
994 TranslationBlock *tb1, *tb_next, **ptb;
995 unsigned int n1;
997 tb1 = tb->jmp_next[n];
998 if (tb1 != NULL) {
999 /* find head of list */
1000 for(;;) {
1001 n1 = (long)tb1 & 3;
1002 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1003 if (n1 == 2)
1004 break;
1005 tb1 = tb1->jmp_next[n1];
1007         /* we are now sure that tb jumps to tb1 */
1008 tb_next = tb1;
1010 /* remove tb from the jmp_first list */
1011 ptb = &tb_next->jmp_first;
1012 for(;;) {
1013 tb1 = *ptb;
1014 n1 = (long)tb1 & 3;
1015 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1016 if (n1 == n && tb1 == tb)
1017 break;
1018 ptb = &tb1->jmp_next[n1];
1020 *ptb = tb->jmp_next[n];
1021 tb->jmp_next[n] = NULL;
1023 /* suppress the jump to next tb in generated code */
1024 tb_reset_jump(tb, n);
1026 /* suppress jumps in the tb on which we could have jumped */
1027 tb_reset_jump_recursive(tb_next);
1031 static void tb_reset_jump_recursive(TranslationBlock *tb)
1033 tb_reset_jump_recursive2(tb, 0);
1034 tb_reset_jump_recursive2(tb, 1);
1037 #if defined(TARGET_HAS_ICE)
1038 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1040 target_phys_addr_t addr;
1041 target_ulong pd;
1042 ram_addr_t ram_addr;
1043 PhysPageDesc *p;
1045 addr = cpu_get_phys_page_debug(env, pc);
1046 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1047 if (!p) {
1048 pd = IO_MEM_UNASSIGNED;
1049 } else {
1050 pd = p->phys_offset;
1052 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1053 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1055 #endif
1057 /* Add a watchpoint. */
1058 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1060 int i;
1062 for (i = 0; i < env->nb_watchpoints; i++) {
1063 if (addr == env->watchpoint[i].vaddr)
1064 return 0;
1066 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1067 return -1;
1069 i = env->nb_watchpoints++;
1070 env->watchpoint[i].vaddr = addr;
1071 tlb_flush_page(env, addr);
1072 /* FIXME: This flush is needed because of the hack to make memory ops
1073 terminate the TB. It can be removed once the proper IO trap and
1074 re-execute bits are in. */
1075 tb_flush(env);
1076 return i;
1079 /* Remove a watchpoint. */
1080 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1082 int i;
1084 for (i = 0; i < env->nb_watchpoints; i++) {
1085 if (addr == env->watchpoint[i].vaddr) {
1086 env->nb_watchpoints--;
1087 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1088 tlb_flush_page(env, addr);
1089 return 0;
1092 return -1;
1095 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1096 breakpoint is reached */
1097 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1099 #if defined(TARGET_HAS_ICE)
1100 int i;
1102 for(i = 0; i < env->nb_breakpoints; i++) {
1103 if (env->breakpoints[i] == pc)
1104 return 0;
1107 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1108 return -1;
1109 env->breakpoints[env->nb_breakpoints++] = pc;
1111 breakpoint_invalidate(env, pc);
1112 return 0;
1113 #else
1114 return -1;
1115 #endif
1118 /* remove a breakpoint */
1119 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1121 #if defined(TARGET_HAS_ICE)
1122 int i;
1123 for(i = 0; i < env->nb_breakpoints; i++) {
1124 if (env->breakpoints[i] == pc)
1125 goto found;
1127 return -1;
1128 found:
1129 env->nb_breakpoints--;
1130 if (i < env->nb_breakpoints)
1131 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1133 breakpoint_invalidate(env, pc);
1134 return 0;
1135 #else
1136 return -1;
1137 #endif
1140 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1141 CPU loop after each instruction */
1142 void cpu_single_step(CPUState *env, int enabled)
1144 #if defined(TARGET_HAS_ICE)
1145 if (env->singlestep_enabled != enabled) {
1146 env->singlestep_enabled = enabled;
1147         /* must flush all the translated code to avoid inconsistencies */
1148 /* XXX: only flush what is necessary */
1149 tb_flush(env);
1151 #endif
1154 /* enable or disable low-level logging */
1155 void cpu_set_log(int log_flags)
1157 loglevel = log_flags;
1158 if (loglevel && !logfile) {
1159 logfile = fopen(logfilename, log_append ? "a" : "w");
1160 if (!logfile) {
1161 perror(logfilename);
1162 _exit(1);
1164 #if !defined(CONFIG_SOFTMMU)
1165         /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1167 static uint8_t logfile_buf[4096];
1168 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1170 #else
1171 setvbuf(logfile, NULL, _IOLBF, 0);
1172 #endif
1173 log_append = 1;
1175 if (!loglevel && logfile) {
1176 fclose(logfile);
1177 logfile = NULL;
1181 void cpu_set_log_filename(const char *filename)
1183 logfilename = strdup(filename);
1184 if (logfile) {
1185 fclose(logfile);
1186 logfile = NULL;
1188 cpu_set_log(loglevel);
1191 /* mask must never be zero, except for A20 change call */
1192 void cpu_interrupt(CPUState *env, int mask)
1194 TranslationBlock *tb;
1195 static int interrupt_lock;
1197 env->interrupt_request |= mask;
1198 /* if the cpu is currently executing code, we must unlink it and
1199        all the potentially executing TBs */
1200 tb = env->current_tb;
1201 if (tb && !testandset(&interrupt_lock)) {
1202 env->current_tb = NULL;
1203 tb_reset_jump_recursive(tb);
1204 interrupt_lock = 0;
1208 void cpu_reset_interrupt(CPUState *env, int mask)
1210 env->interrupt_request &= ~mask;
1213 CPULogItem cpu_log_items[] = {
1214 { CPU_LOG_TB_OUT_ASM, "out_asm",
1215 "show generated host assembly code for each compiled TB" },
1216 { CPU_LOG_TB_IN_ASM, "in_asm",
1217 "show target assembly code for each compiled TB" },
1218 { CPU_LOG_TB_OP, "op",
1219 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1220 #ifdef TARGET_I386
1221 { CPU_LOG_TB_OP_OPT, "op_opt",
1222 "show micro ops after optimization for each compiled TB" },
1223 #endif
1224 { CPU_LOG_INT, "int",
1225 "show interrupts/exceptions in short format" },
1226 { CPU_LOG_EXEC, "exec",
1227 "show trace before each executed TB (lots of logs)" },
1228 { CPU_LOG_TB_CPU, "cpu",
1229 "show CPU state before block translation" },
1230 #ifdef TARGET_I386
1231 { CPU_LOG_PCALL, "pcall",
1232 "show protected mode far calls/returns/exceptions" },
1233 #endif
1234 #ifdef DEBUG_IOPORT
1235 { CPU_LOG_IOPORT, "ioport",
1236 "show all i/o ports accesses" },
1237 #endif
1238 { 0, NULL, NULL },
1241 static int cmp1(const char *s1, int n, const char *s2)
1243 if (strlen(s2) != n)
1244 return 0;
1245 return memcmp(s1, s2, n) == 0;
1248 /* takes a comma-separated list of log masks. Returns 0 on error. */
1249 int cpu_str_to_log_mask(const char *str)
1251 CPULogItem *item;
1252 int mask;
1253 const char *p, *p1;
1255 p = str;
1256 mask = 0;
1257 for(;;) {
1258 p1 = strchr(p, ',');
1259 if (!p1)
1260 p1 = p + strlen(p);
1261 if(cmp1(p,p1-p,"all")) {
1262 for(item = cpu_log_items; item->mask != 0; item++) {
1263 mask |= item->mask;
1265 } else {
1266 for(item = cpu_log_items; item->mask != 0; item++) {
1267 if (cmp1(p, p1 - p, item->name))
1268 goto found;
1270 return 0;
1272 found:
1273 mask |= item->mask;
1274 if (*p1 != ',')
1275 break;
1276 p = p1 + 1;
1278 return mask;
1281 void cpu_abort(CPUState *env, const char *fmt, ...)
1283 va_list ap;
1284 va_list ap2;
1286 va_start(ap, fmt);
1287 va_copy(ap2, ap);
1288 fprintf(stderr, "qemu: fatal: ");
1289 vfprintf(stderr, fmt, ap);
1290 fprintf(stderr, "\n");
1291 #ifdef TARGET_I386
1292 if(env->intercept & INTERCEPT_SVM_MASK) {
1293 /* most probably the virtual machine should not
1294 be shut down but rather caught by the VMM */
1295 vmexit(SVM_EXIT_SHUTDOWN, 0);
1297 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1298 #else
1299 cpu_dump_state(env, stderr, fprintf, 0);
1300 #endif
1301 if (logfile) {
1302 fprintf(logfile, "qemu: fatal: ");
1303 vfprintf(logfile, fmt, ap2);
1304 fprintf(logfile, "\n");
1305 #ifdef TARGET_I386
1306 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1307 #else
1308 cpu_dump_state(env, logfile, fprintf, 0);
1309 #endif
1310 fflush(logfile);
1311 fclose(logfile);
1313 va_end(ap2);
1314 va_end(ap);
1315 abort();
1318 CPUState *cpu_copy(CPUState *env)
1320 CPUState *new_env = cpu_init(env->cpu_model_str);
1321 /* preserve chaining and index */
1322 CPUState *next_cpu = new_env->next_cpu;
1323 int cpu_index = new_env->cpu_index;
1324 memcpy(new_env, env, sizeof(CPUState));
1325 new_env->next_cpu = next_cpu;
1326 new_env->cpu_index = cpu_index;
1327 return new_env;
1330 #if !defined(CONFIG_USER_ONLY)
1332 /* NOTE: if flush_global is true, also flush global entries (not
1333 implemented yet) */
1334 void tlb_flush(CPUState *env, int flush_global)
1336 int i;
1338 #if defined(DEBUG_TLB)
1339 printf("tlb_flush:\n");
1340 #endif
1341 /* must reset current TB so that interrupts cannot modify the
1342 links while we are modifying them */
1343 env->current_tb = NULL;
1345 for(i = 0; i < CPU_TLB_SIZE; i++) {
1346 env->tlb_table[0][i].addr_read = -1;
1347 env->tlb_table[0][i].addr_write = -1;
1348 env->tlb_table[0][i].addr_code = -1;
1349 env->tlb_table[1][i].addr_read = -1;
1350 env->tlb_table[1][i].addr_write = -1;
1351 env->tlb_table[1][i].addr_code = -1;
1352 #if (NB_MMU_MODES >= 3)
1353 env->tlb_table[2][i].addr_read = -1;
1354 env->tlb_table[2][i].addr_write = -1;
1355 env->tlb_table[2][i].addr_code = -1;
1356 #if (NB_MMU_MODES == 4)
1357 env->tlb_table[3][i].addr_read = -1;
1358 env->tlb_table[3][i].addr_write = -1;
1359 env->tlb_table[3][i].addr_code = -1;
1360 #endif
1361 #endif
1364 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1366 #if !defined(CONFIG_SOFTMMU)
1367 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1368 #endif
1369 #ifdef USE_KQEMU
1370 if (env->kqemu_enabled) {
1371 kqemu_flush(env, flush_global);
1373 #endif
1374 tlb_flush_count++;
1377 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1379 if (addr == (tlb_entry->addr_read &
1380 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1381 addr == (tlb_entry->addr_write &
1382 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1383 addr == (tlb_entry->addr_code &
1384 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1385 tlb_entry->addr_read = -1;
1386 tlb_entry->addr_write = -1;
1387 tlb_entry->addr_code = -1;
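/*
 * Note: a CPUTLBEntry keeps the guest virtual page address in the
 * TARGET_PAGE_MASK bits of addr_read/addr_write/addr_code and uses the
 * low bits for flags or an I/O handler index (see tlb_set_page_exec()
 * below), which is why the comparisons above mask with
 * TARGET_PAGE_MASK | TLB_INVALID_MASK.  A value of -1 means "no mapping".
 */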
1391 void tlb_flush_page(CPUState *env, target_ulong addr)
1393 int i;
1394 TranslationBlock *tb;
1396 #if defined(DEBUG_TLB)
1397 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1398 #endif
1399 /* must reset current TB so that interrupts cannot modify the
1400 links while we are modifying them */
1401 env->current_tb = NULL;
1403 addr &= TARGET_PAGE_MASK;
1404 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1405 tlb_flush_entry(&env->tlb_table[0][i], addr);
1406 tlb_flush_entry(&env->tlb_table[1][i], addr);
1407 #if (NB_MMU_MODES >= 3)
1408 tlb_flush_entry(&env->tlb_table[2][i], addr);
1409 #if (NB_MMU_MODES == 4)
1410 tlb_flush_entry(&env->tlb_table[3][i], addr);
1411 #endif
1412 #endif
1414 /* Discard jump cache entries for any tb which might potentially
1415 overlap the flushed page. */
1416 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1417 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1419 i = tb_jmp_cache_hash_page(addr);
1420 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1422 #if !defined(CONFIG_SOFTMMU)
1423 if (addr < MMAP_AREA_END)
1424 munmap((void *)addr, TARGET_PAGE_SIZE);
1425 #endif
1426 #ifdef USE_KQEMU
1427 if (env->kqemu_enabled) {
1428 kqemu_flush_page(env, addr);
1430 #endif
1433 /* update the TLBs so that writes to code in the virtual page 'addr'
1434 can be detected */
1435 static void tlb_protect_code(ram_addr_t ram_addr)
1437 cpu_physical_memory_reset_dirty(ram_addr,
1438 ram_addr + TARGET_PAGE_SIZE,
1439 CODE_DIRTY_FLAG);
1442 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1443 tested for self modifying code */
1444 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1445 target_ulong vaddr)
1447 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
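/*
 * Note: phys_ram_dirty keeps one byte of dirty flags per target page of
 * RAM.  Clearing CODE_DIRTY_FLAG (tlb_protect_code above) forces writes
 * to that page through the "notdirty" slow path, which calls
 * tb_invalidate_phys_page_fast() before restoring the flags; setting the
 * flag again here stops further writes from being checked for
 * self-modifying code.
 */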
1450 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1451 unsigned long start, unsigned long length)
1453 unsigned long addr;
1454 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1455 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1456 if ((addr - start) < length) {
1457 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1462 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1463 int dirty_flags)
1465 CPUState *env;
1466 unsigned long length, start1;
1467 int i, mask, len;
1468 uint8_t *p;
1470 start &= TARGET_PAGE_MASK;
1471 end = TARGET_PAGE_ALIGN(end);
1473 length = end - start;
1474 if (length == 0)
1475 return;
1476 len = length >> TARGET_PAGE_BITS;
1477 #ifdef USE_KQEMU
1478 /* XXX: should not depend on cpu context */
1479 env = first_cpu;
1480 if (env->kqemu_enabled) {
1481 ram_addr_t addr;
1482 addr = start;
1483 for(i = 0; i < len; i++) {
1484 kqemu_set_notdirty(env, addr);
1485 addr += TARGET_PAGE_SIZE;
1488 #endif
1489 mask = ~dirty_flags;
1490 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1491 for(i = 0; i < len; i++)
1492 p[i] &= mask;
1494 /* we modify the TLB cache so that the dirty bit will be set again
1495 when accessing the range */
1496 start1 = start + (unsigned long)phys_ram_base;
1497 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1498 for(i = 0; i < CPU_TLB_SIZE; i++)
1499 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1500 for(i = 0; i < CPU_TLB_SIZE; i++)
1501 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1502 #if (NB_MMU_MODES >= 3)
1503 for(i = 0; i < CPU_TLB_SIZE; i++)
1504 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1505 #if (NB_MMU_MODES == 4)
1506 for(i = 0; i < CPU_TLB_SIZE; i++)
1507 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1508 #endif
1509 #endif
1512 #if !defined(CONFIG_SOFTMMU)
1513 /* XXX: this is expensive */
1515 VirtPageDesc *p;
1516 int j;
1517 target_ulong addr;
1519 for(i = 0; i < L1_SIZE; i++) {
1520 p = l1_virt_map[i];
1521 if (p) {
1522 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1523 for(j = 0; j < L2_SIZE; j++) {
1524 if (p->valid_tag == virt_valid_tag &&
1525 p->phys_addr >= start && p->phys_addr < end &&
1526 (p->prot & PROT_WRITE)) {
1527 if (addr < MMAP_AREA_END) {
1528 mprotect((void *)addr, TARGET_PAGE_SIZE,
1529 p->prot & ~PROT_WRITE);
1532 addr += TARGET_PAGE_SIZE;
1533 p++;
1538 #endif
1541 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1543 ram_addr_t ram_addr;
1545 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1546 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1547 tlb_entry->addend - (unsigned long)phys_ram_base;
1548 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1549 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1554 /* update the TLB according to the current state of the dirty bits */
1555 void cpu_tlb_update_dirty(CPUState *env)
1557 int i;
1558 for(i = 0; i < CPU_TLB_SIZE; i++)
1559 tlb_update_dirty(&env->tlb_table[0][i]);
1560 for(i = 0; i < CPU_TLB_SIZE; i++)
1561 tlb_update_dirty(&env->tlb_table[1][i]);
1562 #if (NB_MMU_MODES >= 3)
1563 for(i = 0; i < CPU_TLB_SIZE; i++)
1564 tlb_update_dirty(&env->tlb_table[2][i]);
1565 #if (NB_MMU_MODES == 4)
1566 for(i = 0; i < CPU_TLB_SIZE; i++)
1567 tlb_update_dirty(&env->tlb_table[3][i]);
1568 #endif
1569 #endif
1572 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1573 unsigned long start)
1575 unsigned long addr;
1576 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1577 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1578 if (addr == start) {
1579 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1584 /* update the TLB corresponding to virtual page vaddr and phys addr
1585 addr so that it is no longer dirty */
1586 static inline void tlb_set_dirty(CPUState *env,
1587 unsigned long addr, target_ulong vaddr)
1589 int i;
1591 addr &= TARGET_PAGE_MASK;
1592 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1593 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1594 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1595 #if (NB_MMU_MODES >= 3)
1596 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1597 #if (NB_MMU_MODES == 4)
1598 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1599 #endif
1600 #endif
1603 /* add a new TLB entry. At most one entry for a given virtual address
1604 is permitted. Return 0 if OK or 2 if the page could not be mapped
1605 (can only happen in non SOFTMMU mode for I/O pages or pages
1606 conflicting with the host address space). */
1607 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1608 target_phys_addr_t paddr, int prot,
1609 int mmu_idx, int is_softmmu)
1611 PhysPageDesc *p;
1612 unsigned long pd;
1613 unsigned int index;
1614 target_ulong address;
1615 target_phys_addr_t addend;
1616 int ret;
1617 CPUTLBEntry *te;
1618 int i;
1620 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1621 if (!p) {
1622 pd = IO_MEM_UNASSIGNED;
1623 } else {
1624 pd = p->phys_offset;
1626 #if defined(DEBUG_TLB)
1627 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1628 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1629 #endif
1631 ret = 0;
1632 #if !defined(CONFIG_SOFTMMU)
1633 if (is_softmmu)
1634 #endif
1636 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1637 /* IO memory case */
1638 address = vaddr | pd;
1639 addend = paddr;
1640 } else {
1641 /* standard memory */
1642 address = vaddr;
1643 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1646 /* Make accesses to pages with watchpoints go via the
1647 watchpoint trap routines. */
1648 for (i = 0; i < env->nb_watchpoints; i++) {
1649 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1650 if (address & ~TARGET_PAGE_MASK) {
1651 env->watchpoint[i].addend = 0;
1652 address = vaddr | io_mem_watch;
1653 } else {
1654 env->watchpoint[i].addend = pd - paddr +
1655 (unsigned long) phys_ram_base;
1656 /* TODO: Figure out how to make read watchpoints coexist
1657 with code. */
1658 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1663 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1664 addend -= vaddr;
1665 te = &env->tlb_table[mmu_idx][index];
1666 te->addend = addend;
1667 if (prot & PAGE_READ) {
1668 te->addr_read = address;
1669 } else {
1670 te->addr_read = -1;
1672 if (prot & PAGE_EXEC) {
1673 te->addr_code = address;
1674 } else {
1675 te->addr_code = -1;
1677 if (prot & PAGE_WRITE) {
1678 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1679 (pd & IO_MEM_ROMD)) {
1680 /* write access calls the I/O callback */
1681 te->addr_write = vaddr |
1682 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1683 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1684 !cpu_physical_memory_is_dirty(pd)) {
1685 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1686 } else {
1687 te->addr_write = address;
1689 } else {
1690 te->addr_write = -1;
1693 #if !defined(CONFIG_SOFTMMU)
1694 else {
1695 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1696 /* IO access: no mapping is done as it will be handled by the
1697 soft MMU */
1698 if (!(env->hflags & HF_SOFTMMU_MASK))
1699 ret = 2;
1700 } else {
1701 void *map_addr;
1703 if (vaddr >= MMAP_AREA_END) {
1704 ret = 2;
1705 } else {
1706 if (prot & PROT_WRITE) {
1707 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1708 #if defined(TARGET_HAS_SMC) || 1
1709 first_tb ||
1710 #endif
1711 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1712 !cpu_physical_memory_is_dirty(pd))) {
1713                     /* ROM: we behave as if code were inside */
1714 /* if code is present, we only map as read only and save the
1715 original mapping */
1716 VirtPageDesc *vp;
1718 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1719 vp->phys_addr = pd;
1720 vp->prot = prot;
1721 vp->valid_tag = virt_valid_tag;
1722 prot &= ~PAGE_WRITE;
1725 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1726 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1727 if (map_addr == MAP_FAILED) {
1728 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1729 paddr, vaddr);
1734 #endif
1735 return ret;
1738 /* called from signal handler: invalidate the code and unprotect the
1739    page. Return TRUE if the fault was successfully handled. */
1740 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1742 #if !defined(CONFIG_SOFTMMU)
1743 VirtPageDesc *vp;
1745 #if defined(DEBUG_TLB)
1746 printf("page_unprotect: addr=0x%08x\n", addr);
1747 #endif
1748 addr &= TARGET_PAGE_MASK;
1750 /* if it is not mapped, no need to worry here */
1751 if (addr >= MMAP_AREA_END)
1752 return 0;
1753 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1754 if (!vp)
1755 return 0;
1756 /* NOTE: in this case, validate_tag is _not_ tested as it
1757 validates only the code TLB */
1758 if (vp->valid_tag != virt_valid_tag)
1759 return 0;
1760 if (!(vp->prot & PAGE_WRITE))
1761 return 0;
1762 #if defined(DEBUG_TLB)
1763 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1764 addr, vp->phys_addr, vp->prot);
1765 #endif
1766 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1767 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1768 (unsigned long)addr, vp->prot);
1769 /* set the dirty bit */
1770 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1771 /* flush the code inside */
1772 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1773 return 1;
1774 #else
1775 return 0;
1776 #endif
1779 #else
1781 void tlb_flush(CPUState *env, int flush_global)
1785 void tlb_flush_page(CPUState *env, target_ulong addr)
1789 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1790 target_phys_addr_t paddr, int prot,
1791 int mmu_idx, int is_softmmu)
1793 return 0;
1796 /* dump memory mappings */
1797 void page_dump(FILE *f)
1799 unsigned long start, end;
1800 int i, j, prot, prot1;
1801 PageDesc *p;
1803 fprintf(f, "%-8s %-8s %-8s %s\n",
1804 "start", "end", "size", "prot");
1805 start = -1;
1806 end = -1;
1807 prot = 0;
1808 for(i = 0; i <= L1_SIZE; i++) {
1809 if (i < L1_SIZE)
1810 p = l1_map[i];
1811 else
1812 p = NULL;
1813 for(j = 0;j < L2_SIZE; j++) {
1814 if (!p)
1815 prot1 = 0;
1816 else
1817 prot1 = p[j].flags;
1818 if (prot1 != prot) {
1819 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1820 if (start != -1) {
1821 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1822 start, end, end - start,
1823 prot & PAGE_READ ? 'r' : '-',
1824 prot & PAGE_WRITE ? 'w' : '-',
1825 prot & PAGE_EXEC ? 'x' : '-');
1827 if (prot1 != 0)
1828 start = end;
1829 else
1830 start = -1;
1831 prot = prot1;
1833 if (!p)
1834 break;
1839 int page_get_flags(target_ulong address)
1841 PageDesc *p;
1843 p = page_find(address >> TARGET_PAGE_BITS);
1844 if (!p)
1845 return 0;
1846 return p->flags;
1849 /* modify the flags of a page and invalidate the code if
1850    necessary. The flag PAGE_WRITE_ORG is set automatically
1851 depending on PAGE_WRITE */
1852 void page_set_flags(target_ulong start, target_ulong end, int flags)
1854 PageDesc *p;
1855 target_ulong addr;
1857 start = start & TARGET_PAGE_MASK;
1858 end = TARGET_PAGE_ALIGN(end);
1859 if (flags & PAGE_WRITE)
1860 flags |= PAGE_WRITE_ORG;
1861 spin_lock(&tb_lock);
1862 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1863 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1864 /* if the write protection is set, then we invalidate the code
1865 inside */
1866 if (!(p->flags & PAGE_WRITE) &&
1867 (flags & PAGE_WRITE) &&
1868 p->first_tb) {
1869 tb_invalidate_phys_page(addr, 0, NULL);
1871 p->flags = flags;
1873 spin_unlock(&tb_lock);
1876 int page_check_range(target_ulong start, target_ulong len, int flags)
1878 PageDesc *p;
1879 target_ulong end;
1880 target_ulong addr;
1882     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1883 start = start & TARGET_PAGE_MASK;
1885 if( end < start )
1886 /* we've wrapped around */
1887 return -1;
1888 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1889 p = page_find(addr >> TARGET_PAGE_BITS);
1890 if( !p )
1891 return -1;
1892 if( !(p->flags & PAGE_VALID) )
1893 return -1;
1895 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1896 return -1;
1897 if (flags & PAGE_WRITE) {
1898 if (!(p->flags & PAGE_WRITE_ORG))
1899 return -1;
1900            /* unprotect the page if it was made read-only because it
1901 contains translated code */
1902 if (!(p->flags & PAGE_WRITE)) {
1903 if (!page_unprotect(addr, 0, NULL))
1904 return -1;
1906 return 0;
1909 return 0;
1912 /* called from signal handler: invalidate the code and unprotect the
1913    page. Return TRUE if the fault was successfully handled. */
1914 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1916 unsigned int page_index, prot, pindex;
1917 PageDesc *p, *p1;
1918 target_ulong host_start, host_end, addr;
1920 host_start = address & qemu_host_page_mask;
1921 page_index = host_start >> TARGET_PAGE_BITS;
1922 p1 = page_find(page_index);
1923 if (!p1)
1924 return 0;
1925 host_end = host_start + qemu_host_page_size;
1926 p = p1;
1927 prot = 0;
1928 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1929 prot |= p->flags;
1930 p++;
1932 /* if the page was really writable, then we change its
1933 protection back to writable */
1934 if (prot & PAGE_WRITE_ORG) {
1935 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1936 if (!(p1[pindex].flags & PAGE_WRITE)) {
1937 mprotect((void *)g2h(host_start), qemu_host_page_size,
1938 (prot & PAGE_BITS) | PAGE_WRITE);
1939 p1[pindex].flags |= PAGE_WRITE;
1940 /* and since the content will be modified, we must invalidate
1941 the corresponding translated code. */
1942 tb_invalidate_phys_page(address, pc, puc);
1943 #ifdef DEBUG_TB_CHECK
1944 tb_invalidate_check(address);
1945 #endif
1946 return 1;
1949 return 0;
1952 static inline void tlb_set_dirty(CPUState *env,
1953 unsigned long addr, target_ulong vaddr)
1956 #endif /* defined(CONFIG_USER_ONLY) */
1958 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1959 int memory);
1960 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1961 int orig_memory);
1962 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1963 need_subpage) \
1964 do { \
1965 if (addr > start_addr) \
1966 start_addr2 = 0; \
1967 else { \
1968 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1969 if (start_addr2 > 0) \
1970 need_subpage = 1; \
1973 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
1974 end_addr2 = TARGET_PAGE_SIZE - 1; \
1975 else { \
1976 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1977 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1978 need_subpage = 1; \
1980 } while (0)
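/*
 * Note: CHECK_SUBPAGE computes, for the target page containing 'addr',
 * the first (start_addr2) and last (end_addr2) byte offsets actually
 * covered by the [start_addr, start_addr + orig_size) region, and sets
 * need_subpage when the region does not span the whole page, in which
 * case cpu_register_physical_memory() below routes that page through a
 * subpage_t.
 */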
1982 /* register physical memory. 'size' must be a multiple of the target
1983 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1984 io memory page */
1985 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1986 unsigned long size,
1987 unsigned long phys_offset)
1989 target_phys_addr_t addr, end_addr;
1990 PhysPageDesc *p;
1991 CPUState *env;
1992 unsigned long orig_size = size;
1993 void *subpage;
1995 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1996 end_addr = start_addr + (target_phys_addr_t)size;
1997 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1998 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1999 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2000 unsigned long orig_memory = p->phys_offset;
2001 target_phys_addr_t start_addr2, end_addr2;
2002 int need_subpage = 0;
2004 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2005 need_subpage);
2006 if (need_subpage) {
2007 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2008 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2009 &p->phys_offset, orig_memory);
2010 } else {
2011 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2012 >> IO_MEM_SHIFT];
2014 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2015 } else {
2016 p->phys_offset = phys_offset;
2017 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2018 (phys_offset & IO_MEM_ROMD))
2019 phys_offset += TARGET_PAGE_SIZE;
2021 } else {
2022 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2023 p->phys_offset = phys_offset;
2024 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2025 (phys_offset & IO_MEM_ROMD))
2026 phys_offset += TARGET_PAGE_SIZE;
2027 else {
2028 target_phys_addr_t start_addr2, end_addr2;
2029 int need_subpage = 0;
2031 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2032 end_addr2, need_subpage);
2034 if (need_subpage) {
2035 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2036 &p->phys_offset, IO_MEM_UNASSIGNED);
2037 subpage_register(subpage, start_addr2, end_addr2,
2038 phys_offset);
2044 /* since each CPU stores ram addresses in its TLB cache, we must
2045 reset the modified entries */
2046 /* XXX: slow ! */
2047 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2048 tlb_flush(env, 1);
2052 /* XXX: temporary until new memory mapping API */
2053 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2055 PhysPageDesc *p;
2057 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2058 if (!p)
2059 return IO_MEM_UNASSIGNED;
2060 return p->phys_offset;
2063 /* XXX: better than nothing */
2064 ram_addr_t qemu_ram_alloc(unsigned int size)
2066 ram_addr_t addr;
2067 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2068 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2069 size, phys_ram_size);
2070 abort();
2071 }
2072 addr = phys_ram_alloc_offset;
2073 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2074 return addr;
2075 }
2077 void qemu_ram_free(ram_addr_t addr)
2078 {
2079 }
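/* Handlers for accesses to unassigned physical addresses: they only
   log when DEBUG_UNASSIGNED is enabled and, on targets that model it
   (SPARC, CRIS), raise the target-specific unassigned access fault. */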
2081 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2082 {
2083 #ifdef DEBUG_UNASSIGNED
2084 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2085 #endif
2086 #ifdef TARGET_SPARC
2087 do_unassigned_access(addr, 0, 0, 0);
2088 #elif TARGET_CRIS
2089 do_unassigned_access(addr, 0, 0, 0);
2090 #endif
2091 return 0;
2092 }
2094 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2095 {
2096 #ifdef DEBUG_UNASSIGNED
2097 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2098 #endif
2099 #ifdef TARGET_SPARC
2100 do_unassigned_access(addr, 1, 0, 0);
2101 #elif TARGET_CRIS
2102 do_unassigned_access(addr, 1, 0, 0);
2103 #endif
2104 }
2106 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2107 unassigned_mem_readb,
2108 unassigned_mem_readb,
2109 unassigned_mem_readb,
2110 };
2112 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2113 unassigned_mem_writeb,
2114 unassigned_mem_writeb,
2115 unassigned_mem_writeb,
2116 };
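/* The "notdirty" handlers below are installed (via IO_MEM_NOTDIRTY) for
   RAM pages whose dirty flags are being tracked, typically because they
   contain translated code: on a write they invalidate the affected TBs,
   perform the store, update the per-page dirty flags and, once the page
   is fully dirty, remove the slow-path callback with tlb_set_dirty(). */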
2118 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2119 {
2120 unsigned long ram_addr;
2121 int dirty_flags;
2122 ram_addr = addr - (unsigned long)phys_ram_base;
2123 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2124 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2125 #if !defined(CONFIG_USER_ONLY)
2126 tb_invalidate_phys_page_fast(ram_addr, 1);
2127 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2128 #endif
2129 }
2130 stb_p((uint8_t *)(long)addr, val);
2131 #ifdef USE_KQEMU
2132 if (cpu_single_env->kqemu_enabled &&
2133 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2134 kqemu_modify_page(cpu_single_env, ram_addr);
2135 #endif
2136 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2137 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2138 /* we remove the notdirty callback only if the code has been
2139 flushed */
2140 if (dirty_flags == 0xff)
2141 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2142 }
2144 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2145 {
2146 unsigned long ram_addr;
2147 int dirty_flags;
2148 ram_addr = addr - (unsigned long)phys_ram_base;
2149 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2150 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2151 #if !defined(CONFIG_USER_ONLY)
2152 tb_invalidate_phys_page_fast(ram_addr, 2);
2153 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2154 #endif
2155 }
2156 stw_p((uint8_t *)(long)addr, val);
2157 #ifdef USE_KQEMU
2158 if (cpu_single_env->kqemu_enabled &&
2159 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2160 kqemu_modify_page(cpu_single_env, ram_addr);
2161 #endif
2162 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2163 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2164 /* we remove the notdirty callback only if the code has been
2165 flushed */
2166 if (dirty_flags == 0xff)
2167 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2168 }
2170 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2171 {
2172 unsigned long ram_addr;
2173 int dirty_flags;
2174 ram_addr = addr - (unsigned long)phys_ram_base;
2175 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2176 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2177 #if !defined(CONFIG_USER_ONLY)
2178 tb_invalidate_phys_page_fast(ram_addr, 4);
2179 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2180 #endif
2181 }
2182 stl_p((uint8_t *)(long)addr, val);
2183 #ifdef USE_KQEMU
2184 if (cpu_single_env->kqemu_enabled &&
2185 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2186 kqemu_modify_page(cpu_single_env, ram_addr);
2187 #endif
2188 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2189 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2190 /* we remove the notdirty callback only if the code has been
2191 flushed */
2192 if (dirty_flags == 0xff)
2193 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2194 }
2196 static CPUReadMemoryFunc *error_mem_read[3] = {
2197 NULL, /* never used */
2198 NULL, /* never used */
2199 NULL, /* never used */
2200 };
2202 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2203 notdirty_mem_writeb,
2204 notdirty_mem_writew,
2205 notdirty_mem_writel,
2206 };
2208 #if defined(CONFIG_SOFTMMU)
2209 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2210 so these check for a hit then pass through to the normal out-of-line
2211 phys routines. */
2212 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2214 return ldub_phys(addr);
2217 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2219 return lduw_phys(addr);
2222 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2224 return ldl_phys(addr);
2227 /* Generate a debug exception if a watchpoint has been hit.
2228 Returns the real physical address of the access. addr will be a host
2229 address in case of a RAM location. */
2230 static target_ulong check_watchpoint(target_phys_addr_t addr)
2231 {
2232 CPUState *env = cpu_single_env;
2233 target_ulong watch;
2234 target_ulong retaddr;
2235 int i;
2237 retaddr = addr;
2238 for (i = 0; i < env->nb_watchpoints; i++) {
2239 watch = env->watchpoint[i].vaddr;
2240 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2241 retaddr = addr - env->watchpoint[i].addend;
2242 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2243 cpu_single_env->watchpoint_hit = i + 1;
2244 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2245 break;
2246 }
2247 }
2248 }
2249 return retaddr;
2250 }
2252 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2253 uint32_t val)
2255 addr = check_watchpoint(addr);
2256 stb_phys(addr, val);
2259 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2260 uint32_t val)
2262 addr = check_watchpoint(addr);
2263 stw_phys(addr, val);
2266 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2267 uint32_t val)
2269 addr = check_watchpoint(addr);
2270 stl_phys(addr, val);
2273 static CPUReadMemoryFunc *watch_mem_read[3] = {
2274 watch_mem_readb,
2275 watch_mem_readw,
2276 watch_mem_readl,
2277 };
2279 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2280 watch_mem_writeb,
2281 watch_mem_writew,
2282 watch_mem_writel,
2283 };
2284 #endif
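/* Subpage support: a subpage splits one target page into smaller
   regions (indexed by SUBPAGE_IDX) so that several I/O memory areas
   smaller than TARGET_PAGE_SIZE can share a single physical page.  The
   helpers below dispatch each access to the handler registered for the
   region containing the address. */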
2286 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2287 unsigned int len)
2288 {
2289 CPUReadMemoryFunc **mem_read;
2290 uint32_t ret;
2291 unsigned int idx;
2293 idx = SUBPAGE_IDX(addr - mmio->base);
2294 #if defined(DEBUG_SUBPAGE)
2295 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2296 mmio, len, addr, idx);
2297 #endif
2298 mem_read = mmio->mem_read[idx];
2299 ret = (*mem_read[len])(mmio->opaque[idx], addr);
2301 return ret;
2302 }
2304 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2305 uint32_t value, unsigned int len)
2306 {
2307 CPUWriteMemoryFunc **mem_write;
2308 unsigned int idx;
2310 idx = SUBPAGE_IDX(addr - mmio->base);
2311 #if defined(DEBUG_SUBPAGE)
2312 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2313 mmio, len, addr, idx, value);
2314 #endif
2315 mem_write = mmio->mem_write[idx];
2316 (*mem_write[len])(mmio->opaque[idx], addr, value);
2317 }
2319 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2321 #if defined(DEBUG_SUBPAGE)
2322 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2323 #endif
2325 return subpage_readlen(opaque, addr, 0);
2328 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2329 uint32_t value)
2331 #if defined(DEBUG_SUBPAGE)
2332 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2333 #endif
2334 subpage_writelen(opaque, addr, value, 0);
2337 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2339 #if defined(DEBUG_SUBPAGE)
2340 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2341 #endif
2343 return subpage_readlen(opaque, addr, 1);
2346 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2347 uint32_t value)
2349 #if defined(DEBUG_SUBPAGE)
2350 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2351 #endif
2352 subpage_writelen(opaque, addr, value, 1);
2355 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2357 #if defined(DEBUG_SUBPAGE)
2358 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2359 #endif
2361 return subpage_readlen(opaque, addr, 2);
2364 static void subpage_writel (void *opaque,
2365 target_phys_addr_t addr, uint32_t value)
2367 #if defined(DEBUG_SUBPAGE)
2368 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2369 #endif
2370 subpage_writelen(opaque, addr, value, 2);
2373 static CPUReadMemoryFunc *subpage_read[] = {
2374 &subpage_readb,
2375 &subpage_readw,
2376 &subpage_readl,
2377 };
2379 static CPUWriteMemoryFunc *subpage_write[] = {
2380 &subpage_writeb,
2381 &subpage_writew,
2382 &subpage_writel,
2383 };
2385 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2386 int memory)
2387 {
2388 int idx, eidx;
2390 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2391 return -1;
2392 idx = SUBPAGE_IDX(start);
2393 eidx = SUBPAGE_IDX(end);
2394 #if defined(DEBUG_SUBPAGE)
2395 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2396 mmio, start, end, idx, eidx, memory);
2397 #endif
2398 memory >>= IO_MEM_SHIFT;
2399 for (; idx <= eidx; idx++) {
2400 mmio->mem_read[idx] = io_mem_read[memory];
2401 mmio->mem_write[idx] = io_mem_write[memory];
2402 mmio->opaque[idx] = io_mem_opaque[memory];
2403 }
2405 return 0;
2406 }
2408 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2409 int orig_memory)
2410 {
2411 subpage_t *mmio;
2412 int subpage_memory;
2414 mmio = qemu_mallocz(sizeof(subpage_t));
2415 if (mmio != NULL) {
2416 mmio->base = base;
2417 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2418 #if defined(DEBUG_SUBPAGE)
2419 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2420 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2421 #endif
2422 *phys = subpage_memory | IO_MEM_SUBPAGE;
2423 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2424 }
2426 return mmio;
2427 }
2429 static void io_mem_init(void)
2430 {
2431 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2432 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2433 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2434 io_mem_nb = 5;
2436 #if defined(CONFIG_SOFTMMU)
2437 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2438 watch_mem_write, NULL);
2439 #endif
2440 /* alloc dirty bits array */
2441 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2442 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2443 }
2445 /* mem_read and mem_write are arrays of functions containing the
2446 function to access byte (index 0), word (index 1) and dword (index
2447 2). All functions must be supplied. If io_index is non zero, the
2448 corresponding io zone is modified. If it is zero, a new io zone is
2449 allocated. The return value can be used with
2450 cpu_register_physical_memory(). -1 is returned on error. */
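/* Illustrative sketch only (not part of this file): a device model
   would register three read and three write callbacks and then map the
   returned value; all names below are hypothetical.

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);
*/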
2451 int cpu_register_io_memory(int io_index,
2452 CPUReadMemoryFunc **mem_read,
2453 CPUWriteMemoryFunc **mem_write,
2454 void *opaque)
2455 {
2456 int i;
2458 if (io_index <= 0) {
2459 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2460 return -1;
2461 io_index = io_mem_nb++;
2462 } else {
2463 if (io_index >= IO_MEM_NB_ENTRIES)
2464 return -1;
2465 }
2467 for(i = 0;i < 3; i++) {
2468 io_mem_read[io_index][i] = mem_read[i];
2469 io_mem_write[io_index][i] = mem_write[i];
2470 }
2471 io_mem_opaque[io_index] = opaque;
2472 return io_index << IO_MEM_SHIFT;
2473 }
2475 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2477 return io_mem_write[io_index >> IO_MEM_SHIFT];
2480 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2482 return io_mem_read[io_index >> IO_MEM_SHIFT];
2485 /* physical memory access (slow version, mainly for debug) */
2486 #if defined(CONFIG_USER_ONLY)
2487 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2488 int len, int is_write)
2489 {
2490 int l, flags;
2491 target_ulong page;
2492 void * p;
2494 while (len > 0) {
2495 page = addr & TARGET_PAGE_MASK;
2496 l = (page + TARGET_PAGE_SIZE) - addr;
2497 if (l > len)
2498 l = len;
2499 flags = page_get_flags(page);
2500 if (!(flags & PAGE_VALID))
2501 return;
2502 if (is_write) {
2503 if (!(flags & PAGE_WRITE))
2504 return;
2505 /* XXX: this code should not depend on lock_user */
2506 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2507 /* FIXME - should this return an error rather than just fail? */
2508 return;
2509 memcpy(p, buf, len);
2510 unlock_user(p, addr, len);
2511 } else {
2512 if (!(flags & PAGE_READ))
2513 return;
2514 /* XXX: this code should not depend on lock_user */
2515 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2516 /* FIXME - should this return an error rather than just fail? */
2517 return;
2518 memcpy(buf, p, len);
2519 unlock_user(p, addr, 0);
2520 }
2521 len -= l;
2522 buf += l;
2523 addr += l;
2524 }
2525 }
2527 #else
2528 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2529 int len, int is_write)
2530 {
2531 int l, io_index;
2532 uint8_t *ptr;
2533 uint32_t val;
2534 target_phys_addr_t page;
2535 unsigned long pd;
2536 PhysPageDesc *p;
2538 while (len > 0) {
2539 page = addr & TARGET_PAGE_MASK;
2540 l = (page + TARGET_PAGE_SIZE) - addr;
2541 if (l > len)
2542 l = len;
2543 p = phys_page_find(page >> TARGET_PAGE_BITS);
2544 if (!p) {
2545 pd = IO_MEM_UNASSIGNED;
2546 } else {
2547 pd = p->phys_offset;
2548 }
2550 if (is_write) {
2551 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2552 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2553 /* XXX: could force cpu_single_env to NULL to avoid
2554 potential bugs */
2555 if (l >= 4 && ((addr & 3) == 0)) {
2556 /* 32 bit write access */
2557 val = ldl_p(buf);
2558 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2559 l = 4;
2560 } else if (l >= 2 && ((addr & 1) == 0)) {
2561 /* 16 bit write access */
2562 val = lduw_p(buf);
2563 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2564 l = 2;
2565 } else {
2566 /* 8 bit write access */
2567 val = ldub_p(buf);
2568 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2569 l = 1;
2570 }
2571 } else {
2572 unsigned long addr1;
2573 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2574 /* RAM case */
2575 ptr = phys_ram_base + addr1;
2576 memcpy(ptr, buf, l);
2577 if (!cpu_physical_memory_is_dirty(addr1)) {
2578 /* invalidate code */
2579 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2580 /* set dirty bit */
2581 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2582 (0xff & ~CODE_DIRTY_FLAG);
2583 }
2584 }
2585 } else {
2586 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2587 !(pd & IO_MEM_ROMD)) {
2588 /* I/O case */
2589 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2590 if (l >= 4 && ((addr & 3) == 0)) {
2591 /* 32 bit read access */
2592 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2593 stl_p(buf, val);
2594 l = 4;
2595 } else if (l >= 2 && ((addr & 1) == 0)) {
2596 /* 16 bit read access */
2597 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2598 stw_p(buf, val);
2599 l = 2;
2600 } else {
2601 /* 8 bit read access */
2602 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2603 stb_p(buf, val);
2604 l = 1;
2605 }
2606 } else {
2607 /* RAM case */
2608 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2609 (addr & ~TARGET_PAGE_MASK);
2610 memcpy(buf, ptr, l);
2611 }
2612 }
2613 len -= l;
2614 buf += l;
2615 addr += l;
2616 }
2617 }
2619 /* used for ROM loading : can write in RAM and ROM */
2620 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2621 const uint8_t *buf, int len)
2622 {
2623 int l;
2624 uint8_t *ptr;
2625 target_phys_addr_t page;
2626 unsigned long pd;
2627 PhysPageDesc *p;
2629 while (len > 0) {
2630 page = addr & TARGET_PAGE_MASK;
2631 l = (page + TARGET_PAGE_SIZE) - addr;
2632 if (l > len)
2633 l = len;
2634 p = phys_page_find(page >> TARGET_PAGE_BITS);
2635 if (!p) {
2636 pd = IO_MEM_UNASSIGNED;
2637 } else {
2638 pd = p->phys_offset;
2639 }
2641 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2642 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2643 !(pd & IO_MEM_ROMD)) {
2644 /* do nothing */
2645 } else {
2646 unsigned long addr1;
2647 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2648 /* ROM/RAM case */
2649 ptr = phys_ram_base + addr1;
2650 memcpy(ptr, buf, l);
2651 }
2652 len -= l;
2653 buf += l;
2654 addr += l;
2655 }
2656 }
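/* The ld*_phys/st*_phys helpers below access physical memory one value
   at a time, going through the registered I/O callbacks for MMIO pages
   and directly through phys_ram_base for RAM. */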
2659 /* warning: addr must be aligned */
2660 uint32_t ldl_phys(target_phys_addr_t addr)
2661 {
2662 int io_index;
2663 uint8_t *ptr;
2664 uint32_t val;
2665 unsigned long pd;
2666 PhysPageDesc *p;
2668 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2669 if (!p) {
2670 pd = IO_MEM_UNASSIGNED;
2671 } else {
2672 pd = p->phys_offset;
2673 }
2675 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2676 !(pd & IO_MEM_ROMD)) {
2677 /* I/O case */
2678 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2679 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2680 } else {
2681 /* RAM case */
2682 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2683 (addr & ~TARGET_PAGE_MASK);
2684 val = ldl_p(ptr);
2685 }
2686 return val;
2687 }
2689 /* warning: addr must be aligned */
2690 uint64_t ldq_phys(target_phys_addr_t addr)
2691 {
2692 int io_index;
2693 uint8_t *ptr;
2694 uint64_t val;
2695 unsigned long pd;
2696 PhysPageDesc *p;
2698 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2699 if (!p) {
2700 pd = IO_MEM_UNASSIGNED;
2701 } else {
2702 pd = p->phys_offset;
2703 }
2705 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2706 !(pd & IO_MEM_ROMD)) {
2707 /* I/O case */
2708 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2709 #ifdef TARGET_WORDS_BIGENDIAN
2710 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2711 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2712 #else
2713 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2714 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2715 #endif
2716 } else {
2717 /* RAM case */
2718 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2719 (addr & ~TARGET_PAGE_MASK);
2720 val = ldq_p(ptr);
2721 }
2722 return val;
2723 }
2725 /* XXX: optimize */
2726 uint32_t ldub_phys(target_phys_addr_t addr)
2728 uint8_t val;
2729 cpu_physical_memory_read(addr, &val, 1);
2730 return val;
2733 /* XXX: optimize */
2734 uint32_t lduw_phys(target_phys_addr_t addr)
2736 uint16_t val;
2737 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2738 return tswap16(val);
2741 /* warning: addr must be aligned. The ram page is not masked as dirty
2742 and the code inside is not invalidated. It is useful if the dirty
2743 bits are used to track modified PTEs */
2744 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2745 {
2746 int io_index;
2747 uint8_t *ptr;
2748 unsigned long pd;
2749 PhysPageDesc *p;
2751 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2752 if (!p) {
2753 pd = IO_MEM_UNASSIGNED;
2754 } else {
2755 pd = p->phys_offset;
2756 }
2758 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2759 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2760 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2761 } else {
2762 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2763 (addr & ~TARGET_PAGE_MASK);
2764 stl_p(ptr, val);
2765 }
2766 }
2768 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2769 {
2770 int io_index;
2771 uint8_t *ptr;
2772 unsigned long pd;
2773 PhysPageDesc *p;
2775 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2776 if (!p) {
2777 pd = IO_MEM_UNASSIGNED;
2778 } else {
2779 pd = p->phys_offset;
2780 }
2782 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2783 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2784 #ifdef TARGET_WORDS_BIGENDIAN
2785 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2786 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2787 #else
2788 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2789 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2790 #endif
2791 } else {
2792 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2793 (addr & ~TARGET_PAGE_MASK);
2794 stq_p(ptr, val);
2795 }
2796 }
2798 /* warning: addr must be aligned */
2799 void stl_phys(target_phys_addr_t addr, uint32_t val)
2800 {
2801 int io_index;
2802 uint8_t *ptr;
2803 unsigned long pd;
2804 PhysPageDesc *p;
2806 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2807 if (!p) {
2808 pd = IO_MEM_UNASSIGNED;
2809 } else {
2810 pd = p->phys_offset;
2811 }
2813 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2814 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2815 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2816 } else {
2817 unsigned long addr1;
2818 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2819 /* RAM case */
2820 ptr = phys_ram_base + addr1;
2821 stl_p(ptr, val);
2822 if (!cpu_physical_memory_is_dirty(addr1)) {
2823 /* invalidate code */
2824 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2825 /* set dirty bit */
2826 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2827 (0xff & ~CODE_DIRTY_FLAG);
2828 }
2829 }
2830 }
2832 /* XXX: optimize */
2833 void stb_phys(target_phys_addr_t addr, uint32_t val)
2835 uint8_t v = val;
2836 cpu_physical_memory_write(addr, &v, 1);
2839 /* XXX: optimize */
2840 void stw_phys(target_phys_addr_t addr, uint32_t val)
2842 uint16_t v = tswap16(val);
2843 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2846 /* XXX: optimize */
2847 void stq_phys(target_phys_addr_t addr, uint64_t val)
2849 val = tswap64(val);
2850 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2853 #endif
2855 /* virtual memory access for debug */
2856 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2857 uint8_t *buf, int len, int is_write)
2858 {
2859 int l;
2860 target_phys_addr_t phys_addr;
2861 target_ulong page;
2863 while (len > 0) {
2864 page = addr & TARGET_PAGE_MASK;
2865 phys_addr = cpu_get_phys_page_debug(env, page);
2866 /* if no physical page mapped, return an error */
2867 if (phys_addr == -1)
2868 return -1;
2869 l = (page + TARGET_PAGE_SIZE) - addr;
2870 if (l > len)
2871 l = len;
2872 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2873 buf, l, is_write);
2874 len -= l;
2875 buf += l;
2876 addr += l;
2877 }
2878 return 0;
2879 }
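/* Dump statistics about the translation cache (TB sizes, cross-page
   TBs, direct jump chaining, flush counts) through the given
   cpu_fprintf callback. */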
2881 void dump_exec_info(FILE *f,
2882 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2883 {
2884 int i, target_code_size, max_target_code_size;
2885 int direct_jmp_count, direct_jmp2_count, cross_page;
2886 TranslationBlock *tb;
2888 target_code_size = 0;
2889 max_target_code_size = 0;
2890 cross_page = 0;
2891 direct_jmp_count = 0;
2892 direct_jmp2_count = 0;
2893 for(i = 0; i < nb_tbs; i++) {
2894 tb = &tbs[i];
2895 target_code_size += tb->size;
2896 if (tb->size > max_target_code_size)
2897 max_target_code_size = tb->size;
2898 if (tb->page_addr[1] != -1)
2899 cross_page++;
2900 if (tb->tb_next_offset[0] != 0xffff) {
2901 direct_jmp_count++;
2902 if (tb->tb_next_offset[1] != 0xffff) {
2903 direct_jmp2_count++;
2904 }
2905 }
2906 }
2907 /* XXX: avoid using doubles ? */
2908 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2909 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2910 nb_tbs ? target_code_size / nb_tbs : 0,
2911 max_target_code_size);
2912 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2913 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2914 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2915 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2916 cross_page,
2917 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2918 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2919 direct_jmp_count,
2920 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2921 direct_jmp2_count,
2922 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2923 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2924 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2925 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2926 }
2928 #if !defined(CONFIG_USER_ONLY)
2930 #define MMUSUFFIX _cmmu
2931 #define GETPC() NULL
2932 #define env cpu_single_env
2933 #define SOFTMMU_CODE_ACCESS
2935 #define SHIFT 0
2936 #include "softmmu_template.h"
2938 #define SHIFT 1
2939 #include "softmmu_template.h"
2941 #define SHIFT 2
2942 #include "softmmu_template.h"
2944 #define SHIFT 3
2945 #include "softmmu_template.h"
2947 #undef env
2949 #endif