[qemu/qemu_0_9_1_stable.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #endif
42 //#define DEBUG_TB_INVALIDATE
43 //#define DEBUG_FLUSH
44 //#define DEBUG_TLB
45 //#define DEBUG_UNASSIGNED
47 /* make various TB consistency checks */
48 //#define DEBUG_TB_CHECK
49 //#define DEBUG_TLB_CHECK
51 //#define DEBUG_IOPORT
52 //#define DEBUG_SUBPAGE
54 #if !defined(CONFIG_USER_ONLY)
55 /* TB consistency checks only implemented for usermode emulation. */
56 #undef DEBUG_TB_CHECK
57 #endif
59 /* threshold to flush the translated code buffer */
60 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
62 #define SMC_BITMAP_USE_THRESHOLD 10
64 #define MMAP_AREA_START 0x00000000
65 #define MMAP_AREA_END 0xa8000000
67 #if defined(TARGET_SPARC64)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 41
69 #elif defined(TARGET_SPARC)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 36
71 #elif defined(TARGET_ALPHA)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 #define TARGET_VIRT_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_PPC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #else
77 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78 #define TARGET_PHYS_ADDR_SPACE_BITS 32
79 #endif
81 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
82 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
83 int nb_tbs;
84 /* any access to the tbs or the page table must use this lock */
85 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
88 uint8_t *code_gen_ptr;
90 int phys_ram_size;
91 int phys_ram_fd;
92 uint8_t *phys_ram_base;
93 uint8_t *phys_ram_dirty;
94 static ram_addr_t phys_ram_alloc_offset = 0;
96 CPUState *first_cpu;
97 /* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
99 CPUState *cpu_single_env;
101 typedef struct PageDesc {
102 /* list of TBs intersecting this ram page */
103 TranslationBlock *first_tb;
104 /* in order to optimize self modifying code, we count the number
105 of write accesses to a given page; past a threshold we build a bitmap */
106 unsigned int code_write_count;
107 uint8_t *code_bitmap;
108 #if defined(CONFIG_USER_ONLY)
109 unsigned long flags;
110 #endif
111 } PageDesc;
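/* The per-page TB lists rooted at 'first_tb' store, in the low 2 bits of
   the pointer itself, which of a TB's two possible pages the link belongs
   to: 'first_tb' and 'page_next[n]' hold (TranslationBlock *)((long)tb | n),
   so readers recover n with ((long)tb & 3) and the real pointer with
   ((long)tb & ~3).  The tag value 2 is reserved as the end-of-list marker
   in the TB jump lists. */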
113 typedef struct PhysPageDesc {
114 /* offset in host memory of the page + io_index in the low 12 bits */
115 uint32_t phys_offset;
116 } PhysPageDesc;
118 #define L2_BITS 10
119 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
120 /* XXX: this is a temporary hack for alpha target.
121 * In the future, this is to be replaced by a multi-level table
122 * to actually be able to handle the complete 64 bit address space.
123 */
124 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
125 #else
126 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
127 #endif
129 #define L1_SIZE (1 << L1_BITS)
130 #define L2_SIZE (1 << L2_BITS)
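/* Illustrative example of the two-level lookup (assuming the 32 bit case
   above with TARGET_PAGE_BITS == 12, so L1_BITS == 10): for a target
   address A, index = A >> TARGET_PAGE_BITS selects the page;
   l1_map[index >> L2_BITS] picks the second-level table and
   index & (L2_SIZE - 1) the PageDesc inside it.  E.g. A = 0x12345678
   gives index = 0x12345, L1 slot 0x48 and L2 slot 0x345. */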
132 static void io_mem_init(void);
134 unsigned long qemu_real_host_page_size;
135 unsigned long qemu_host_page_bits;
136 unsigned long qemu_host_page_size;
137 unsigned long qemu_host_page_mask;
139 /* XXX: for system emulation, it could just be an array */
140 static PageDesc *l1_map[L1_SIZE];
141 PhysPageDesc **l1_phys_map;
143 /* io memory support */
144 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
145 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
146 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
147 static int io_mem_nb;
148 #if defined(CONFIG_SOFTMMU)
149 static int io_mem_watch;
150 #endif
152 /* log support */
153 char *logfilename = "/tmp/qemu.log";
154 FILE *logfile;
155 int loglevel;
156 static int log_append = 0;
158 /* statistics */
159 static int tlb_flush_count;
160 static int tb_flush_count;
161 static int tb_phys_invalidate_count;
163 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
164 typedef struct subpage_t {
165 target_phys_addr_t base;
166 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
167 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
168 void *opaque[TARGET_PAGE_SIZE][2][4];
169 } subpage_t;
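/* A subpage_t lets a single target page be backed by several memory
   regions: for each byte offset inside the page (SUBPAGE_IDX) and each
   access size it records which io_mem read/write handlers and opaque
   pointer should service the access.  subpage_register(), declared
   further down, fills an offset range with one backing region. */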
171 static void page_init(void)
173 /* NOTE: we can always suppose that qemu_host_page_size >=
174 TARGET_PAGE_SIZE */
175 #ifdef _WIN32
177 SYSTEM_INFO system_info;
178 DWORD old_protect;
180 GetSystemInfo(&system_info);
181 qemu_real_host_page_size = system_info.dwPageSize;
183 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
184 PAGE_EXECUTE_READWRITE, &old_protect);
186 #else
187 qemu_real_host_page_size = getpagesize();
189 unsigned long start, end;
191 start = (unsigned long)code_gen_buffer;
192 start &= ~(qemu_real_host_page_size - 1);
194 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
195 end += qemu_real_host_page_size - 1;
196 end &= ~(qemu_real_host_page_size - 1);
198 mprotect((void *)start, end - start,
199 PROT_READ | PROT_WRITE | PROT_EXEC);
201 #endif
203 if (qemu_host_page_size == 0)
204 qemu_host_page_size = qemu_real_host_page_size;
205 if (qemu_host_page_size < TARGET_PAGE_SIZE)
206 qemu_host_page_size = TARGET_PAGE_SIZE;
207 qemu_host_page_bits = 0;
208 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
209 qemu_host_page_bits++;
210 qemu_host_page_mask = ~(qemu_host_page_size - 1);
211 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
212 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
214 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
216 long long startaddr, endaddr;
217 FILE *f;
218 int n;
220 f = fopen("/proc/self/maps", "r");
221 if (f) {
222 do {
223 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
224 if (n == 2) {
225 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
226 TARGET_PAGE_ALIGN(endaddr),
227 PAGE_RESERVED);
229 } while (!feof(f));
230 fclose(f);
233 #endif
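/* Illustrative example of the host/target page reconciliation done by
   page_init(): on a host with 64 KB pages emulating a target with 4 KB
   pages, qemu_host_page_size stays 64 KB, so protection changes made with
   mprotect() necessarily cover 16 target pages at once -- which is why
   tb_alloc_page() later merges the flags of every target page inside one
   host page before protecting it. */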
236 static inline PageDesc *page_find_alloc(unsigned int index)
238 PageDesc **lp, *p;
240 lp = &l1_map[index >> L2_BITS];
241 p = *lp;
242 if (!p) {
243 /* allocate if not found */
244 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
245 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
246 *lp = p;
248 return p + (index & (L2_SIZE - 1));
251 static inline PageDesc *page_find(unsigned int index)
253 PageDesc *p;
255 p = l1_map[index >> L2_BITS];
256 if (!p)
257 return 0;
258 return p + (index & (L2_SIZE - 1));
261 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
263 void **lp, **p;
264 PhysPageDesc *pd;
266 p = (void **)l1_phys_map;
267 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
269 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
270 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
271 #endif
272 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
273 p = *lp;
274 if (!p) {
275 /* allocate if not found */
276 if (!alloc)
277 return NULL;
278 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
279 memset(p, 0, sizeof(void *) * L1_SIZE);
280 *lp = p;
282 #endif
283 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
284 pd = *lp;
285 if (!pd) {
286 int i;
287 /* allocate if not found */
288 if (!alloc)
289 return NULL;
290 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
291 *lp = pd;
292 for (i = 0; i < L2_SIZE; i++)
293 pd[i].phys_offset = IO_MEM_UNASSIGNED;
295 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
298 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
300 return phys_page_find_alloc(index, 0);
303 #if !defined(CONFIG_USER_ONLY)
304 static void tlb_protect_code(ram_addr_t ram_addr);
305 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
306 target_ulong vaddr);
307 #endif
309 void cpu_exec_init(CPUState *env)
311 CPUState **penv;
312 int cpu_index;
314 if (!code_gen_ptr) {
315 code_gen_ptr = code_gen_buffer;
316 page_init();
317 io_mem_init();
319 env->next_cpu = NULL;
320 penv = &first_cpu;
321 cpu_index = 0;
322 while (*penv != NULL) {
323 penv = (CPUState **)&(*penv)->next_cpu;
324 cpu_index++;
326 env->cpu_index = cpu_index;
327 env->nb_watchpoints = 0;
328 *penv = env;
331 static inline void invalidate_page_bitmap(PageDesc *p)
333 if (p->code_bitmap) {
334 qemu_free(p->code_bitmap);
335 p->code_bitmap = NULL;
337 p->code_write_count = 0;
340 /* set to NULL all the 'first_tb' fields in all PageDescs */
341 static void page_flush_tb(void)
343 int i, j;
344 PageDesc *p;
346 for(i = 0; i < L1_SIZE; i++) {
347 p = l1_map[i];
348 if (p) {
349 for(j = 0; j < L2_SIZE; j++) {
350 p->first_tb = NULL;
351 invalidate_page_bitmap(p);
352 p++;
358 /* flush all the translation blocks */
359 /* XXX: tb_flush is currently not thread safe */
360 void tb_flush(CPUState *env1)
362 CPUState *env;
363 #if defined(DEBUG_FLUSH)
364 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
365 (unsigned long)(code_gen_ptr - code_gen_buffer),
366 nb_tbs, nb_tbs > 0 ?
367 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
368 #endif
369 nb_tbs = 0;
371 for(env = first_cpu; env != NULL; env = env->next_cpu) {
372 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
375 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
376 page_flush_tb();
378 code_gen_ptr = code_gen_buffer;
379 /* XXX: flush processor icache at this point if cache flush is
380 expensive */
381 tb_flush_count++;
384 #ifdef DEBUG_TB_CHECK
386 static void tb_invalidate_check(target_ulong address)
388 TranslationBlock *tb;
389 int i;
390 address &= TARGET_PAGE_MASK;
391 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
392 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
393 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
394 address >= tb->pc + tb->size)) {
395 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
396 address, (long)tb->pc, tb->size);
402 /* verify that all the pages have correct rights for code */
403 static void tb_page_check(void)
405 TranslationBlock *tb;
406 int i, flags1, flags2;
408 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
409 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
410 flags1 = page_get_flags(tb->pc);
411 flags2 = page_get_flags(tb->pc + tb->size - 1);
412 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
413 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
414 (long)tb->pc, tb->size, flags1, flags2);
420 void tb_jmp_check(TranslationBlock *tb)
422 TranslationBlock *tb1;
423 unsigned int n1;
425 /* suppress any remaining jumps to this TB */
426 tb1 = tb->jmp_first;
427 for(;;) {
428 n1 = (long)tb1 & 3;
429 tb1 = (TranslationBlock *)((long)tb1 & ~3);
430 if (n1 == 2)
431 break;
432 tb1 = tb1->jmp_next[n1];
434 /* check end of list */
435 if (tb1 != tb) {
436 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
440 #endif
442 /* invalidate one TB */
443 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
444 int next_offset)
446 TranslationBlock *tb1;
447 for(;;) {
448 tb1 = *ptb;
449 if (tb1 == tb) {
450 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
451 break;
453 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
457 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
459 TranslationBlock *tb1;
460 unsigned int n1;
462 for(;;) {
463 tb1 = *ptb;
464 n1 = (long)tb1 & 3;
465 tb1 = (TranslationBlock *)((long)tb1 & ~3);
466 if (tb1 == tb) {
467 *ptb = tb1->page_next[n1];
468 break;
470 ptb = &tb1->page_next[n1];
474 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
476 TranslationBlock *tb1, **ptb;
477 unsigned int n1;
479 ptb = &tb->jmp_next[n];
480 tb1 = *ptb;
481 if (tb1) {
482 /* find tb(n) in circular list */
483 for(;;) {
484 tb1 = *ptb;
485 n1 = (long)tb1 & 3;
486 tb1 = (TranslationBlock *)((long)tb1 & ~3);
487 if (n1 == n && tb1 == tb)
488 break;
489 if (n1 == 2) {
490 ptb = &tb1->jmp_first;
491 } else {
492 ptb = &tb1->jmp_next[n1];
495 /* now we can suppress tb(n) from the list */
496 *ptb = tb->jmp_next[n];
498 tb->jmp_next[n] = NULL;
502 /* reset the jump entry 'n' of a TB so that it is not chained to
503 another TB */
504 static inline void tb_reset_jump(TranslationBlock *tb, int n)
506 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
509 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
511 CPUState *env;
512 PageDesc *p;
513 unsigned int h, n1;
514 target_ulong phys_pc;
515 TranslationBlock *tb1, *tb2;
517 /* remove the TB from the hash list */
518 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
519 h = tb_phys_hash_func(phys_pc);
520 tb_remove(&tb_phys_hash[h], tb,
521 offsetof(TranslationBlock, phys_hash_next));
523 /* remove the TB from the page list */
524 if (tb->page_addr[0] != page_addr) {
525 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
526 tb_page_remove(&p->first_tb, tb);
527 invalidate_page_bitmap(p);
529 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
530 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
531 tb_page_remove(&p->first_tb, tb);
532 invalidate_page_bitmap(p);
535 tb_invalidated_flag = 1;
537 /* remove the TB from the hash list */
538 h = tb_jmp_cache_hash_func(tb->pc);
539 for(env = first_cpu; env != NULL; env = env->next_cpu) {
540 if (env->tb_jmp_cache[h] == tb)
541 env->tb_jmp_cache[h] = NULL;
544 /* suppress this TB from the two jump lists */
545 tb_jmp_remove(tb, 0);
546 tb_jmp_remove(tb, 1);
548 /* suppress any remaining jumps to this TB */
549 tb1 = tb->jmp_first;
550 for(;;) {
551 n1 = (long)tb1 & 3;
552 if (n1 == 2)
553 break;
554 tb1 = (TranslationBlock *)((long)tb1 & ~3);
555 tb2 = tb1->jmp_next[n1];
556 tb_reset_jump(tb1, n1);
557 tb1->jmp_next[n1] = NULL;
558 tb1 = tb2;
560 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
562 tb_phys_invalidate_count++;
565 static inline void set_bits(uint8_t *tab, int start, int len)
567 int end, mask, end1;
569 end = start + len;
570 tab += start >> 3;
571 mask = 0xff << (start & 7);
572 if ((start & ~7) == (end & ~7)) {
573 if (start < end) {
574 mask &= ~(0xff << (end & 7));
575 *tab |= mask;
577 } else {
578 *tab++ |= mask;
579 start = (start + 8) & ~7;
580 end1 = end & ~7;
581 while (start < end1) {
582 *tab++ = 0xff;
583 start += 8;
585 if (start < end) {
586 mask = ~(0xff << (end & 7));
587 *tab |= mask;
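/* set_bits() worked example (illustrative): set_bits(tab, 3, 7) marks bits
   [3, 10) of the code bitmap.  start and end fall in different bytes, so
   the first byte is ORed with 0xff << 3 = 0xf8 (bits 3..7), no full 0xff
   bytes are needed, and the trailing byte is ORed with ~(0xff << 2), i.e.
   0x03 (bits 8..9). */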
592 static void build_page_bitmap(PageDesc *p)
594 int n, tb_start, tb_end;
595 TranslationBlock *tb;
597 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
598 if (!p->code_bitmap)
599 return;
600 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
602 tb = p->first_tb;
603 while (tb != NULL) {
604 n = (long)tb & 3;
605 tb = (TranslationBlock *)((long)tb & ~3);
606 /* NOTE: this is subtle as a TB may span two physical pages */
607 if (n == 0) {
608 /* NOTE: tb_end may be after the end of the page, but
609 it is not a problem */
610 tb_start = tb->pc & ~TARGET_PAGE_MASK;
611 tb_end = tb_start + tb->size;
612 if (tb_end > TARGET_PAGE_SIZE)
613 tb_end = TARGET_PAGE_SIZE;
614 } else {
615 tb_start = 0;
616 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
618 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
619 tb = tb->page_next[n];
623 #ifdef TARGET_HAS_PRECISE_SMC
625 static void tb_gen_code(CPUState *env,
626 target_ulong pc, target_ulong cs_base, int flags,
627 int cflags)
629 TranslationBlock *tb;
630 uint8_t *tc_ptr;
631 target_ulong phys_pc, phys_page2, virt_page2;
632 int code_gen_size;
634 phys_pc = get_phys_addr_code(env, pc);
635 tb = tb_alloc(pc);
636 if (!tb) {
637 /* flush must be done */
638 tb_flush(env);
639 /* cannot fail at this point */
640 tb = tb_alloc(pc);
642 tc_ptr = code_gen_ptr;
643 tb->tc_ptr = tc_ptr;
644 tb->cs_base = cs_base;
645 tb->flags = flags;
646 tb->cflags = cflags;
647 cpu_gen_code(env, tb, &code_gen_size);
648 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
650 /* check next page if needed */
651 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
652 phys_page2 = -1;
653 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
654 phys_page2 = get_phys_addr_code(env, virt_page2);
656 tb_link_phys(tb, phys_pc, phys_page2);
658 #endif
660 /* invalidate all TBs which intersect with the target physical page
661 starting in the range [start, end[. NOTE: start and end must refer to
662 the same physical page. 'is_cpu_write_access' should be true if called
663 from a real cpu write access: the virtual CPU will exit the current
664 TB if code is modified inside this TB. */
665 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
666 int is_cpu_write_access)
668 int n, current_tb_modified, current_tb_not_found, current_flags;
669 CPUState *env = cpu_single_env;
670 PageDesc *p;
671 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
672 target_ulong tb_start, tb_end;
673 target_ulong current_pc, current_cs_base;
675 p = page_find(start >> TARGET_PAGE_BITS);
676 if (!p)
677 return;
678 if (!p->code_bitmap &&
679 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
680 is_cpu_write_access) {
681 /* build code bitmap */
682 build_page_bitmap(p);
685 /* we remove all the TBs in the range [start, end[ */
686 /* XXX: see if in some cases it could be faster to invalidate all the code */
687 current_tb_not_found = is_cpu_write_access;
688 current_tb_modified = 0;
689 current_tb = NULL; /* avoid warning */
690 current_pc = 0; /* avoid warning */
691 current_cs_base = 0; /* avoid warning */
692 current_flags = 0; /* avoid warning */
693 tb = p->first_tb;
694 while (tb != NULL) {
695 n = (long)tb & 3;
696 tb = (TranslationBlock *)((long)tb & ~3);
697 tb_next = tb->page_next[n];
698 /* NOTE: this is subtle as a TB may span two physical pages */
699 if (n == 0) {
700 /* NOTE: tb_end may be after the end of the page, but
701 it is not a problem */
702 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
703 tb_end = tb_start + tb->size;
704 } else {
705 tb_start = tb->page_addr[1];
706 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
708 if (!(tb_end <= start || tb_start >= end)) {
709 #ifdef TARGET_HAS_PRECISE_SMC
710 if (current_tb_not_found) {
711 current_tb_not_found = 0;
712 current_tb = NULL;
713 if (env->mem_write_pc) {
714 /* now we have a real cpu fault */
715 current_tb = tb_find_pc(env->mem_write_pc);
718 if (current_tb == tb &&
719 !(current_tb->cflags & CF_SINGLE_INSN)) {
720 /* If we are modifying the current TB, we must stop
721 its execution. We could be more precise by checking
722 that the modification is after the current PC, but it
723 would require a specialized function to partially
724 restore the CPU state */
726 current_tb_modified = 1;
727 cpu_restore_state(current_tb, env,
728 env->mem_write_pc, NULL);
729 #if defined(TARGET_I386)
730 current_flags = env->hflags;
731 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
732 current_cs_base = (target_ulong)env->segs[R_CS].base;
733 current_pc = current_cs_base + env->eip;
734 #else
735 #error unsupported CPU
736 #endif
738 #endif /* TARGET_HAS_PRECISE_SMC */
739 /* we need to do that to handle the case where a signal
740 occurs while doing tb_phys_invalidate() */
741 saved_tb = NULL;
742 if (env) {
743 saved_tb = env->current_tb;
744 env->current_tb = NULL;
746 tb_phys_invalidate(tb, -1);
747 if (env) {
748 env->current_tb = saved_tb;
749 if (env->interrupt_request && env->current_tb)
750 cpu_interrupt(env, env->interrupt_request);
753 tb = tb_next;
755 #if !defined(CONFIG_USER_ONLY)
756 /* if no code remaining, no need to continue to use slow writes */
757 if (!p->first_tb) {
758 invalidate_page_bitmap(p);
759 if (is_cpu_write_access) {
760 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
763 #endif
764 #ifdef TARGET_HAS_PRECISE_SMC
765 if (current_tb_modified) {
766 /* we generate a block containing just the instruction
767 modifying the memory. It will ensure that it cannot modify
768 itself */
769 env->current_tb = NULL;
770 tb_gen_code(env, current_pc, current_cs_base, current_flags,
771 CF_SINGLE_INSN);
772 cpu_resume_from_signal(env, NULL);
774 #endif
777 /* len must be <= 8 and start must be a multiple of len */
778 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
780 PageDesc *p;
781 int offset, b;
782 #if 0
783 if (1) {
784 if (loglevel) {
785 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
786 cpu_single_env->mem_write_vaddr, len,
787 cpu_single_env->eip,
788 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
791 #endif
792 p = page_find(start >> TARGET_PAGE_BITS);
793 if (!p)
794 return;
795 if (p->code_bitmap) {
796 offset = start & ~TARGET_PAGE_MASK;
797 b = p->code_bitmap[offset >> 3] >> (offset & 7);
798 if (b & ((1 << len) - 1))
799 goto do_invalidate;
800 } else {
801 do_invalidate:
802 tb_invalidate_phys_page_range(start, start + len, 1);
806 #if !defined(CONFIG_SOFTMMU)
807 static void tb_invalidate_phys_page(target_ulong addr,
808 unsigned long pc, void *puc)
810 int n, current_flags, current_tb_modified;
811 target_ulong current_pc, current_cs_base;
812 PageDesc *p;
813 TranslationBlock *tb, *current_tb;
814 #ifdef TARGET_HAS_PRECISE_SMC
815 CPUState *env = cpu_single_env;
816 #endif
818 addr &= TARGET_PAGE_MASK;
819 p = page_find(addr >> TARGET_PAGE_BITS);
820 if (!p)
821 return;
822 tb = p->first_tb;
823 current_tb_modified = 0;
824 current_tb = NULL;
825 current_pc = 0; /* avoid warning */
826 current_cs_base = 0; /* avoid warning */
827 current_flags = 0; /* avoid warning */
828 #ifdef TARGET_HAS_PRECISE_SMC
829 if (tb && pc != 0) {
830 current_tb = tb_find_pc(pc);
832 #endif
833 while (tb != NULL) {
834 n = (long)tb & 3;
835 tb = (TranslationBlock *)((long)tb & ~3);
836 #ifdef TARGET_HAS_PRECISE_SMC
837 if (current_tb == tb &&
838 !(current_tb->cflags & CF_SINGLE_INSN)) {
839 /* If we are modifying the current TB, we must stop
840 its execution. We could be more precise by checking
841 that the modification is after the current PC, but it
842 would require a specialized function to partially
843 restore the CPU state */
845 current_tb_modified = 1;
846 cpu_restore_state(current_tb, env, pc, puc);
847 #if defined(TARGET_I386)
848 current_flags = env->hflags;
849 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
850 current_cs_base = (target_ulong)env->segs[R_CS].base;
851 current_pc = current_cs_base + env->eip;
852 #else
853 #error unsupported CPU
854 #endif
856 #endif /* TARGET_HAS_PRECISE_SMC */
857 tb_phys_invalidate(tb, addr);
858 tb = tb->page_next[n];
860 p->first_tb = NULL;
861 #ifdef TARGET_HAS_PRECISE_SMC
862 if (current_tb_modified) {
863 /* we generate a block containing just the instruction
864 modifying the memory. It will ensure that it cannot modify
865 itself */
866 env->current_tb = NULL;
867 tb_gen_code(env, current_pc, current_cs_base, current_flags,
868 CF_SINGLE_INSN);
869 cpu_resume_from_signal(env, puc);
871 #endif
873 #endif
875 /* add the tb in the target page and protect it if necessary */
876 static inline void tb_alloc_page(TranslationBlock *tb,
877 unsigned int n, target_ulong page_addr)
879 PageDesc *p;
880 TranslationBlock *last_first_tb;
882 tb->page_addr[n] = page_addr;
883 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
884 tb->page_next[n] = p->first_tb;
885 last_first_tb = p->first_tb;
886 p->first_tb = (TranslationBlock *)((long)tb | n);
887 invalidate_page_bitmap(p);
889 #if defined(TARGET_HAS_SMC) || 1
891 #if defined(CONFIG_USER_ONLY)
892 if (p->flags & PAGE_WRITE) {
893 target_ulong addr;
894 PageDesc *p2;
895 int prot;
897 /* force the host page as non writable (writes will have a
898 page fault + mprotect overhead) */
899 page_addr &= qemu_host_page_mask;
900 prot = 0;
901 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
902 addr += TARGET_PAGE_SIZE) {
904 p2 = page_find (addr >> TARGET_PAGE_BITS);
905 if (!p2)
906 continue;
907 prot |= p2->flags;
908 p2->flags &= ~PAGE_WRITE;
909 page_get_flags(addr);
911 mprotect(g2h(page_addr), qemu_host_page_size,
912 (prot & PAGE_BITS) & ~PAGE_WRITE);
913 #ifdef DEBUG_TB_INVALIDATE
914 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
915 page_addr);
916 #endif
918 #else
919 /* if some code is already present, then the pages are already
920 protected. So we handle the case where only the first TB is
921 allocated in a physical page */
922 if (!last_first_tb) {
923 tlb_protect_code(page_addr);
925 #endif
927 #endif /* TARGET_HAS_SMC */
930 /* Allocate a new translation block. Flush the translation buffer if
931 too many translation blocks or too much generated code. */
932 TranslationBlock *tb_alloc(target_ulong pc)
934 TranslationBlock *tb;
936 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
937 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
938 return NULL;
939 tb = &tbs[nb_tbs++];
940 tb->pc = pc;
941 tb->cflags = 0;
942 return tb;
945 /* add a new TB and link it to the physical page tables. phys_page2 is
946 (-1) to indicate that only one page contains the TB. */
947 void tb_link_phys(TranslationBlock *tb,
948 target_ulong phys_pc, target_ulong phys_page2)
950 unsigned int h;
951 TranslationBlock **ptb;
953 /* add in the physical hash table */
954 h = tb_phys_hash_func(phys_pc);
955 ptb = &tb_phys_hash[h];
956 tb->phys_hash_next = *ptb;
957 *ptb = tb;
959 /* add in the page list */
960 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
961 if (phys_page2 != -1)
962 tb_alloc_page(tb, 1, phys_page2);
963 else
964 tb->page_addr[1] = -1;
966 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
967 tb->jmp_next[0] = NULL;
968 tb->jmp_next[1] = NULL;
970 /* init original jump addresses */
971 if (tb->tb_next_offset[0] != 0xffff)
972 tb_reset_jump(tb, 0);
973 if (tb->tb_next_offset[1] != 0xffff)
974 tb_reset_jump(tb, 1);
976 #ifdef DEBUG_TB_CHECK
977 tb_page_check();
978 #endif
981 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
982 tb[1].tc_ptr. Return NULL if not found */
983 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
985 int m_min, m_max, m;
986 unsigned long v;
987 TranslationBlock *tb;
989 if (nb_tbs <= 0)
990 return NULL;
991 if (tc_ptr < (unsigned long)code_gen_buffer ||
992 tc_ptr >= (unsigned long)code_gen_ptr)
993 return NULL;
994 /* binary search (cf Knuth) */
995 m_min = 0;
996 m_max = nb_tbs - 1;
997 while (m_min <= m_max) {
998 m = (m_min + m_max) >> 1;
999 tb = &tbs[m];
1000 v = (unsigned long)tb->tc_ptr;
1001 if (v == tc_ptr)
1002 return tb;
1003 else if (tc_ptr < v) {
1004 m_max = m - 1;
1005 } else {
1006 m_min = m + 1;
1009 return &tbs[m_max];
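/* When tc_ptr does not hit a block start exactly, the search above ends
   with m_max at the last TB whose tc_ptr is below tc_ptr, i.e. the block
   whose generated code contains that host address -- which is what the
   fault-handling callers that map a host PC back to a TB rely on. */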
1012 static void tb_reset_jump_recursive(TranslationBlock *tb);
1014 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1016 TranslationBlock *tb1, *tb_next, **ptb;
1017 unsigned int n1;
1019 tb1 = tb->jmp_next[n];
1020 if (tb1 != NULL) {
1021 /* find head of list */
1022 for(;;) {
1023 n1 = (long)tb1 & 3;
1024 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1025 if (n1 == 2)
1026 break;
1027 tb1 = tb1->jmp_next[n1];
1029 /* we are now sure that tb jumps to tb1 */
1030 tb_next = tb1;
1032 /* remove tb from the jmp_first list */
1033 ptb = &tb_next->jmp_first;
1034 for(;;) {
1035 tb1 = *ptb;
1036 n1 = (long)tb1 & 3;
1037 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1038 if (n1 == n && tb1 == tb)
1039 break;
1040 ptb = &tb1->jmp_next[n1];
1042 *ptb = tb->jmp_next[n];
1043 tb->jmp_next[n] = NULL;
1045 /* suppress the jump to next tb in generated code */
1046 tb_reset_jump(tb, n);
1048 /* suppress jumps in the tb on which we could have jumped */
1049 tb_reset_jump_recursive(tb_next);
1053 static void tb_reset_jump_recursive(TranslationBlock *tb)
1055 tb_reset_jump_recursive2(tb, 0);
1056 tb_reset_jump_recursive2(tb, 1);
1059 #if defined(TARGET_HAS_ICE)
1060 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1062 target_phys_addr_t addr;
1063 target_ulong pd;
1064 ram_addr_t ram_addr;
1065 PhysPageDesc *p;
1067 addr = cpu_get_phys_page_debug(env, pc);
1068 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1069 if (!p) {
1070 pd = IO_MEM_UNASSIGNED;
1071 } else {
1072 pd = p->phys_offset;
1074 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1075 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1077 #endif
1079 /* Add a watchpoint. */
1080 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1082 int i;
1084 for (i = 0; i < env->nb_watchpoints; i++) {
1085 if (addr == env->watchpoint[i].vaddr)
1086 return 0;
1088 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1089 return -1;
1091 i = env->nb_watchpoints++;
1092 env->watchpoint[i].vaddr = addr;
1093 tlb_flush_page(env, addr);
1094 /* FIXME: This flush is needed because of the hack to make memory ops
1095 terminate the TB. It can be removed once the proper IO trap and
1096 re-execute bits are in. */
1097 tb_flush(env);
1098 return i;
1101 /* Remove a watchpoint. */
1102 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1104 int i;
1106 for (i = 0; i < env->nb_watchpoints; i++) {
1107 if (addr == env->watchpoint[i].vaddr) {
1108 env->nb_watchpoints--;
1109 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1110 tlb_flush_page(env, addr);
1111 return 0;
1114 return -1;
1117 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1118 breakpoint is reached */
1119 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1121 #if defined(TARGET_HAS_ICE)
1122 int i;
1124 for(i = 0; i < env->nb_breakpoints; i++) {
1125 if (env->breakpoints[i] == pc)
1126 return 0;
1129 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1130 return -1;
1131 env->breakpoints[env->nb_breakpoints++] = pc;
1133 breakpoint_invalidate(env, pc);
1134 return 0;
1135 #else
1136 return -1;
1137 #endif
1140 /* remove a breakpoint */
1141 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1143 #if defined(TARGET_HAS_ICE)
1144 int i;
1145 for(i = 0; i < env->nb_breakpoints; i++) {
1146 if (env->breakpoints[i] == pc)
1147 goto found;
1149 return -1;
1150 found:
1151 env->nb_breakpoints--;
1152 if (i < env->nb_breakpoints)
1153 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1155 breakpoint_invalidate(env, pc);
1156 return 0;
1157 #else
1158 return -1;
1159 #endif
1162 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1163 CPU loop after each instruction */
1164 void cpu_single_step(CPUState *env, int enabled)
1166 #if defined(TARGET_HAS_ICE)
1167 if (env->singlestep_enabled != enabled) {
1168 env->singlestep_enabled = enabled;
1169 /* must flush all the translated code to avoid inconsistencies */
1170 /* XXX: only flush what is necessary */
1171 tb_flush(env);
1173 #endif
1176 /* enable or disable low levels log */
1177 void cpu_set_log(int log_flags)
1179 loglevel = log_flags;
1180 if (loglevel && !logfile) {
1181 logfile = fopen(logfilename, log_append ? "a" : "w");
1182 if (!logfile) {
1183 perror(logfilename);
1184 _exit(1);
1186 #if !defined(CONFIG_SOFTMMU)
1187 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1189 static uint8_t logfile_buf[4096];
1190 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1192 #else
1193 setvbuf(logfile, NULL, _IOLBF, 0);
1194 #endif
1195 log_append = 1;
1197 if (!loglevel && logfile) {
1198 fclose(logfile);
1199 logfile = NULL;
1203 void cpu_set_log_filename(const char *filename)
1205 logfilename = strdup(filename);
1206 if (logfile) {
1207 fclose(logfile);
1208 logfile = NULL;
1210 cpu_set_log(loglevel);
1213 /* mask must never be zero, except for A20 change call */
1214 void cpu_interrupt(CPUState *env, int mask)
1216 TranslationBlock *tb;
1217 static int interrupt_lock;
1219 env->interrupt_request |= mask;
1220 /* if the cpu is currently executing code, we must unlink it and
1221 all the potentially executing TB */
1222 tb = env->current_tb;
1223 if (tb && !testandset(&interrupt_lock)) {
1224 env->current_tb = NULL;
1225 tb_reset_jump_recursive(tb);
1226 interrupt_lock = 0;
1230 void cpu_reset_interrupt(CPUState *env, int mask)
1232 env->interrupt_request &= ~mask;
1235 CPULogItem cpu_log_items[] = {
1236 { CPU_LOG_TB_OUT_ASM, "out_asm",
1237 "show generated host assembly code for each compiled TB" },
1238 { CPU_LOG_TB_IN_ASM, "in_asm",
1239 "show target assembly code for each compiled TB" },
1240 { CPU_LOG_TB_OP, "op",
1241 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1242 #ifdef TARGET_I386
1243 { CPU_LOG_TB_OP_OPT, "op_opt",
1244 "show micro ops after optimization for each compiled TB" },
1245 #endif
1246 { CPU_LOG_INT, "int",
1247 "show interrupts/exceptions in short format" },
1248 { CPU_LOG_EXEC, "exec",
1249 "show trace before each executed TB (lots of logs)" },
1250 { CPU_LOG_TB_CPU, "cpu",
1251 "show CPU state before block translation" },
1252 #ifdef TARGET_I386
1253 { CPU_LOG_PCALL, "pcall",
1254 "show protected mode far calls/returns/exceptions" },
1255 #endif
1256 #ifdef DEBUG_IOPORT
1257 { CPU_LOG_IOPORT, "ioport",
1258 "show all i/o port accesses" },
1259 #endif
1260 { 0, NULL, NULL },
1263 static int cmp1(const char *s1, int n, const char *s2)
1265 if (strlen(s2) != n)
1266 return 0;
1267 return memcmp(s1, s2, n) == 0;
1270 /* takes a comma separated list of log masks. Return 0 if error. */
1271 int cpu_str_to_log_mask(const char *str)
1273 CPULogItem *item;
1274 int mask;
1275 const char *p, *p1;
1277 p = str;
1278 mask = 0;
1279 for(;;) {
1280 p1 = strchr(p, ',');
1281 if (!p1)
1282 p1 = p + strlen(p);
1283 if(cmp1(p,p1-p,"all")) {
1284 for(item = cpu_log_items; item->mask != 0; item++) {
1285 mask |= item->mask;
1287 } else {
1288 for(item = cpu_log_items; item->mask != 0; item++) {
1289 if (cmp1(p, p1 - p, item->name))
1290 goto found;
1292 return 0;
1294 found:
1295 mask |= item->mask;
1296 if (*p1 != ',')
1297 break;
1298 p = p1 + 1;
1300 return mask;
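/* Usage example (illustrative): cpu_str_to_log_mask("in_asm,op") matches
   each comma separated name against cpu_log_items[] and returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP; "all" selects every mask, and any
   unknown name makes the whole call return 0. */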
1303 void cpu_abort(CPUState *env, const char *fmt, ...)
1305 va_list ap;
1306 va_list ap2;
1308 va_start(ap, fmt);
1309 va_copy(ap2, ap);
1310 fprintf(stderr, "qemu: fatal: ");
1311 vfprintf(stderr, fmt, ap);
1312 fprintf(stderr, "\n");
1313 #ifdef TARGET_I386
1314 if(env->intercept & INTERCEPT_SVM_MASK) {
1315 /* most probably the virtual machine should not
1316 be shut down but rather caught by the VMM */
1317 vmexit(SVM_EXIT_SHUTDOWN, 0);
1319 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1320 #else
1321 cpu_dump_state(env, stderr, fprintf, 0);
1322 #endif
1323 if (logfile) {
1324 fprintf(logfile, "qemu: fatal: ");
1325 vfprintf(logfile, fmt, ap2);
1326 fprintf(logfile, "\n");
1327 #ifdef TARGET_I386
1328 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1329 #else
1330 cpu_dump_state(env, logfile, fprintf, 0);
1331 #endif
1332 fflush(logfile);
1333 fclose(logfile);
1335 va_end(ap2);
1336 va_end(ap);
1337 abort();
1340 CPUState *cpu_copy(CPUState *env)
1342 CPUState *new_env = cpu_init(env->cpu_model_str);
1343 /* preserve chaining and index */
1344 CPUState *next_cpu = new_env->next_cpu;
1345 int cpu_index = new_env->cpu_index;
1346 memcpy(new_env, env, sizeof(CPUState));
1347 new_env->next_cpu = next_cpu;
1348 new_env->cpu_index = cpu_index;
1349 return new_env;
1352 #if !defined(CONFIG_USER_ONLY)
1354 /* NOTE: if flush_global is true, also flush global entries (not
1355 implemented yet) */
1356 void tlb_flush(CPUState *env, int flush_global)
1358 int i;
1360 #if defined(DEBUG_TLB)
1361 printf("tlb_flush:\n");
1362 #endif
1363 /* must reset current TB so that interrupts cannot modify the
1364 links while we are modifying them */
1365 env->current_tb = NULL;
1367 for(i = 0; i < CPU_TLB_SIZE; i++) {
1368 env->tlb_table[0][i].addr_read = -1;
1369 env->tlb_table[0][i].addr_write = -1;
1370 env->tlb_table[0][i].addr_code = -1;
1371 env->tlb_table[1][i].addr_read = -1;
1372 env->tlb_table[1][i].addr_write = -1;
1373 env->tlb_table[1][i].addr_code = -1;
1374 #if (NB_MMU_MODES >= 3)
1375 env->tlb_table[2][i].addr_read = -1;
1376 env->tlb_table[2][i].addr_write = -1;
1377 env->tlb_table[2][i].addr_code = -1;
1378 #if (NB_MMU_MODES == 4)
1379 env->tlb_table[3][i].addr_read = -1;
1380 env->tlb_table[3][i].addr_write = -1;
1381 env->tlb_table[3][i].addr_code = -1;
1382 #endif
1383 #endif
1386 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1388 #if !defined(CONFIG_SOFTMMU)
1389 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1390 #endif
1391 #ifdef USE_KQEMU
1392 if (env->kqemu_enabled) {
1393 kqemu_flush(env, flush_global);
1395 #endif
1396 tlb_flush_count++;
1399 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1401 if (addr == (tlb_entry->addr_read &
1402 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1403 addr == (tlb_entry->addr_write &
1404 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1405 addr == (tlb_entry->addr_code &
1406 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1407 tlb_entry->addr_read = -1;
1408 tlb_entry->addr_write = -1;
1409 tlb_entry->addr_code = -1;
1413 void tlb_flush_page(CPUState *env, target_ulong addr)
1415 int i;
1416 TranslationBlock *tb;
1418 #if defined(DEBUG_TLB)
1419 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1420 #endif
1421 /* must reset current TB so that interrupts cannot modify the
1422 links while we are modifying them */
1423 env->current_tb = NULL;
1425 addr &= TARGET_PAGE_MASK;
1426 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1427 tlb_flush_entry(&env->tlb_table[0][i], addr);
1428 tlb_flush_entry(&env->tlb_table[1][i], addr);
1429 #if (NB_MMU_MODES >= 3)
1430 tlb_flush_entry(&env->tlb_table[2][i], addr);
1431 #if (NB_MMU_MODES == 4)
1432 tlb_flush_entry(&env->tlb_table[3][i], addr);
1433 #endif
1434 #endif
1436 /* Discard jump cache entries for any tb which might potentially
1437 overlap the flushed page. */
1438 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1439 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1441 i = tb_jmp_cache_hash_page(addr);
1442 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1444 #if !defined(CONFIG_SOFTMMU)
1445 if (addr < MMAP_AREA_END)
1446 munmap((void *)addr, TARGET_PAGE_SIZE);
1447 #endif
1448 #ifdef USE_KQEMU
1449 if (env->kqemu_enabled) {
1450 kqemu_flush_page(env, addr);
1452 #endif
1455 /* update the TLBs so that writes to code in the virtual page 'addr'
1456 can be detected */
1457 static void tlb_protect_code(ram_addr_t ram_addr)
1459 cpu_physical_memory_reset_dirty(ram_addr,
1460 ram_addr + TARGET_PAGE_SIZE,
1461 CODE_DIRTY_FLAG);
1464 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1465 tested for self modifying code */
1466 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1467 target_ulong vaddr)
1469 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
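/* The two helpers above implement self-modifying-code detection through
   the dirty bitmap: tlb_protect_code() clears CODE_DIRTY_FLAG for a page
   that now holds translated code, forcing writes to it through the slow
   notdirty path where tb_invalidate_phys_page_fast() can run; once no TB
   references the page any more, tlb_unprotect_code_phys() sets the flag
   again and plain fast RAM writes resume. */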
1472 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1473 unsigned long start, unsigned long length)
1475 unsigned long addr;
1476 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1477 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1478 if ((addr - start) < length) {
1479 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1484 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1485 int dirty_flags)
1487 CPUState *env;
1488 unsigned long length, start1;
1489 int i, mask, len;
1490 uint8_t *p;
1492 start &= TARGET_PAGE_MASK;
1493 end = TARGET_PAGE_ALIGN(end);
1495 length = end - start;
1496 if (length == 0)
1497 return;
1498 len = length >> TARGET_PAGE_BITS;
1499 #ifdef USE_KQEMU
1500 /* XXX: should not depend on cpu context */
1501 env = first_cpu;
1502 if (env->kqemu_enabled) {
1503 ram_addr_t addr;
1504 addr = start;
1505 for(i = 0; i < len; i++) {
1506 kqemu_set_notdirty(env, addr);
1507 addr += TARGET_PAGE_SIZE;
1510 #endif
1511 mask = ~dirty_flags;
1512 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1513 for(i = 0; i < len; i++)
1514 p[i] &= mask;
1516 /* we modify the TLB cache so that the dirty bit will be set again
1517 when accessing the range */
1518 start1 = start + (unsigned long)phys_ram_base;
1519 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1520 for(i = 0; i < CPU_TLB_SIZE; i++)
1521 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1522 for(i = 0; i < CPU_TLB_SIZE; i++)
1523 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1524 #if (NB_MMU_MODES >= 3)
1525 for(i = 0; i < CPU_TLB_SIZE; i++)
1526 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1527 #if (NB_MMU_MODES == 4)
1528 for(i = 0; i < CPU_TLB_SIZE; i++)
1529 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1530 #endif
1531 #endif
1534 #if !defined(CONFIG_SOFTMMU)
1535 /* XXX: this is expensive */
1537 VirtPageDesc *p;
1538 int j;
1539 target_ulong addr;
1541 for(i = 0; i < L1_SIZE; i++) {
1542 p = l1_virt_map[i];
1543 if (p) {
1544 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1545 for(j = 0; j < L2_SIZE; j++) {
1546 if (p->valid_tag == virt_valid_tag &&
1547 p->phys_addr >= start && p->phys_addr < end &&
1548 (p->prot & PROT_WRITE)) {
1549 if (addr < MMAP_AREA_END) {
1550 mprotect((void *)addr, TARGET_PAGE_SIZE,
1551 p->prot & ~PROT_WRITE);
1554 addr += TARGET_PAGE_SIZE;
1555 p++;
1560 #endif
1563 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1565 ram_addr_t ram_addr;
1567 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1568 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1569 tlb_entry->addend - (unsigned long)phys_ram_base;
1570 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1571 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1576 /* update the TLB according to the current state of the dirty bits */
1577 void cpu_tlb_update_dirty(CPUState *env)
1579 int i;
1580 for(i = 0; i < CPU_TLB_SIZE; i++)
1581 tlb_update_dirty(&env->tlb_table[0][i]);
1582 for(i = 0; i < CPU_TLB_SIZE; i++)
1583 tlb_update_dirty(&env->tlb_table[1][i]);
1584 #if (NB_MMU_MODES >= 3)
1585 for(i = 0; i < CPU_TLB_SIZE; i++)
1586 tlb_update_dirty(&env->tlb_table[2][i]);
1587 #if (NB_MMU_MODES == 4)
1588 for(i = 0; i < CPU_TLB_SIZE; i++)
1589 tlb_update_dirty(&env->tlb_table[3][i]);
1590 #endif
1591 #endif
1594 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1595 unsigned long start)
1597 unsigned long addr;
1598 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1599 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1600 if (addr == start) {
1601 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1606 /* update the TLB corresponding to virtual page vaddr and phys addr
1607 addr so that it is no longer dirty */
1608 static inline void tlb_set_dirty(CPUState *env,
1609 unsigned long addr, target_ulong vaddr)
1611 int i;
1613 addr &= TARGET_PAGE_MASK;
1614 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1615 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1616 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1617 #if (NB_MMU_MODES >= 3)
1618 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1619 #if (NB_MMU_MODES == 4)
1620 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1621 #endif
1622 #endif
1625 /* add a new TLB entry. At most one entry for a given virtual address
1626 is permitted. Return 0 if OK or 2 if the page could not be mapped
1627 (can only happen in non SOFTMMU mode for I/O pages or pages
1628 conflicting with the host address space). */
1629 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1630 target_phys_addr_t paddr, int prot,
1631 int mmu_idx, int is_softmmu)
1633 PhysPageDesc *p;
1634 unsigned long pd;
1635 unsigned int index;
1636 target_ulong address;
1637 target_phys_addr_t addend;
1638 int ret;
1639 CPUTLBEntry *te;
1640 int i;
1642 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1643 if (!p) {
1644 pd = IO_MEM_UNASSIGNED;
1645 } else {
1646 pd = p->phys_offset;
1648 #if defined(DEBUG_TLB)
1649 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1650 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1651 #endif
1653 ret = 0;
1654 #if !defined(CONFIG_SOFTMMU)
1655 if (is_softmmu)
1656 #endif
1658 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1659 /* IO memory case */
1660 address = vaddr | pd;
1661 addend = paddr;
1662 } else {
1663 /* standard memory */
1664 address = vaddr;
1665 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1668 /* Make accesses to pages with watchpoints go via the
1669 watchpoint trap routines. */
1670 for (i = 0; i < env->nb_watchpoints; i++) {
1671 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1672 if (address & ~TARGET_PAGE_MASK) {
1673 env->watchpoint[i].addend = 0;
1674 address = vaddr | io_mem_watch;
1675 } else {
1676 env->watchpoint[i].addend = pd - paddr +
1677 (unsigned long) phys_ram_base;
1678 /* TODO: Figure out how to make read watchpoints coexist
1679 with code. */
1680 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1685 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1686 addend -= vaddr;
1687 te = &env->tlb_table[mmu_idx][index];
1688 te->addend = addend;
1689 if (prot & PAGE_READ) {
1690 te->addr_read = address;
1691 } else {
1692 te->addr_read = -1;
1694 if (prot & PAGE_EXEC) {
1695 te->addr_code = address;
1696 } else {
1697 te->addr_code = -1;
1699 if (prot & PAGE_WRITE) {
1700 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1701 (pd & IO_MEM_ROMD)) {
1702 /* write access calls the I/O callback */
1703 te->addr_write = vaddr |
1704 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1705 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1706 !cpu_physical_memory_is_dirty(pd)) {
1707 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1708 } else {
1709 te->addr_write = address;
1711 } else {
1712 te->addr_write = -1;
1715 #if !defined(CONFIG_SOFTMMU)
1716 else {
1717 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1718 /* IO access: no mapping is done as it will be handled by the
1719 soft MMU */
1720 if (!(env->hflags & HF_SOFTMMU_MASK))
1721 ret = 2;
1722 } else {
1723 void *map_addr;
1725 if (vaddr >= MMAP_AREA_END) {
1726 ret = 2;
1727 } else {
1728 if (prot & PROT_WRITE) {
1729 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1730 #if defined(TARGET_HAS_SMC) || 1
1731 first_tb ||
1732 #endif
1733 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1734 !cpu_physical_memory_is_dirty(pd))) {
1735 /* ROM: we behave as if code were present inside */
1736 /* if code is present, we only map as read only and save the
1737 original mapping */
1738 VirtPageDesc *vp;
1740 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1741 vp->phys_addr = pd;
1742 vp->prot = prot;
1743 vp->valid_tag = virt_valid_tag;
1744 prot &= ~PAGE_WRITE;
1747 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1748 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1749 if (map_addr == MAP_FAILED) {
1750 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1751 paddr, vaddr);
1756 #endif
1757 return ret;
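/* Note on the softmmu fast path fed by tlb_set_page_exec(): the TLB
   entry's 'addend' holds host_address - guest_vaddr for the whole page,
   so a TLB hit in generated code resolves a guest access with a single
   addition, and only I/O, not-dirty and watchpointed pages fall back to
   the slow helpers. */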
1760 /* called from signal handler: invalidate the code and unprotect the
1761 page. Return TRUE if the fault was successfully handled. */
1762 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1764 #if !defined(CONFIG_SOFTMMU)
1765 VirtPageDesc *vp;
1767 #if defined(DEBUG_TLB)
1768 printf("page_unprotect: addr=0x%08x\n", addr);
1769 #endif
1770 addr &= TARGET_PAGE_MASK;
1772 /* if it is not mapped, no need to worry here */
1773 if (addr >= MMAP_AREA_END)
1774 return 0;
1775 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1776 if (!vp)
1777 return 0;
1778 /* NOTE: in this case, valid_tag is _not_ tested as it
1779 validates only the code TLB */
1780 if (vp->valid_tag != virt_valid_tag)
1781 return 0;
1782 if (!(vp->prot & PAGE_WRITE))
1783 return 0;
1784 #if defined(DEBUG_TLB)
1785 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1786 addr, vp->phys_addr, vp->prot);
1787 #endif
1788 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1789 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1790 (unsigned long)addr, vp->prot);
1791 /* set the dirty bit */
1792 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1793 /* flush the code inside */
1794 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1795 return 1;
1796 #else
1797 return 0;
1798 #endif
1801 #else
1803 void tlb_flush(CPUState *env, int flush_global)
1807 void tlb_flush_page(CPUState *env, target_ulong addr)
1811 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1812 target_phys_addr_t paddr, int prot,
1813 int mmu_idx, int is_softmmu)
1815 return 0;
1818 /* dump memory mappings */
1819 void page_dump(FILE *f)
1821 unsigned long start, end;
1822 int i, j, prot, prot1;
1823 PageDesc *p;
1825 fprintf(f, "%-8s %-8s %-8s %s\n",
1826 "start", "end", "size", "prot");
1827 start = -1;
1828 end = -1;
1829 prot = 0;
1830 for(i = 0; i <= L1_SIZE; i++) {
1831 if (i < L1_SIZE)
1832 p = l1_map[i];
1833 else
1834 p = NULL;
1835 for(j = 0;j < L2_SIZE; j++) {
1836 if (!p)
1837 prot1 = 0;
1838 else
1839 prot1 = p[j].flags;
1840 if (prot1 != prot) {
1841 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1842 if (start != -1) {
1843 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1844 start, end, end - start,
1845 prot & PAGE_READ ? 'r' : '-',
1846 prot & PAGE_WRITE ? 'w' : '-',
1847 prot & PAGE_EXEC ? 'x' : '-');
1849 if (prot1 != 0)
1850 start = end;
1851 else
1852 start = -1;
1853 prot = prot1;
1855 if (!p)
1856 break;
1861 int page_get_flags(target_ulong address)
1863 PageDesc *p;
1865 p = page_find(address >> TARGET_PAGE_BITS);
1866 if (!p)
1867 return 0;
1868 return p->flags;
1871 /* modify the flags of a page and invalidate the code if
1872 necessary. The flag PAGE_WRITE_ORG is set automatically
1873 depending on PAGE_WRITE */
1874 void page_set_flags(target_ulong start, target_ulong end, int flags)
1876 PageDesc *p;
1877 target_ulong addr;
1879 start = start & TARGET_PAGE_MASK;
1880 end = TARGET_PAGE_ALIGN(end);
1881 if (flags & PAGE_WRITE)
1882 flags |= PAGE_WRITE_ORG;
1883 spin_lock(&tb_lock);
1884 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1885 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1886 /* if the write protection is set, then we invalidate the code
1887 inside */
1888 if (!(p->flags & PAGE_WRITE) &&
1889 (flags & PAGE_WRITE) &&
1890 p->first_tb) {
1891 tb_invalidate_phys_page(addr, 0, NULL);
1893 p->flags = flags;
1895 spin_unlock(&tb_lock);
1898 int page_check_range(target_ulong start, target_ulong len, int flags)
1900 PageDesc *p;
1901 target_ulong end;
1902 target_ulong addr;
1904 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
1905 start = start & TARGET_PAGE_MASK;
1907 if( end < start )
1908 /* we've wrapped around */
1909 return -1;
1910 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1911 p = page_find(addr >> TARGET_PAGE_BITS);
1912 if( !p )
1913 return -1;
1914 if( !(p->flags & PAGE_VALID) )
1915 return -1;
1917 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1918 return -1;
1919 if (flags & PAGE_WRITE) {
1920 if (!(p->flags & PAGE_WRITE_ORG))
1921 return -1;
1922 /* unprotect the page if it was put read-only because it
1923 contains translated code */
1924 if (!(p->flags & PAGE_WRITE)) {
1925 if (!page_unprotect(addr, 0, NULL))
1926 return -1;
1928 return 0;
1931 return 0;
1934 /* called from signal handler: invalidate the code and unprotect the
1935 page. Return TRUE if the fault was successfully handled. */
1936 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1938 unsigned int page_index, prot, pindex;
1939 PageDesc *p, *p1;
1940 target_ulong host_start, host_end, addr;
1942 host_start = address & qemu_host_page_mask;
1943 page_index = host_start >> TARGET_PAGE_BITS;
1944 p1 = page_find(page_index);
1945 if (!p1)
1946 return 0;
1947 host_end = host_start + qemu_host_page_size;
1948 p = p1;
1949 prot = 0;
1950 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1951 prot |= p->flags;
1952 p++;
1954 /* if the page was really writable, then we change its
1955 protection back to writable */
1956 if (prot & PAGE_WRITE_ORG) {
1957 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1958 if (!(p1[pindex].flags & PAGE_WRITE)) {
1959 mprotect((void *)g2h(host_start), qemu_host_page_size,
1960 (prot & PAGE_BITS) | PAGE_WRITE);
1961 p1[pindex].flags |= PAGE_WRITE;
1962 /* and since the content will be modified, we must invalidate
1963 the corresponding translated code. */
1964 tb_invalidate_phys_page(address, pc, puc);
1965 #ifdef DEBUG_TB_CHECK
1966 tb_invalidate_check(address);
1967 #endif
1968 return 1;
1971 return 0;
1974 static inline void tlb_set_dirty(CPUState *env,
1975 unsigned long addr, target_ulong vaddr)
1978 #endif /* defined(CONFIG_USER_ONLY) */
1980 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1981 int memory);
1982 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1983 int orig_memory);
1984 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1985 need_subpage) \
1986 do { \
1987 if (addr > start_addr) \
1988 start_addr2 = 0; \
1989 else { \
1990 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1991 if (start_addr2 > 0) \
1992 need_subpage = 1; \
1995 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
1996 end_addr2 = TARGET_PAGE_SIZE - 1; \
1997 else { \
1998 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1999 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2000 need_subpage = 1; \
2002 } while (0)
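/* CHECK_SUBPAGE worked example (illustrative, 4 KB target pages):
   registering orig_size = 0x1000 bytes at start_addr = 0x1000800 touches
   two target pages.  For the page at 0x1000000 the macro yields
   start_addr2 = 0x800, end_addr2 = 0xfff; for the page at 0x1001000 it
   yields start_addr2 = 0, end_addr2 = 0x7ff.  Both halves therefore go
   through subpage_register() rather than a plain full-page phys_offset. */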
2004 /* register physical memory. 'size' must be a multiple of the target
2005 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2006 io memory page */
2007 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2008 unsigned long size,
2009 unsigned long phys_offset)
2011 target_phys_addr_t addr, end_addr;
2012 PhysPageDesc *p;
2013 CPUState *env;
2014 unsigned long orig_size = size;
2015 void *subpage;
2017 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2018 end_addr = start_addr + (target_phys_addr_t)size;
2019 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2020 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2021 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2022 unsigned long orig_memory = p->phys_offset;
2023 target_phys_addr_t start_addr2, end_addr2;
2024 int need_subpage = 0;
2026 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2027 need_subpage);
2028 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2029 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2030 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2031 &p->phys_offset, orig_memory);
2032 } else {
2033 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2034 >> IO_MEM_SHIFT];
2036 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2037 } else {
2038 p->phys_offset = phys_offset;
2039 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2040 (phys_offset & IO_MEM_ROMD))
2041 phys_offset += TARGET_PAGE_SIZE;
2043 } else {
2044 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2045 p->phys_offset = phys_offset;
2046 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2047 (phys_offset & IO_MEM_ROMD))
2048 phys_offset += TARGET_PAGE_SIZE;
2049 else {
2050 target_phys_addr_t start_addr2, end_addr2;
2051 int need_subpage = 0;
2053 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2054 end_addr2, need_subpage);
2056 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2057 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2058 &p->phys_offset, IO_MEM_UNASSIGNED);
2059 subpage_register(subpage, start_addr2, end_addr2,
2060 phys_offset);
2066 /* since each CPU stores ram addresses in its TLB cache, we must
2067 reset the modified entries */
2068 /* XXX: slow ! */
2069 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2070 tlb_flush(env, 1);
2071 }
2072 }
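/* Usage sketch (hypothetical caller, for illustration only): machine init
   code encodes the handler type in the low bits of phys_offset, e.g.

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);

       ram_addr_t rom_offset = qemu_ram_alloc(rom_size);
       cpu_register_physical_memory(0xfff00000, rom_size,
                                    rom_offset | IO_MEM_ROM);

   ram_size, rom_size and the addresses are placeholder values. For MMIO,
   phys_offset is the value returned by cpu_register_io_memory() below,
   which already carries the io index in its upper bits. */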
2074 /* XXX: temporary until new memory mapping API */
2075 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2076 {
2077 PhysPageDesc *p;
2079 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2080 if (!p)
2081 return IO_MEM_UNASSIGNED;
2082 return p->phys_offset;
2083 }
2085 /* XXX: better than nothing */
2086 ram_addr_t qemu_ram_alloc(unsigned int size)
2087 {
2088 ram_addr_t addr;
2089 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2090 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2091 size, phys_ram_size);
2092 abort();
2093 }
2094 addr = phys_ram_alloc_offset;
2095 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2096 return addr;
2097 }
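/* Usage sketch (hypothetical, for illustration): the returned ram_addr_t is
   an offset into the single preallocated guest RAM block, so callers can
   both map it and touch it directly through phys_ram_base:

       ram_addr_t vram_offset = qemu_ram_alloc(vga_ram_size);
       uint8_t *vram_ptr = phys_ram_base + vram_offset;  // host pointer
       cpu_register_physical_memory(0x000a0000, vga_ram_size,
                                    vram_offset | IO_MEM_RAM);

   vga_ram_size and the 0xa0000 mapping are illustrative values only. */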
2099 void qemu_ram_free(ram_addr_t addr)
2100 {
2101 }
2103 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2104 {
2105 #ifdef DEBUG_UNASSIGNED
2106 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2107 #endif
2108 #ifdef TARGET_SPARC
2109 do_unassigned_access(addr, 0, 0, 0);
2110 #elif defined(TARGET_CRIS)
2111 do_unassigned_access(addr, 0, 0, 0);
2112 #endif
2113 return 0;
2114 }
2116 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2117 {
2118 #ifdef DEBUG_UNASSIGNED
2119 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2120 #endif
2121 #ifdef TARGET_SPARC
2122 do_unassigned_access(addr, 1, 0, 0);
2123 #elif defined(TARGET_CRIS)
2124 do_unassigned_access(addr, 1, 0, 0);
2125 #endif
2126 }
2128 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2129 unassigned_mem_readb,
2130 unassigned_mem_readb,
2131 unassigned_mem_readb,
2132 };
2134 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2135 unassigned_mem_writeb,
2136 unassigned_mem_writeb,
2137 unassigned_mem_writeb,
2138 };
2140 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2142 unsigned long ram_addr;
2143 int dirty_flags;
2144 ram_addr = addr - (unsigned long)phys_ram_base;
2145 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2146 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2147 #if !defined(CONFIG_USER_ONLY)
2148 tb_invalidate_phys_page_fast(ram_addr, 1);
2149 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2150 #endif
2151 }
2152 stb_p((uint8_t *)(long)addr, val);
2153 #ifdef USE_KQEMU
2154 if (cpu_single_env->kqemu_enabled &&
2155 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2156 kqemu_modify_page(cpu_single_env, ram_addr);
2157 #endif
2158 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2159 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2160 /* we remove the notdirty callback only if the code has been
2161 flushed */
2162 if (dirty_flags == 0xff)
2163 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2166 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2168 unsigned long ram_addr;
2169 int dirty_flags;
2170 ram_addr = addr - (unsigned long)phys_ram_base;
2171 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2172 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2173 #if !defined(CONFIG_USER_ONLY)
2174 tb_invalidate_phys_page_fast(ram_addr, 2);
2175 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2176 #endif
2177 }
2178 stw_p((uint8_t *)(long)addr, val);
2179 #ifdef USE_KQEMU
2180 if (cpu_single_env->kqemu_enabled &&
2181 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2182 kqemu_modify_page(cpu_single_env, ram_addr);
2183 #endif
2184 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2185 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2186 /* we remove the notdirty callback only if the code has been
2187 flushed */
2188 if (dirty_flags == 0xff)
2189 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2192 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2194 unsigned long ram_addr;
2195 int dirty_flags;
2196 ram_addr = addr - (unsigned long)phys_ram_base;
2197 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2198 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2199 #if !defined(CONFIG_USER_ONLY)
2200 tb_invalidate_phys_page_fast(ram_addr, 4);
2201 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2202 #endif
2203 }
2204 stl_p((uint8_t *)(long)addr, val);
2205 #ifdef USE_KQEMU
2206 if (cpu_single_env->kqemu_enabled &&
2207 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2208 kqemu_modify_page(cpu_single_env, ram_addr);
2209 #endif
2210 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2211 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2212 /* we remove the notdirty callback only if the code has been
2213 flushed */
2214 if (dirty_flags == 0xff)
2215 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2218 static CPUReadMemoryFunc *error_mem_read[3] = {
2219 NULL, /* never used */
2220 NULL, /* never used */
2221 NULL, /* never used */
2222 };
2224 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2225 notdirty_mem_writeb,
2226 notdirty_mem_writew,
2227 notdirty_mem_writel,
2228 };
2230 #if defined(CONFIG_SOFTMMU)
2231 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2232 so these check for a hit then pass through to the normal out-of-line
2233 phys routines. */
2234 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2235 {
2236 return ldub_phys(addr);
2237 }
2239 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2240 {
2241 return lduw_phys(addr);
2242 }
2244 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2245 {
2246 return ldl_phys(addr);
2247 }
2249 /* Generate a debug exception if a watchpoint has been hit.
2250 Returns the real physical address of the access. addr will be a host
2251 address in case of a RAM location. */
2252 static target_ulong check_watchpoint(target_phys_addr_t addr)
2254 CPUState *env = cpu_single_env;
2255 target_ulong watch;
2256 target_ulong retaddr;
2257 int i;
2259 retaddr = addr;
2260 for (i = 0; i < env->nb_watchpoints; i++) {
2261 watch = env->watchpoint[i].vaddr;
2262 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2263 retaddr = addr - env->watchpoint[i].addend;
2264 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2265 cpu_single_env->watchpoint_hit = i + 1;
2266 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2267 break;
2268 }
2269 }
2270 }
2271 return retaddr;
2272 }
2274 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2275 uint32_t val)
2277 addr = check_watchpoint(addr);
2278 stb_phys(addr, val);
2281 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2282 uint32_t val)
2284 addr = check_watchpoint(addr);
2285 stw_phys(addr, val);
2288 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2289 uint32_t val)
2291 addr = check_watchpoint(addr);
2292 stl_phys(addr, val);
2295 static CPUReadMemoryFunc *watch_mem_read[3] = {
2296 watch_mem_readb,
2297 watch_mem_readw,
2298 watch_mem_readl,
2299 };
2301 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2302 watch_mem_writeb,
2303 watch_mem_writew,
2304 watch_mem_writel,
2305 };
2306 #endif
2308 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2309 unsigned int len)
2311 uint32_t ret;
2312 unsigned int idx;
2314 idx = SUBPAGE_IDX(addr - mmio->base);
2315 #if defined(DEBUG_SUBPAGE)
2316 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2317 mmio, len, addr, idx);
2318 #endif
2319 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2321 return ret;
2324 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2325 uint32_t value, unsigned int len)
2327 unsigned int idx;
2329 idx = SUBPAGE_IDX(addr - mmio->base);
2330 #if defined(DEBUG_SUBPAGE)
2331 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2332 mmio, len, addr, idx, value);
2333 #endif
2334 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2337 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2339 #if defined(DEBUG_SUBPAGE)
2340 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2341 #endif
2343 return subpage_readlen(opaque, addr, 0);
2346 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2347 uint32_t value)
2349 #if defined(DEBUG_SUBPAGE)
2350 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2351 #endif
2352 subpage_writelen(opaque, addr, value, 0);
2355 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2357 #if defined(DEBUG_SUBPAGE)
2358 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2359 #endif
2361 return subpage_readlen(opaque, addr, 1);
2364 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2365 uint32_t value)
2367 #if defined(DEBUG_SUBPAGE)
2368 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2369 #endif
2370 subpage_writelen(opaque, addr, value, 1);
2373 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2375 #if defined(DEBUG_SUBPAGE)
2376 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2377 #endif
2379 return subpage_readlen(opaque, addr, 2);
2382 static void subpage_writel (void *opaque,
2383 target_phys_addr_t addr, uint32_t value)
2385 #if defined(DEBUG_SUBPAGE)
2386 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2387 #endif
2388 subpage_writelen(opaque, addr, value, 2);
2391 static CPUReadMemoryFunc *subpage_read[] = {
2392 &subpage_readb,
2393 &subpage_readw,
2394 &subpage_readl,
2395 };
2397 static CPUWriteMemoryFunc *subpage_write[] = {
2398 &subpage_writeb,
2399 &subpage_writew,
2400 &subpage_writel,
2401 };
2403 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2404 int memory)
2406 int idx, eidx;
2407 unsigned int i;
2409 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2410 return -1;
2411 idx = SUBPAGE_IDX(start);
2412 eidx = SUBPAGE_IDX(end);
2413 #if defined(DEBUG_SUBPAGE)
2414 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2415 mmio, start, end, idx, eidx, memory);
2416 #endif
2417 memory >>= IO_MEM_SHIFT;
2418 for (; idx <= eidx; idx++) {
2419 for (i = 0; i < 4; i++) {
2420 if (io_mem_read[memory][i]) {
2421 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2422 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2423 }
2424 if (io_mem_write[memory][i]) {
2425 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2426 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2427 }
2428 }
2429 }
2431 return 0;
2432 }
2434 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2435 int orig_memory)
2437 subpage_t *mmio;
2438 int subpage_memory;
2440 mmio = qemu_mallocz(sizeof(subpage_t));
2441 if (mmio != NULL) {
2442 mmio->base = base;
2443 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2444 #if defined(DEBUG_SUBPAGE)
2445 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2446 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2447 #endif
2448 *phys = subpage_memory | IO_MEM_SUBPAGE;
2449 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2450 }
2452 return mmio;
2453 }
2455 static void io_mem_init(void)
2457 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2458 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2459 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2460 io_mem_nb = 5;
2462 #if defined(CONFIG_SOFTMMU)
2463 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2464 watch_mem_write, NULL);
2465 #endif
2466 /* alloc dirty bits array */
2467 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2468 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2469 }
2471 /* mem_read and mem_write are arrays of functions giving the handler
2472 for byte (index 0), word (index 1) and dword (index 2) accesses; an
2473 access width can be omitted by passing a NULL function pointer. The
2474 registered functions may be modified dynamically later.
2475 If io_index is non-zero, the corresponding io zone is
2476 modified. If it is zero, a new io zone is allocated. The return
2477 value can be used as the phys_offset argument of
2478 cpu_register_physical_memory(); -1 is returned on error. */
2479 int cpu_register_io_memory(int io_index,
2480 CPUReadMemoryFunc **mem_read,
2481 CPUWriteMemoryFunc **mem_write,
2482 void *opaque)
2484 int i, subwidth = 0;
2486 if (io_index <= 0) {
2487 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2488 return -1;
2489 io_index = io_mem_nb++;
2490 } else {
2491 if (io_index >= IO_MEM_NB_ENTRIES)
2492 return -1;
2493 }
2495 for(i = 0; i < 3; i++) {
2496 if (!mem_read[i] || !mem_write[i])
2497 subwidth = IO_MEM_SUBWIDTH;
2498 io_mem_read[io_index][i] = mem_read[i];
2499 io_mem_write[io_index][i] = mem_write[i];
2500 }
2501 io_mem_opaque[io_index] = opaque;
2502 return (io_index << IO_MEM_SHIFT) | subwidth;
2503 }
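/* Usage sketch (hypothetical device model, for illustration only): a device
   supplies one handler per access width, registers them, then maps the
   returned value as the phys_offset of its MMIO region:

       static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
       {
           MyDevState *s = opaque;              // made-up device state
           return s->regs[(addr & 0xff) >> 2];
       }

       static void mydev_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
       {
           MyDevState *s = opaque;
           s->regs[(addr & 0xff) >> 2] = val;
       }

       static CPUReadMemoryFunc *mydev_read[3] = {
           NULL, NULL, mydev_readl,             // byte/word left unhandled
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           NULL, NULL, mydev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(MYDEV_MMIO_BASE, 0x1000, iomemtype);

   MyDevState, mydev_* and MYDEV_MMIO_BASE are made-up names; the NULL
   entries set IO_MEM_SUBWIDTH in the returned value, so the page is wrapped
   in a subpage container by cpu_register_physical_memory(). */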
2505 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2506 {
2507 return io_mem_write[io_index >> IO_MEM_SHIFT];
2508 }
2510 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2511 {
2512 return io_mem_read[io_index >> IO_MEM_SHIFT];
2513 }
2515 /* physical memory access (slow version, mainly for debug) */
2516 #if defined(CONFIG_USER_ONLY)
2517 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2518 int len, int is_write)
2520 int l, flags;
2521 target_ulong page;
2522 void * p;
2524 while (len > 0) {
2525 page = addr & TARGET_PAGE_MASK;
2526 l = (page + TARGET_PAGE_SIZE) - addr;
2527 if (l > len)
2528 l = len;
2529 flags = page_get_flags(page);
2530 if (!(flags & PAGE_VALID))
2531 return;
2532 if (is_write) {
2533 if (!(flags & PAGE_WRITE))
2534 return;
2535 /* XXX: this code should not depend on lock_user */
2536 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2537 /* FIXME - should this return an error rather than just fail? */
2538 return;
2539 memcpy(p, buf, l);
2540 unlock_user(p, addr, l);
2541 } else {
2542 if (!(flags & PAGE_READ))
2543 return;
2544 /* XXX: this code should not depend on lock_user */
2545 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2546 /* FIXME - should this return an error rather than just fail? */
2547 return;
2548 memcpy(buf, p, l);
2549 unlock_user(p, addr, 0);
2550 }
2551 len -= l;
2552 buf += l;
2553 addr += l;
2554 }
2555 }
2557 #else
2558 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2559 int len, int is_write)
2561 int l, io_index;
2562 uint8_t *ptr;
2563 uint32_t val;
2564 target_phys_addr_t page;
2565 unsigned long pd;
2566 PhysPageDesc *p;
2568 while (len > 0) {
2569 page = addr & TARGET_PAGE_MASK;
2570 l = (page + TARGET_PAGE_SIZE) - addr;
2571 if (l > len)
2572 l = len;
2573 p = phys_page_find(page >> TARGET_PAGE_BITS);
2574 if (!p) {
2575 pd = IO_MEM_UNASSIGNED;
2576 } else {
2577 pd = p->phys_offset;
2580 if (is_write) {
2581 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2582 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2583 /* XXX: could force cpu_single_env to NULL to avoid
2584 potential bugs */
2585 if (l >= 4 && ((addr & 3) == 0)) {
2586 /* 32 bit write access */
2587 val = ldl_p(buf);
2588 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2589 l = 4;
2590 } else if (l >= 2 && ((addr & 1) == 0)) {
2591 /* 16 bit write access */
2592 val = lduw_p(buf);
2593 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2594 l = 2;
2595 } else {
2596 /* 8 bit write access */
2597 val = ldub_p(buf);
2598 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2599 l = 1;
2600 }
2601 } else {
2602 unsigned long addr1;
2603 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2604 /* RAM case */
2605 ptr = phys_ram_base + addr1;
2606 memcpy(ptr, buf, l);
2607 if (!cpu_physical_memory_is_dirty(addr1)) {
2608 /* invalidate code */
2609 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2610 /* set dirty bit */
2611 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2612 (0xff & ~CODE_DIRTY_FLAG);
2613 }
2614 }
2615 } else {
2616 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2617 !(pd & IO_MEM_ROMD)) {
2618 /* I/O case */
2619 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2620 if (l >= 4 && ((addr & 3) == 0)) {
2621 /* 32 bit read access */
2622 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2623 stl_p(buf, val);
2624 l = 4;
2625 } else if (l >= 2 && ((addr & 1) == 0)) {
2626 /* 16 bit read access */
2627 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2628 stw_p(buf, val);
2629 l = 2;
2630 } else {
2631 /* 8 bit read access */
2632 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2633 stb_p(buf, val);
2634 l = 1;
2635 }
2636 } else {
2637 /* RAM case */
2638 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2639 (addr & ~TARGET_PAGE_MASK);
2640 memcpy(buf, ptr, l);
2641 }
2642 }
2643 len -= l;
2644 buf += l;
2645 addr += l;
2646 }
2647 }
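/* Usage sketch (illustrative): DMA-capable device models use the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers around
   this function to move data to and from guest memory:

       uint8_t desc[16];
       cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
       // ... interpret the descriptor, then ...
       cpu_physical_memory_write(buf_addr, data, data_len);

   desc_addr, buf_addr, data and data_len are hypothetical device-specific
   values; the wrappers just call cpu_physical_memory_rw() with is_write
   set to 0 or 1 respectively. */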
2649 /* used for ROM loading : can write in RAM and ROM */
2650 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2651 const uint8_t *buf, int len)
2653 int l;
2654 uint8_t *ptr;
2655 target_phys_addr_t page;
2656 unsigned long pd;
2657 PhysPageDesc *p;
2659 while (len > 0) {
2660 page = addr & TARGET_PAGE_MASK;
2661 l = (page + TARGET_PAGE_SIZE) - addr;
2662 if (l > len)
2663 l = len;
2664 p = phys_page_find(page >> TARGET_PAGE_BITS);
2665 if (!p) {
2666 pd = IO_MEM_UNASSIGNED;
2667 } else {
2668 pd = p->phys_offset;
2671 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2672 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2673 !(pd & IO_MEM_ROMD)) {
2674 /* do nothing */
2675 } else {
2676 unsigned long addr1;
2677 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2678 /* ROM/RAM case */
2679 ptr = phys_ram_base + addr1;
2680 memcpy(ptr, buf, l);
2681 }
2682 len -= l;
2683 buf += l;
2684 addr += l;
2685 }
2686 }
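/* Usage sketch (illustrative): firmware loaders use this variant because a
   plain cpu_physical_memory_write() is discarded for pages registered as
   ROM. Copying an already-loaded BIOS image into such a region might look
   like:

       cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);

   bios_base, bios_data and bios_size are placeholder names for values the
   caller obtains elsewhere. */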
2689 /* warning: addr must be aligned */
2690 uint32_t ldl_phys(target_phys_addr_t addr)
2692 int io_index;
2693 uint8_t *ptr;
2694 uint32_t val;
2695 unsigned long pd;
2696 PhysPageDesc *p;
2698 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2699 if (!p) {
2700 pd = IO_MEM_UNASSIGNED;
2701 } else {
2702 pd = p->phys_offset;
2705 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2706 !(pd & IO_MEM_ROMD)) {
2707 /* I/O case */
2708 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2709 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2710 } else {
2711 /* RAM case */
2712 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2713 (addr & ~TARGET_PAGE_MASK);
2714 val = ldl_p(ptr);
2716 return val;
2719 /* warning: addr must be aligned */
2720 uint64_t ldq_phys(target_phys_addr_t addr)
2722 int io_index;
2723 uint8_t *ptr;
2724 uint64_t val;
2725 unsigned long pd;
2726 PhysPageDesc *p;
2728 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2729 if (!p) {
2730 pd = IO_MEM_UNASSIGNED;
2731 } else {
2732 pd = p->phys_offset;
2735 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2736 !(pd & IO_MEM_ROMD)) {
2737 /* I/O case */
2738 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2739 #ifdef TARGET_WORDS_BIGENDIAN
2740 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2741 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2742 #else
2743 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2744 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2745 #endif
2746 } else {
2747 /* RAM case */
2748 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2749 (addr & ~TARGET_PAGE_MASK);
2750 val = ldq_p(ptr);
2752 return val;
2755 /* XXX: optimize */
2756 uint32_t ldub_phys(target_phys_addr_t addr)
2758 uint8_t val;
2759 cpu_physical_memory_read(addr, &val, 1);
2760 return val;
2763 /* XXX: optimize */
2764 uint32_t lduw_phys(target_phys_addr_t addr)
2766 uint16_t val;
2767 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2768 return tswap16(val);
2771 /* warning: addr must be aligned. The ram page is not marked as dirty
2772 and the code inside is not invalidated. It is useful if the dirty
2773 bits are used to track modified PTEs */
2774 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2776 int io_index;
2777 uint8_t *ptr;
2778 unsigned long pd;
2779 PhysPageDesc *p;
2781 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2782 if (!p) {
2783 pd = IO_MEM_UNASSIGNED;
2784 } else {
2785 pd = p->phys_offset;
2788 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2789 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2790 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2791 } else {
2792 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2793 (addr & ~TARGET_PAGE_MASK);
2794 stl_p(ptr, val);
2795 }
2796 }
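/* Usage sketch (illustrative): this is roughly how a target MMU fault
   handler sets the accessed/dirty bits in a page table entry without
   perturbing the dirty tracking described above:

       pte = ldl_phys(pte_addr);
       if (!(pte & PTE_ACCESSED_BIT)) {
           pte |= PTE_ACCESSED_BIT;
           stl_phys_notdirty(pte_addr, pte);
       }

   pte, pte_addr and PTE_ACCESSED_BIT are generic placeholders; each target
   uses its own PTE layout. */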
2798 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2800 int io_index;
2801 uint8_t *ptr;
2802 unsigned long pd;
2803 PhysPageDesc *p;
2805 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2806 if (!p) {
2807 pd = IO_MEM_UNASSIGNED;
2808 } else {
2809 pd = p->phys_offset;
2812 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2813 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2814 #ifdef TARGET_WORDS_BIGENDIAN
2815 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2816 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2817 #else
2818 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2819 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2820 #endif
2821 } else {
2822 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2823 (addr & ~TARGET_PAGE_MASK);
2824 stq_p(ptr, val);
2828 /* warning: addr must be aligned */
2829 void stl_phys(target_phys_addr_t addr, uint32_t val)
2831 int io_index;
2832 uint8_t *ptr;
2833 unsigned long pd;
2834 PhysPageDesc *p;
2836 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2837 if (!p) {
2838 pd = IO_MEM_UNASSIGNED;
2839 } else {
2840 pd = p->phys_offset;
2843 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2844 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2845 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2846 } else {
2847 unsigned long addr1;
2848 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2849 /* RAM case */
2850 ptr = phys_ram_base + addr1;
2851 stl_p(ptr, val);
2852 if (!cpu_physical_memory_is_dirty(addr1)) {
2853 /* invalidate code */
2854 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2855 /* set dirty bit */
2856 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2857 (0xff & ~CODE_DIRTY_FLAG);
2858 }
2859 }
2860 }
2862 /* XXX: optimize */
2863 void stb_phys(target_phys_addr_t addr, uint32_t val)
2865 uint8_t v = val;
2866 cpu_physical_memory_write(addr, &v, 1);
2869 /* XXX: optimize */
2870 void stw_phys(target_phys_addr_t addr, uint32_t val)
2872 uint16_t v = tswap16(val);
2873 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2876 /* XXX: optimize */
2877 void stq_phys(target_phys_addr_t addr, uint64_t val)
2879 val = tswap64(val);
2880 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2881 }
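/* Usage sketch (illustrative): device models and target helpers use the
   ld*_phys/st*_phys family above for individual guest-physical word
   accesses, e.g. polling and updating a hypothetical in-RAM descriptor:

       uint32_t status = ldl_phys(desc_addr);
       if (status & 0x1) {
           stl_phys(desc_addr, status & ~0x1);   // hand ownership back
       }

   desc_addr and the bit layout are made up; unlike the *_notdirty variants,
   stl_phys() updates the dirty bitmap and invalidates any translated code
   in the page it touches. */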
2883 #endif
2885 /* virtual memory access for debug */
2886 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2887 uint8_t *buf, int len, int is_write)
2889 int l;
2890 target_phys_addr_t phys_addr;
2891 target_ulong page;
2893 while (len > 0) {
2894 page = addr & TARGET_PAGE_MASK;
2895 phys_addr = cpu_get_phys_page_debug(env, page);
2896 /* if no physical page mapped, return an error */
2897 if (phys_addr == -1)
2898 return -1;
2899 l = (page + TARGET_PAGE_SIZE) - addr;
2900 if (l > len)
2901 l = len;
2902 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2903 buf, l, is_write);
2904 len -= l;
2905 buf += l;
2906 addr += l;
2907 }
2908 return 0;
2909 }
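/* Usage sketch (illustrative): the gdb stub and the monitor read guest
   virtual memory through this helper; it translates page by page with
   cpu_get_phys_page_debug() and then defers to cpu_physical_memory_rw():

       uint8_t buf[8];
       if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
           // vaddr is not mapped by the guest
       }

   vaddr is a placeholder for the virtual address being inspected. */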
2911 void dump_exec_info(FILE *f,
2912 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2914 int i, target_code_size, max_target_code_size;
2915 int direct_jmp_count, direct_jmp2_count, cross_page;
2916 TranslationBlock *tb;
2918 target_code_size = 0;
2919 max_target_code_size = 0;
2920 cross_page = 0;
2921 direct_jmp_count = 0;
2922 direct_jmp2_count = 0;
2923 for(i = 0; i < nb_tbs; i++) {
2924 tb = &tbs[i];
2925 target_code_size += tb->size;
2926 if (tb->size > max_target_code_size)
2927 max_target_code_size = tb->size;
2928 if (tb->page_addr[1] != -1)
2929 cross_page++;
2930 if (tb->tb_next_offset[0] != 0xffff) {
2931 direct_jmp_count++;
2932 if (tb->tb_next_offset[1] != 0xffff) {
2933 direct_jmp2_count++;
2934 }
2935 }
2936 }
2937 /* XXX: avoid using doubles ? */
2938 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2939 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2940 nb_tbs ? target_code_size / nb_tbs : 0,
2941 max_target_code_size);
2942 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2943 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2944 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2945 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2946 cross_page,
2947 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2948 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2949 direct_jmp_count,
2950 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2951 direct_jmp2_count,
2952 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2953 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2954 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2955 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2956 }
2958 #if !defined(CONFIG_USER_ONLY)
2960 #define MMUSUFFIX _cmmu
2961 #define GETPC() NULL
2962 #define env cpu_single_env
2963 #define SOFTMMU_CODE_ACCESS
2965 #define SHIFT 0
2966 #include "softmmu_template.h"
2968 #define SHIFT 1
2969 #include "softmmu_template.h"
2971 #define SHIFT 2
2972 #include "softmmu_template.h"
2974 #define SHIFT 3
2975 #include "softmmu_template.h"
2977 #undef env
2979 #endif