Fix char* signedness, by Andre Przywara.
[qemu/qemu_0_9_1_stable.git] / exec.c
blob 07d5de04ae3596883e55fec08173c82b6199d1a2
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
 18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
51 //#define DEBUG_SUBPAGE
53 #if !defined(CONFIG_USER_ONLY)
54 /* TB consistency checks only implemented for usermode emulation. */
55 #undef DEBUG_TB_CHECK
56 #endif
58 /* threshold to flush the translated code buffer */
59 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #else
76 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
77 #define TARGET_PHYS_ADDR_SPACE_BITS 32
78 #endif
80 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
81 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
82 int nb_tbs;
83 /* any access to the tbs or the page table must use this lock */
84 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
86 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
87 uint8_t *code_gen_ptr;
89 int phys_ram_size;
90 int phys_ram_fd;
91 uint8_t *phys_ram_base;
92 uint8_t *phys_ram_dirty;
93 static ram_addr_t phys_ram_alloc_offset = 0;
95 CPUState *first_cpu;
96 /* current CPU in the current thread. It is only valid inside
97 cpu_exec() */
98 CPUState *cpu_single_env;
100 typedef struct PageDesc {
101 /* list of TBs intersecting this ram page */
102 TranslationBlock *first_tb;
 103     /* in order to optimize self-modifying code, we count the number
 104        of code-invalidating writes to a given page before using a bitmap */
105 unsigned int code_write_count;
106 uint8_t *code_bitmap;
107 #if defined(CONFIG_USER_ONLY)
108 unsigned long flags;
109 #endif
110 } PageDesc;
112 typedef struct PhysPageDesc {
113 /* offset in host memory of the page + io_index in the low 12 bits */
114 uint32_t phys_offset;
115 } PhysPageDesc;
117 #define L2_BITS 10
118 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
119 /* XXX: this is a temporary hack for alpha target.
120 * In the future, this is to be replaced by a multi-level table
 121  * to actually be able to handle the complete 64-bit address space. */
123 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
124 #else
125 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
126 #endif
128 #define L1_SIZE (1 << L1_BITS)
129 #define L2_SIZE (1 << L2_BITS)
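/* Editor's note (not in the original source): a worked example of the
   two-level split, assuming the common case TARGET_PAGE_BITS == 12:
       L1_BITS = 32 - 10 - 12 = 10, so L1_SIZE = L2_SIZE = 1024;
       l1 index = (addr >> (TARGET_PAGE_BITS + L2_BITS)) & (L1_SIZE - 1)
       l2 index = (addr >> TARGET_PAGE_BITS) & (L2_SIZE - 1)
   l1_map[] holds 1024 pointers, each to a 1024-entry PageDesc array that
   page_find_alloc() below allocates lazily on first touch. */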
131 static void io_mem_init(void);
133 unsigned long qemu_real_host_page_size;
134 unsigned long qemu_host_page_bits;
135 unsigned long qemu_host_page_size;
136 unsigned long qemu_host_page_mask;
138 /* XXX: for system emulation, it could just be an array */
139 static PageDesc *l1_map[L1_SIZE];
140 PhysPageDesc **l1_phys_map;
142 /* io memory support */
143 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
144 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
145 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
146 static int io_mem_nb;
147 #if defined(CONFIG_SOFTMMU)
148 static int io_mem_watch;
149 #endif
151 /* log support */
152 char *logfilename = "/tmp/qemu.log";
153 FILE *logfile;
154 int loglevel;
155 static int log_append = 0;
157 /* statistics */
158 static int tlb_flush_count;
159 static int tb_flush_count;
160 static int tb_phys_invalidate_count;
162 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
163 typedef struct subpage_t {
164 target_phys_addr_t base;
165 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
166 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
167 void *opaque[TARGET_PAGE_SIZE];
168 } subpage_t;
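/* Editor's note (not in the original source): a subpage_t subdivides one
   target page into byte-granular I/O regions.  SUBPAGE_IDX() is simply the
   offset within the page -- e.g. SUBPAGE_IDX(0x1234) == 0x234 for 4 KB
   pages -- and it indexes the per-byte mem_read/mem_write/opaque tables
   filled in by subpage_register(), declared later in this file. */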
170 static void page_init(void)
172 /* NOTE: we can always suppose that qemu_host_page_size >=
173 TARGET_PAGE_SIZE */
174 #ifdef _WIN32
176 SYSTEM_INFO system_info;
177 DWORD old_protect;
179 GetSystemInfo(&system_info);
180 qemu_real_host_page_size = system_info.dwPageSize;
182 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
183 PAGE_EXECUTE_READWRITE, &old_protect);
185 #else
186 qemu_real_host_page_size = getpagesize();
188 unsigned long start, end;
190 start = (unsigned long)code_gen_buffer;
191 start &= ~(qemu_real_host_page_size - 1);
193 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
194 end += qemu_real_host_page_size - 1;
195 end &= ~(qemu_real_host_page_size - 1);
197 mprotect((void *)start, end - start,
198 PROT_READ | PROT_WRITE | PROT_EXEC);
200 #endif
202 if (qemu_host_page_size == 0)
203 qemu_host_page_size = qemu_real_host_page_size;
204 if (qemu_host_page_size < TARGET_PAGE_SIZE)
205 qemu_host_page_size = TARGET_PAGE_SIZE;
206 qemu_host_page_bits = 0;
207 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
208 qemu_host_page_bits++;
209 qemu_host_page_mask = ~(qemu_host_page_size - 1);
210 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
211 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
213 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
215 long long startaddr, endaddr;
216 FILE *f;
217 int n;
219 f = fopen("/proc/self/maps", "r");
220 if (f) {
221 do {
222 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
223 if (n == 2) {
224 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
225 TARGET_PAGE_ALIGN(endaddr),
226 PAGE_RESERVED);
228 } while (!feof(f));
229 fclose(f);
232 #endif
235 static inline PageDesc *page_find_alloc(unsigned int index)
237 PageDesc **lp, *p;
239 lp = &l1_map[index >> L2_BITS];
240 p = *lp;
241 if (!p) {
242 /* allocate if not found */
243 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
244 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
245 *lp = p;
247 return p + (index & (L2_SIZE - 1));
250 static inline PageDesc *page_find(unsigned int index)
252 PageDesc *p;
254 p = l1_map[index >> L2_BITS];
255 if (!p)
256 return 0;
257 return p + (index & (L2_SIZE - 1));
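/* Editor's note (not in the original source): illustrative use of the two
   lookup helpers above; page_find() never allocates and may return NULL,
   while page_find_alloc() builds the second-level table on demand. */
#if 0
static void page_lookup_example(target_ulong addr)
{
    PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;     /* page never touched: no TBs, no flags recorded */
    /* p->first_tb is the (tagged) list of TBs intersecting this page */
}
#endif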
260 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
262 void **lp, **p;
263 PhysPageDesc *pd;
265 p = (void **)l1_phys_map;
266 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
268 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
269 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
270 #endif
271 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
272 p = *lp;
273 if (!p) {
274 /* allocate if not found */
275 if (!alloc)
276 return NULL;
277 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
278 memset(p, 0, sizeof(void *) * L1_SIZE);
279 *lp = p;
281 #endif
282 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
283 pd = *lp;
284 if (!pd) {
285 int i;
286 /* allocate if not found */
287 if (!alloc)
288 return NULL;
289 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
290 *lp = pd;
291 for (i = 0; i < L2_SIZE; i++)
292 pd[i].phys_offset = IO_MEM_UNASSIGNED;
294 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
297 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
299 return phys_page_find_alloc(index, 0);
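/* Editor's note (not in the original source): PhysPageDesc.phys_offset
   packs two things, as the struct comment above says: the page-aligned
   offset of the page in host RAM and, in the low TARGET_PAGE_BITS, an I/O
   index such as IO_MEM_UNASSIGNED or IO_MEM_ROM.  The code below therefore
   tests (pd & ~TARGET_PAGE_MASK) to distinguish plain RAM from I/O. */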
302 #if !defined(CONFIG_USER_ONLY)
303 static void tlb_protect_code(ram_addr_t ram_addr);
304 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
305 target_ulong vaddr);
306 #endif
308 void cpu_exec_init(CPUState *env)
310 CPUState **penv;
311 int cpu_index;
313 if (!code_gen_ptr) {
314 code_gen_ptr = code_gen_buffer;
315 page_init();
316 io_mem_init();
318 env->next_cpu = NULL;
319 penv = &first_cpu;
320 cpu_index = 0;
321 while (*penv != NULL) {
322 penv = (CPUState **)&(*penv)->next_cpu;
323 cpu_index++;
325 env->cpu_index = cpu_index;
326 env->nb_watchpoints = 0;
327 *penv = env;
330 static inline void invalidate_page_bitmap(PageDesc *p)
332 if (p->code_bitmap) {
333 qemu_free(p->code_bitmap);
334 p->code_bitmap = NULL;
336 p->code_write_count = 0;
 339 /* set the 'first_tb' field to NULL in all PageDescs */
340 static void page_flush_tb(void)
342 int i, j;
343 PageDesc *p;
345 for(i = 0; i < L1_SIZE; i++) {
346 p = l1_map[i];
347 if (p) {
348 for(j = 0; j < L2_SIZE; j++) {
349 p->first_tb = NULL;
350 invalidate_page_bitmap(p);
351 p++;
357 /* flush all the translation blocks */
358 /* XXX: tb_flush is currently not thread safe */
359 void tb_flush(CPUState *env1)
361 CPUState *env;
362 #if defined(DEBUG_FLUSH)
363 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
364 (unsigned long)(code_gen_ptr - code_gen_buffer),
365 nb_tbs, nb_tbs > 0 ?
366 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
367 #endif
368 nb_tbs = 0;
370 for(env = first_cpu; env != NULL; env = env->next_cpu) {
371 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
374 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
375 page_flush_tb();
377 code_gen_ptr = code_gen_buffer;
378 /* XXX: flush processor icache at this point if cache flush is
379 expensive */
380 tb_flush_count++;
383 #ifdef DEBUG_TB_CHECK
385 static void tb_invalidate_check(target_ulong address)
387 TranslationBlock *tb;
388 int i;
389 address &= TARGET_PAGE_MASK;
390 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
391 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
392 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
393 address >= tb->pc + tb->size)) {
394 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
395 address, (long)tb->pc, tb->size);
401 /* verify that all the pages have correct rights for code */
402 static void tb_page_check(void)
404 TranslationBlock *tb;
405 int i, flags1, flags2;
407 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
408 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
409 flags1 = page_get_flags(tb->pc);
410 flags2 = page_get_flags(tb->pc + tb->size - 1);
411 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
412 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
413 (long)tb->pc, tb->size, flags1, flags2);
419 void tb_jmp_check(TranslationBlock *tb)
421 TranslationBlock *tb1;
422 unsigned int n1;
424 /* suppress any remaining jumps to this TB */
425 tb1 = tb->jmp_first;
426 for(;;) {
427 n1 = (long)tb1 & 3;
428 tb1 = (TranslationBlock *)((long)tb1 & ~3);
429 if (n1 == 2)
430 break;
431 tb1 = tb1->jmp_next[n1];
433 /* check end of list */
434 if (tb1 != tb) {
435 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
439 #endif
441 /* invalidate one TB */
442 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
443 int next_offset)
445 TranslationBlock *tb1;
446 for(;;) {
447 tb1 = *ptb;
448 if (tb1 == tb) {
449 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
450 break;
452 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
456 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
458 TranslationBlock *tb1;
459 unsigned int n1;
461 for(;;) {
462 tb1 = *ptb;
463 n1 = (long)tb1 & 3;
464 tb1 = (TranslationBlock *)((long)tb1 & ~3);
465 if (tb1 == tb) {
466 *ptb = tb1->page_next[n1];
467 break;
469 ptb = &tb1->page_next[n1];
473 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
475 TranslationBlock *tb1, **ptb;
476 unsigned int n1;
478 ptb = &tb->jmp_next[n];
479 tb1 = *ptb;
480 if (tb1) {
481 /* find tb(n) in circular list */
482 for(;;) {
483 tb1 = *ptb;
484 n1 = (long)tb1 & 3;
485 tb1 = (TranslationBlock *)((long)tb1 & ~3);
486 if (n1 == n && tb1 == tb)
487 break;
488 if (n1 == 2) {
489 ptb = &tb1->jmp_first;
490 } else {
491 ptb = &tb1->jmp_next[n1];
 494         /* now we can remove tb(n) from the list */
495 *ptb = tb->jmp_next[n];
497 tb->jmp_next[n] = NULL;
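/* Editor's note (not in the original source): the jump lists use tagged
   pointers.  The two low bits of a stored TranslationBlock pointer say
   which jump slot (0 or 1) of that TB links back here, and the value 2
   marks the list head, e.g.
       tb->jmp_first = (TranslationBlock *)((long)tb | 2);
   which is why the loops above keep doing
       n1 = (long)tb1 & 3;  tb1 = (TranslationBlock *)((long)tb1 & ~3); */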
501 /* reset the jump entry 'n' of a TB so that it is not chained to
502 another TB */
503 static inline void tb_reset_jump(TranslationBlock *tb, int n)
505 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
508 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
510 CPUState *env;
511 PageDesc *p;
512 unsigned int h, n1;
513 target_ulong phys_pc;
514 TranslationBlock *tb1, *tb2;
516 /* remove the TB from the hash list */
517 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
518 h = tb_phys_hash_func(phys_pc);
519 tb_remove(&tb_phys_hash[h], tb,
520 offsetof(TranslationBlock, phys_hash_next));
522 /* remove the TB from the page list */
523 if (tb->page_addr[0] != page_addr) {
524 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
525 tb_page_remove(&p->first_tb, tb);
526 invalidate_page_bitmap(p);
528 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
529 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
530 tb_page_remove(&p->first_tb, tb);
531 invalidate_page_bitmap(p);
534 tb_invalidated_flag = 1;
536 /* remove the TB from the hash list */
537 h = tb_jmp_cache_hash_func(tb->pc);
538 for(env = first_cpu; env != NULL; env = env->next_cpu) {
539 if (env->tb_jmp_cache[h] == tb)
540 env->tb_jmp_cache[h] = NULL;
 543     /* remove this TB from the two jump lists */
544 tb_jmp_remove(tb, 0);
545 tb_jmp_remove(tb, 1);
547 /* suppress any remaining jumps to this TB */
548 tb1 = tb->jmp_first;
549 for(;;) {
550 n1 = (long)tb1 & 3;
551 if (n1 == 2)
552 break;
553 tb1 = (TranslationBlock *)((long)tb1 & ~3);
554 tb2 = tb1->jmp_next[n1];
555 tb_reset_jump(tb1, n1);
556 tb1->jmp_next[n1] = NULL;
557 tb1 = tb2;
559 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
561 tb_phys_invalidate_count++;
564 static inline void set_bits(uint8_t *tab, int start, int len)
566 int end, mask, end1;
568 end = start + len;
569 tab += start >> 3;
570 mask = 0xff << (start & 7);
571 if ((start & ~7) == (end & ~7)) {
572 if (start < end) {
573 mask &= ~(0xff << (end & 7));
574 *tab |= mask;
576 } else {
577 *tab++ |= mask;
578 start = (start + 8) & ~7;
579 end1 = end & ~7;
580 while (start < end1) {
581 *tab++ = 0xff;
582 start += 8;
584 if (start < end) {
585 mask = ~(0xff << (end & 7));
586 *tab |= mask;
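/* Editor's note (not in the original source), worked example for
   set_bits(): set_bits(tab, 3, 10) must mark bits 3..12.  The first byte
   is ORed with the low byte of 0xff << 3, i.e. 0xf8 (bits 3..7); start
   then becomes 8 and end1 == 8, so no full middle byte is written; the
   tail mask ~(0xff << (13 & 7)) contributes 0x1f to the second byte,
   i.e. bits 8..12 overall. */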
591 static void build_page_bitmap(PageDesc *p)
593 int n, tb_start, tb_end;
594 TranslationBlock *tb;
596 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
597 if (!p->code_bitmap)
598 return;
599 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
601 tb = p->first_tb;
602 while (tb != NULL) {
603 n = (long)tb & 3;
604 tb = (TranslationBlock *)((long)tb & ~3);
605 /* NOTE: this is subtle as a TB may span two physical pages */
606 if (n == 0) {
607 /* NOTE: tb_end may be after the end of the page, but
608 it is not a problem */
609 tb_start = tb->pc & ~TARGET_PAGE_MASK;
610 tb_end = tb_start + tb->size;
611 if (tb_end > TARGET_PAGE_SIZE)
612 tb_end = TARGET_PAGE_SIZE;
613 } else {
614 tb_start = 0;
615 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
617 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
618 tb = tb->page_next[n];
622 #ifdef TARGET_HAS_PRECISE_SMC
624 static void tb_gen_code(CPUState *env,
625 target_ulong pc, target_ulong cs_base, int flags,
626 int cflags)
628 TranslationBlock *tb;
629 uint8_t *tc_ptr;
630 target_ulong phys_pc, phys_page2, virt_page2;
631 int code_gen_size;
633 phys_pc = get_phys_addr_code(env, pc);
634 tb = tb_alloc(pc);
635 if (!tb) {
636 /* flush must be done */
637 tb_flush(env);
638 /* cannot fail at this point */
639 tb = tb_alloc(pc);
641 tc_ptr = code_gen_ptr;
642 tb->tc_ptr = tc_ptr;
643 tb->cs_base = cs_base;
644 tb->flags = flags;
645 tb->cflags = cflags;
646 cpu_gen_code(env, tb, &code_gen_size);
647 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
649 /* check next page if needed */
650 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
651 phys_page2 = -1;
652 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
653 phys_page2 = get_phys_addr_code(env, virt_page2);
655 tb_link_phys(tb, phys_pc, phys_page2);
657 #endif
659 /* invalidate all TBs which intersect with the target physical page
 660    starting in range [start, end). NOTE: start and end must refer to
661 the same physical page. 'is_cpu_write_access' should be true if called
662 from a real cpu write access: the virtual CPU will exit the current
663 TB if code is modified inside this TB. */
664 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
665 int is_cpu_write_access)
667 int n, current_tb_modified, current_tb_not_found, current_flags;
668 CPUState *env = cpu_single_env;
669 PageDesc *p;
670 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
671 target_ulong tb_start, tb_end;
672 target_ulong current_pc, current_cs_base;
674 p = page_find(start >> TARGET_PAGE_BITS);
675 if (!p)
676 return;
677 if (!p->code_bitmap &&
678 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
679 is_cpu_write_access) {
680 /* build code bitmap */
681 build_page_bitmap(p);
 684     /* we remove all the TBs in the range [start, end) */
685 /* XXX: see if in some cases it could be faster to invalidate all the code */
686 current_tb_not_found = is_cpu_write_access;
687 current_tb_modified = 0;
688 current_tb = NULL; /* avoid warning */
689 current_pc = 0; /* avoid warning */
690 current_cs_base = 0; /* avoid warning */
691 current_flags = 0; /* avoid warning */
692 tb = p->first_tb;
693 while (tb != NULL) {
694 n = (long)tb & 3;
695 tb = (TranslationBlock *)((long)tb & ~3);
696 tb_next = tb->page_next[n];
697 /* NOTE: this is subtle as a TB may span two physical pages */
698 if (n == 0) {
699 /* NOTE: tb_end may be after the end of the page, but
700 it is not a problem */
701 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
702 tb_end = tb_start + tb->size;
703 } else {
704 tb_start = tb->page_addr[1];
705 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
707 if (!(tb_end <= start || tb_start >= end)) {
708 #ifdef TARGET_HAS_PRECISE_SMC
709 if (current_tb_not_found) {
710 current_tb_not_found = 0;
711 current_tb = NULL;
712 if (env->mem_write_pc) {
713 /* now we have a real cpu fault */
714 current_tb = tb_find_pc(env->mem_write_pc);
717 if (current_tb == tb &&
718 !(current_tb->cflags & CF_SINGLE_INSN)) {
719 /* If we are modifying the current TB, we must stop
720 its execution. We could be more precise by checking
721 that the modification is after the current PC, but it
722 would require a specialized function to partially
723 restore the CPU state */
725 current_tb_modified = 1;
726 cpu_restore_state(current_tb, env,
727 env->mem_write_pc, NULL);
728 #if defined(TARGET_I386)
729 current_flags = env->hflags;
730 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
731 current_cs_base = (target_ulong)env->segs[R_CS].base;
732 current_pc = current_cs_base + env->eip;
733 #else
734 #error unsupported CPU
735 #endif
737 #endif /* TARGET_HAS_PRECISE_SMC */
738 /* we need to do that to handle the case where a signal
739 occurs while doing tb_phys_invalidate() */
740 saved_tb = NULL;
741 if (env) {
742 saved_tb = env->current_tb;
743 env->current_tb = NULL;
745 tb_phys_invalidate(tb, -1);
746 if (env) {
747 env->current_tb = saved_tb;
748 if (env->interrupt_request && env->current_tb)
749 cpu_interrupt(env, env->interrupt_request);
752 tb = tb_next;
754 #if !defined(CONFIG_USER_ONLY)
755 /* if no code remaining, no need to continue to use slow writes */
756 if (!p->first_tb) {
757 invalidate_page_bitmap(p);
758 if (is_cpu_write_access) {
759 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
762 #endif
763 #ifdef TARGET_HAS_PRECISE_SMC
764 if (current_tb_modified) {
765 /* we generate a block containing just the instruction
766 modifying the memory. It will ensure that it cannot modify
767 itself */
768 env->current_tb = NULL;
769 tb_gen_code(env, current_pc, current_cs_base, current_flags,
770 CF_SINGLE_INSN);
771 cpu_resume_from_signal(env, NULL);
773 #endif
776 /* len must be <= 8 and start must be a multiple of len */
777 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
779 PageDesc *p;
780 int offset, b;
781 #if 0
782 if (1) {
783 if (loglevel) {
784 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
785 cpu_single_env->mem_write_vaddr, len,
786 cpu_single_env->eip,
787 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
790 #endif
791 p = page_find(start >> TARGET_PAGE_BITS);
792 if (!p)
793 return;
794 if (p->code_bitmap) {
795 offset = start & ~TARGET_PAGE_MASK;
796 b = p->code_bitmap[offset >> 3] >> (offset & 7);
797 if (b & ((1 << len) - 1))
798 goto do_invalidate;
799 } else {
800 do_invalidate:
801 tb_invalidate_phys_page_range(start, start + len, 1);
805 #if !defined(CONFIG_SOFTMMU)
806 static void tb_invalidate_phys_page(target_ulong addr,
807 unsigned long pc, void *puc)
809 int n, current_flags, current_tb_modified;
810 target_ulong current_pc, current_cs_base;
811 PageDesc *p;
812 TranslationBlock *tb, *current_tb;
813 #ifdef TARGET_HAS_PRECISE_SMC
814 CPUState *env = cpu_single_env;
815 #endif
817 addr &= TARGET_PAGE_MASK;
818 p = page_find(addr >> TARGET_PAGE_BITS);
819 if (!p)
820 return;
821 tb = p->first_tb;
822 current_tb_modified = 0;
823 current_tb = NULL;
824 current_pc = 0; /* avoid warning */
825 current_cs_base = 0; /* avoid warning */
826 current_flags = 0; /* avoid warning */
827 #ifdef TARGET_HAS_PRECISE_SMC
828 if (tb && pc != 0) {
829 current_tb = tb_find_pc(pc);
831 #endif
832 while (tb != NULL) {
833 n = (long)tb & 3;
834 tb = (TranslationBlock *)((long)tb & ~3);
835 #ifdef TARGET_HAS_PRECISE_SMC
836 if (current_tb == tb &&
837 !(current_tb->cflags & CF_SINGLE_INSN)) {
838 /* If we are modifying the current TB, we must stop
839 its execution. We could be more precise by checking
840 that the modification is after the current PC, but it
841 would require a specialized function to partially
842 restore the CPU state */
844 current_tb_modified = 1;
845 cpu_restore_state(current_tb, env, pc, puc);
846 #if defined(TARGET_I386)
847 current_flags = env->hflags;
848 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
849 current_cs_base = (target_ulong)env->segs[R_CS].base;
850 current_pc = current_cs_base + env->eip;
851 #else
852 #error unsupported CPU
853 #endif
855 #endif /* TARGET_HAS_PRECISE_SMC */
856 tb_phys_invalidate(tb, addr);
857 tb = tb->page_next[n];
859 p->first_tb = NULL;
860 #ifdef TARGET_HAS_PRECISE_SMC
861 if (current_tb_modified) {
862 /* we generate a block containing just the instruction
863 modifying the memory. It will ensure that it cannot modify
864 itself */
865 env->current_tb = NULL;
866 tb_gen_code(env, current_pc, current_cs_base, current_flags,
867 CF_SINGLE_INSN);
868 cpu_resume_from_signal(env, puc);
870 #endif
872 #endif
 874 /* add the tb to the target page and protect it if necessary */
875 static inline void tb_alloc_page(TranslationBlock *tb,
876 unsigned int n, target_ulong page_addr)
878 PageDesc *p;
879 TranslationBlock *last_first_tb;
881 tb->page_addr[n] = page_addr;
882 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
883 tb->page_next[n] = p->first_tb;
884 last_first_tb = p->first_tb;
885 p->first_tb = (TranslationBlock *)((long)tb | n);
886 invalidate_page_bitmap(p);
888 #if defined(TARGET_HAS_SMC) || 1
890 #if defined(CONFIG_USER_ONLY)
891 if (p->flags & PAGE_WRITE) {
892 target_ulong addr;
893 PageDesc *p2;
894 int prot;
 896         /* force the host page to be non-writable (writes will take a
897 page fault + mprotect overhead) */
898 page_addr &= qemu_host_page_mask;
899 prot = 0;
900 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
901 addr += TARGET_PAGE_SIZE) {
903 p2 = page_find (addr >> TARGET_PAGE_BITS);
904 if (!p2)
905 continue;
906 prot |= p2->flags;
907 p2->flags &= ~PAGE_WRITE;
908 page_get_flags(addr);
910 mprotect(g2h(page_addr), qemu_host_page_size,
911 (prot & PAGE_BITS) & ~PAGE_WRITE);
912 #ifdef DEBUG_TB_INVALIDATE
913 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
914 page_addr);
915 #endif
917 #else
918 /* if some code is already present, then the pages are already
919 protected. So we handle the case where only the first TB is
920 allocated in a physical page */
921 if (!last_first_tb) {
922 tlb_protect_code(page_addr);
924 #endif
926 #endif /* TARGET_HAS_SMC */
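/* Editor's note (not in the original source): the net effect of
   tb_alloc_page() is that every host page holding translated code becomes
   write-protected -- via mprotect() in user mode, via tlb_protect_code()
   in system mode -- so a guest write to its own code faults first and is
   routed through tb_invalidate_phys_page*() before the store completes. */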
929 /* Allocate a new translation block. Flush the translation buffer if
930 too many translation blocks or too much generated code. */
931 TranslationBlock *tb_alloc(target_ulong pc)
933 TranslationBlock *tb;
935 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
936 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
937 return NULL;
938 tb = &tbs[nb_tbs++];
939 tb->pc = pc;
940 tb->cflags = 0;
941 return tb;
944 /* add a new TB and link it to the physical page tables. phys_page2 is
945 (-1) to indicate that only one page contains the TB. */
946 void tb_link_phys(TranslationBlock *tb,
947 target_ulong phys_pc, target_ulong phys_page2)
949 unsigned int h;
950 TranslationBlock **ptb;
952 /* add in the physical hash table */
953 h = tb_phys_hash_func(phys_pc);
954 ptb = &tb_phys_hash[h];
955 tb->phys_hash_next = *ptb;
956 *ptb = tb;
958 /* add in the page list */
959 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
960 if (phys_page2 != -1)
961 tb_alloc_page(tb, 1, phys_page2);
962 else
963 tb->page_addr[1] = -1;
965 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
966 tb->jmp_next[0] = NULL;
967 tb->jmp_next[1] = NULL;
969 /* init original jump addresses */
970 if (tb->tb_next_offset[0] != 0xffff)
971 tb_reset_jump(tb, 0);
972 if (tb->tb_next_offset[1] != 0xffff)
973 tb_reset_jump(tb, 1);
975 #ifdef DEBUG_TB_CHECK
976 tb_page_check();
977 #endif
980 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
981 tb[1].tc_ptr. Return NULL if not found */
982 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
984 int m_min, m_max, m;
985 unsigned long v;
986 TranslationBlock *tb;
988 if (nb_tbs <= 0)
989 return NULL;
990 if (tc_ptr < (unsigned long)code_gen_buffer ||
991 tc_ptr >= (unsigned long)code_gen_ptr)
992 return NULL;
993 /* binary search (cf Knuth) */
994 m_min = 0;
995 m_max = nb_tbs - 1;
996 while (m_min <= m_max) {
997 m = (m_min + m_max) >> 1;
998 tb = &tbs[m];
999 v = (unsigned long)tb->tc_ptr;
1000 if (v == tc_ptr)
1001 return tb;
1002 else if (tc_ptr < v) {
1003 m_max = m - 1;
1004 } else {
1005 m_min = m + 1;
1008 return &tbs[m_max];
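/* Editor's note (not in the original source): the binary search above
   relies on tbs[] being filled in generation order, so tc_ptr values are
   strictly increasing.  When there is no exact match, &tbs[m_max] is the
   last TB whose generated code starts at or before tc_ptr, i.e. the TB
   that contains the host PC being looked up. */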
1011 static void tb_reset_jump_recursive(TranslationBlock *tb);
1013 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1015 TranslationBlock *tb1, *tb_next, **ptb;
1016 unsigned int n1;
1018 tb1 = tb->jmp_next[n];
1019 if (tb1 != NULL) {
1020 /* find head of list */
1021 for(;;) {
1022 n1 = (long)tb1 & 3;
1023 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1024 if (n1 == 2)
1025 break;
1026 tb1 = tb1->jmp_next[n1];
 1028         /* we are now sure that tb jumps to tb1 */
1029 tb_next = tb1;
1031 /* remove tb from the jmp_first list */
1032 ptb = &tb_next->jmp_first;
1033 for(;;) {
1034 tb1 = *ptb;
1035 n1 = (long)tb1 & 3;
1036 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1037 if (n1 == n && tb1 == tb)
1038 break;
1039 ptb = &tb1->jmp_next[n1];
1041 *ptb = tb->jmp_next[n];
1042 tb->jmp_next[n] = NULL;
1044 /* suppress the jump to next tb in generated code */
1045 tb_reset_jump(tb, n);
 1047         /* recursively reset the jumps in the tb we could have jumped to */
1048 tb_reset_jump_recursive(tb_next);
1052 static void tb_reset_jump_recursive(TranslationBlock *tb)
1054 tb_reset_jump_recursive2(tb, 0);
1055 tb_reset_jump_recursive2(tb, 1);
1058 #if defined(TARGET_HAS_ICE)
1059 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1061 target_phys_addr_t addr;
1062 target_ulong pd;
1063 ram_addr_t ram_addr;
1064 PhysPageDesc *p;
1066 addr = cpu_get_phys_page_debug(env, pc);
1067 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1068 if (!p) {
1069 pd = IO_MEM_UNASSIGNED;
1070 } else {
1071 pd = p->phys_offset;
1073 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1074 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1076 #endif
1078 /* Add a watchpoint. */
1079 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1081 int i;
1083 for (i = 0; i < env->nb_watchpoints; i++) {
1084 if (addr == env->watchpoint[i].vaddr)
1085 return 0;
1087 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1088 return -1;
1090 i = env->nb_watchpoints++;
1091 env->watchpoint[i].vaddr = addr;
1092 tlb_flush_page(env, addr);
1093 /* FIXME: This flush is needed because of the hack to make memory ops
1094 terminate the TB. It can be removed once the proper IO trap and
1095 re-execute bits are in. */
1096 tb_flush(env);
1097 return i;
1100 /* Remove a watchpoint. */
1101 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1103 int i;
1105 for (i = 0; i < env->nb_watchpoints; i++) {
1106 if (addr == env->watchpoint[i].vaddr) {
1107 env->nb_watchpoints--;
1108 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1109 tlb_flush_page(env, addr);
1110 return 0;
1113 return -1;
1116 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1117 breakpoint is reached */
1118 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1120 #if defined(TARGET_HAS_ICE)
1121 int i;
1123 for(i = 0; i < env->nb_breakpoints; i++) {
1124 if (env->breakpoints[i] == pc)
1125 return 0;
1128 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1129 return -1;
1130 env->breakpoints[env->nb_breakpoints++] = pc;
1132 breakpoint_invalidate(env, pc);
1133 return 0;
1134 #else
1135 return -1;
1136 #endif
1139 /* remove a breakpoint */
1140 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1142 #if defined(TARGET_HAS_ICE)
1143 int i;
1144 for(i = 0; i < env->nb_breakpoints; i++) {
1145 if (env->breakpoints[i] == pc)
1146 goto found;
1148 return -1;
1149 found:
1150 env->nb_breakpoints--;
1151 if (i < env->nb_breakpoints)
1152 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1154 breakpoint_invalidate(env, pc);
1155 return 0;
1156 #else
1157 return -1;
1158 #endif
1161 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1162 CPU loop after each instruction */
1163 void cpu_single_step(CPUState *env, int enabled)
1165 #if defined(TARGET_HAS_ICE)
1166 if (env->singlestep_enabled != enabled) {
1167 env->singlestep_enabled = enabled;
 1168         /* must flush all the translated code to avoid inconsistencies */
1169 /* XXX: only flush what is necessary */
1170 tb_flush(env);
1172 #endif
 1175 /* enable or disable low-level logging */
1176 void cpu_set_log(int log_flags)
1178 loglevel = log_flags;
1179 if (loglevel && !logfile) {
1180 logfile = fopen(logfilename, log_append ? "a" : "w");
1181 if (!logfile) {
1182 perror(logfilename);
1183 _exit(1);
1185 #if !defined(CONFIG_SOFTMMU)
 1186 /* must avoid glibc's mmap() usage by setting the stdio buffer "by hand" */
1188 static uint8_t logfile_buf[4096];
1189 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1191 #else
1192 setvbuf(logfile, NULL, _IOLBF, 0);
1193 #endif
1194 log_append = 1;
1196 if (!loglevel && logfile) {
1197 fclose(logfile);
1198 logfile = NULL;
1202 void cpu_set_log_filename(const char *filename)
1204 logfilename = strdup(filename);
1205 if (logfile) {
1206 fclose(logfile);
1207 logfile = NULL;
1209 cpu_set_log(loglevel);
1212 /* mask must never be zero, except for A20 change call */
1213 void cpu_interrupt(CPUState *env, int mask)
1215 TranslationBlock *tb;
1216 static int interrupt_lock;
1218 env->interrupt_request |= mask;
1219 /* if the cpu is currently executing code, we must unlink it and
 1220        all the potentially executing TBs */
1221 tb = env->current_tb;
1222 if (tb && !testandset(&interrupt_lock)) {
1223 env->current_tb = NULL;
1224 tb_reset_jump_recursive(tb);
1225 interrupt_lock = 0;
1229 void cpu_reset_interrupt(CPUState *env, int mask)
1231 env->interrupt_request &= ~mask;
1234 CPULogItem cpu_log_items[] = {
1235 { CPU_LOG_TB_OUT_ASM, "out_asm",
1236 "show generated host assembly code for each compiled TB" },
1237 { CPU_LOG_TB_IN_ASM, "in_asm",
1238 "show target assembly code for each compiled TB" },
1239 { CPU_LOG_TB_OP, "op",
1240 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1241 #ifdef TARGET_I386
1242 { CPU_LOG_TB_OP_OPT, "op_opt",
1243 "show micro ops after optimization for each compiled TB" },
1244 #endif
1245 { CPU_LOG_INT, "int",
1246 "show interrupts/exceptions in short format" },
1247 { CPU_LOG_EXEC, "exec",
1248 "show trace before each executed TB (lots of logs)" },
1249 { CPU_LOG_TB_CPU, "cpu",
1250 "show CPU state before block translation" },
1251 #ifdef TARGET_I386
1252 { CPU_LOG_PCALL, "pcall",
1253 "show protected mode far calls/returns/exceptions" },
1254 #endif
1255 #ifdef DEBUG_IOPORT
1256 { CPU_LOG_IOPORT, "ioport",
1257 "show all i/o ports accesses" },
1258 #endif
1259 { 0, NULL, NULL },
1262 static int cmp1(const char *s1, int n, const char *s2)
1264 if (strlen(s2) != n)
1265 return 0;
1266 return memcmp(s1, s2, n) == 0;
 1269 /* takes a comma-separated list of log masks. Returns 0 on error. */
1270 int cpu_str_to_log_mask(const char *str)
1272 CPULogItem *item;
1273 int mask;
1274 const char *p, *p1;
1276 p = str;
1277 mask = 0;
1278 for(;;) {
1279 p1 = strchr(p, ',');
1280 if (!p1)
1281 p1 = p + strlen(p);
1282 if(cmp1(p,p1-p,"all")) {
1283 for(item = cpu_log_items; item->mask != 0; item++) {
1284 mask |= item->mask;
1286 } else {
1287 for(item = cpu_log_items; item->mask != 0; item++) {
1288 if (cmp1(p, p1 - p, item->name))
1289 goto found;
1291 return 0;
1293 found:
1294 mask |= item->mask;
1295 if (*p1 != ',')
1296 break;
1297 p = p1 + 1;
1299 return mask;
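/* Editor's note (not in the original source): illustrative usage of the
   log-mask parser; the names come from cpu_log_items[] above. */
#if 0
static void log_mask_example(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    /* mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP); an unknown name yields 0 */
    cpu_set_log(mask);
}
#endif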
1302 void cpu_abort(CPUState *env, const char *fmt, ...)
1304 va_list ap;
1305 va_list ap2;
1307 va_start(ap, fmt);
1308 va_copy(ap2, ap);
1309 fprintf(stderr, "qemu: fatal: ");
1310 vfprintf(stderr, fmt, ap);
1311 fprintf(stderr, "\n");
1312 #ifdef TARGET_I386
1313 if(env->intercept & INTERCEPT_SVM_MASK) {
1314 /* most probably the virtual machine should not
1315 be shut down but rather caught by the VMM */
1316 vmexit(SVM_EXIT_SHUTDOWN, 0);
1318 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1319 #else
1320 cpu_dump_state(env, stderr, fprintf, 0);
1321 #endif
1322 if (logfile) {
1323 fprintf(logfile, "qemu: fatal: ");
1324 vfprintf(logfile, fmt, ap2);
1325 fprintf(logfile, "\n");
1326 #ifdef TARGET_I386
1327 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1328 #else
1329 cpu_dump_state(env, logfile, fprintf, 0);
1330 #endif
1331 fflush(logfile);
1332 fclose(logfile);
1334 va_end(ap2);
1335 va_end(ap);
1336 abort();
1339 CPUState *cpu_copy(CPUState *env)
1341 CPUState *new_env = cpu_init(env->cpu_model_str);
1342 /* preserve chaining and index */
1343 CPUState *next_cpu = new_env->next_cpu;
1344 int cpu_index = new_env->cpu_index;
1345 memcpy(new_env, env, sizeof(CPUState));
1346 new_env->next_cpu = next_cpu;
1347 new_env->cpu_index = cpu_index;
1348 return new_env;
1351 #if !defined(CONFIG_USER_ONLY)
1353 /* NOTE: if flush_global is true, also flush global entries (not
1354 implemented yet) */
1355 void tlb_flush(CPUState *env, int flush_global)
1357 int i;
1359 #if defined(DEBUG_TLB)
1360 printf("tlb_flush:\n");
1361 #endif
1362 /* must reset current TB so that interrupts cannot modify the
1363 links while we are modifying them */
1364 env->current_tb = NULL;
1366 for(i = 0; i < CPU_TLB_SIZE; i++) {
1367 env->tlb_table[0][i].addr_read = -1;
1368 env->tlb_table[0][i].addr_write = -1;
1369 env->tlb_table[0][i].addr_code = -1;
1370 env->tlb_table[1][i].addr_read = -1;
1371 env->tlb_table[1][i].addr_write = -1;
1372 env->tlb_table[1][i].addr_code = -1;
1373 #if (NB_MMU_MODES >= 3)
1374 env->tlb_table[2][i].addr_read = -1;
1375 env->tlb_table[2][i].addr_write = -1;
1376 env->tlb_table[2][i].addr_code = -1;
1377 #if (NB_MMU_MODES == 4)
1378 env->tlb_table[3][i].addr_read = -1;
1379 env->tlb_table[3][i].addr_write = -1;
1380 env->tlb_table[3][i].addr_code = -1;
1381 #endif
1382 #endif
1385 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1387 #if !defined(CONFIG_SOFTMMU)
1388 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1389 #endif
1390 #ifdef USE_KQEMU
1391 if (env->kqemu_enabled) {
1392 kqemu_flush(env, flush_global);
1394 #endif
1395 tlb_flush_count++;
1398 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1400 if (addr == (tlb_entry->addr_read &
1401 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1402 addr == (tlb_entry->addr_write &
1403 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1404 addr == (tlb_entry->addr_code &
1405 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1406 tlb_entry->addr_read = -1;
1407 tlb_entry->addr_write = -1;
1408 tlb_entry->addr_code = -1;
1412 void tlb_flush_page(CPUState *env, target_ulong addr)
1414 int i;
1415 TranslationBlock *tb;
1417 #if defined(DEBUG_TLB)
1418 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1419 #endif
1420 /* must reset current TB so that interrupts cannot modify the
1421 links while we are modifying them */
1422 env->current_tb = NULL;
1424 addr &= TARGET_PAGE_MASK;
1425 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1426 tlb_flush_entry(&env->tlb_table[0][i], addr);
1427 tlb_flush_entry(&env->tlb_table[1][i], addr);
1428 #if (NB_MMU_MODES >= 3)
1429 tlb_flush_entry(&env->tlb_table[2][i], addr);
1430 #if (NB_MMU_MODES == 4)
1431 tlb_flush_entry(&env->tlb_table[3][i], addr);
1432 #endif
1433 #endif
1435 /* Discard jump cache entries for any tb which might potentially
1436 overlap the flushed page. */
1437 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1438 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1440 i = tb_jmp_cache_hash_page(addr);
1441 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1443 #if !defined(CONFIG_SOFTMMU)
1444 if (addr < MMAP_AREA_END)
1445 munmap((void *)addr, TARGET_PAGE_SIZE);
1446 #endif
1447 #ifdef USE_KQEMU
1448 if (env->kqemu_enabled) {
1449 kqemu_flush_page(env, addr);
1451 #endif
1454 /* update the TLBs so that writes to code in the virtual page 'addr'
1455 can be detected */
1456 static void tlb_protect_code(ram_addr_t ram_addr)
1458 cpu_physical_memory_reset_dirty(ram_addr,
1459 ram_addr + TARGET_PAGE_SIZE,
1460 CODE_DIRTY_FLAG);
1463 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1464 tested for self modifying code */
1465 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1466 target_ulong vaddr)
1468 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1471 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1472 unsigned long start, unsigned long length)
1474 unsigned long addr;
1475 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1476 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1477 if ((addr - start) < length) {
1478 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1483 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1484 int dirty_flags)
1486 CPUState *env;
1487 unsigned long length, start1;
1488 int i, mask, len;
1489 uint8_t *p;
1491 start &= TARGET_PAGE_MASK;
1492 end = TARGET_PAGE_ALIGN(end);
1494 length = end - start;
1495 if (length == 0)
1496 return;
1497 len = length >> TARGET_PAGE_BITS;
1498 #ifdef USE_KQEMU
1499 /* XXX: should not depend on cpu context */
1500 env = first_cpu;
1501 if (env->kqemu_enabled) {
1502 ram_addr_t addr;
1503 addr = start;
1504 for(i = 0; i < len; i++) {
1505 kqemu_set_notdirty(env, addr);
1506 addr += TARGET_PAGE_SIZE;
1509 #endif
1510 mask = ~dirty_flags;
1511 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1512 for(i = 0; i < len; i++)
1513 p[i] &= mask;
1515 /* we modify the TLB cache so that the dirty bit will be set again
1516 when accessing the range */
1517 start1 = start + (unsigned long)phys_ram_base;
1518 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1519 for(i = 0; i < CPU_TLB_SIZE; i++)
1520 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1521 for(i = 0; i < CPU_TLB_SIZE; i++)
1522 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1523 #if (NB_MMU_MODES >= 3)
1524 for(i = 0; i < CPU_TLB_SIZE; i++)
1525 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1526 #if (NB_MMU_MODES == 4)
1527 for(i = 0; i < CPU_TLB_SIZE; i++)
1528 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1529 #endif
1530 #endif
1533 #if !defined(CONFIG_SOFTMMU)
1534 /* XXX: this is expensive */
1536 VirtPageDesc *p;
1537 int j;
1538 target_ulong addr;
1540 for(i = 0; i < L1_SIZE; i++) {
1541 p = l1_virt_map[i];
1542 if (p) {
1543 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1544 for(j = 0; j < L2_SIZE; j++) {
1545 if (p->valid_tag == virt_valid_tag &&
1546 p->phys_addr >= start && p->phys_addr < end &&
1547 (p->prot & PROT_WRITE)) {
1548 if (addr < MMAP_AREA_END) {
1549 mprotect((void *)addr, TARGET_PAGE_SIZE,
1550 p->prot & ~PROT_WRITE);
1553 addr += TARGET_PAGE_SIZE;
1554 p++;
1559 #endif
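/* Editor's note (not in the original source): phys_ram_dirty keeps one
   byte of dirty flags per target page.  0xff means "fully dirty" (see the
   0xff stores and tests elsewhere in this file); clearing bits here also
   flips the matching TLB write entries to IO_MEM_NOTDIRTY, so the next
   guest store takes the slow path and re-marks the page. */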
1562 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1564 ram_addr_t ram_addr;
1566 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1567 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1568 tlb_entry->addend - (unsigned long)phys_ram_base;
1569 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1570 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1575 /* update the TLB according to the current state of the dirty bits */
1576 void cpu_tlb_update_dirty(CPUState *env)
1578 int i;
1579 for(i = 0; i < CPU_TLB_SIZE; i++)
1580 tlb_update_dirty(&env->tlb_table[0][i]);
1581 for(i = 0; i < CPU_TLB_SIZE; i++)
1582 tlb_update_dirty(&env->tlb_table[1][i]);
1583 #if (NB_MMU_MODES >= 3)
1584 for(i = 0; i < CPU_TLB_SIZE; i++)
1585 tlb_update_dirty(&env->tlb_table[2][i]);
1586 #if (NB_MMU_MODES == 4)
1587 for(i = 0; i < CPU_TLB_SIZE; i++)
1588 tlb_update_dirty(&env->tlb_table[3][i]);
1589 #endif
1590 #endif
1593 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1594 unsigned long start)
1596 unsigned long addr;
1597 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1598 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1599 if (addr == start) {
1600 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1605 /* update the TLB corresponding to virtual page vaddr and phys addr
1606 addr so that it is no longer dirty */
1607 static inline void tlb_set_dirty(CPUState *env,
1608 unsigned long addr, target_ulong vaddr)
1610 int i;
1612 addr &= TARGET_PAGE_MASK;
1613 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1614 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1615 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1616 #if (NB_MMU_MODES >= 3)
1617 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1618 #if (NB_MMU_MODES == 4)
1619 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1620 #endif
1621 #endif
1624 /* add a new TLB entry. At most one entry for a given virtual address
1625 is permitted. Return 0 if OK or 2 if the page could not be mapped
 1626    (can only happen in non-SOFTMMU mode for I/O pages or pages
1627 conflicting with the host address space). */
1628 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1629 target_phys_addr_t paddr, int prot,
1630 int mmu_idx, int is_softmmu)
1632 PhysPageDesc *p;
1633 unsigned long pd;
1634 unsigned int index;
1635 target_ulong address;
1636 target_phys_addr_t addend;
1637 int ret;
1638 CPUTLBEntry *te;
1639 int i;
1641 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1642 if (!p) {
1643 pd = IO_MEM_UNASSIGNED;
1644 } else {
1645 pd = p->phys_offset;
1647 #if defined(DEBUG_TLB)
1648 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1649 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1650 #endif
1652 ret = 0;
1653 #if !defined(CONFIG_SOFTMMU)
1654 if (is_softmmu)
1655 #endif
1657 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1658 /* IO memory case */
1659 address = vaddr | pd;
1660 addend = paddr;
1661 } else {
1662 /* standard memory */
1663 address = vaddr;
1664 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1667 /* Make accesses to pages with watchpoints go via the
1668 watchpoint trap routines. */
1669 for (i = 0; i < env->nb_watchpoints; i++) {
1670 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1671 if (address & ~TARGET_PAGE_MASK) {
1672 env->watchpoint[i].addend = 0;
1673 address = vaddr | io_mem_watch;
1674 } else {
1675 env->watchpoint[i].addend = pd - paddr +
1676 (unsigned long) phys_ram_base;
1677 /* TODO: Figure out how to make read watchpoints coexist
1678 with code. */
1679 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1684 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1685 addend -= vaddr;
1686 te = &env->tlb_table[mmu_idx][index];
1687 te->addend = addend;
1688 if (prot & PAGE_READ) {
1689 te->addr_read = address;
1690 } else {
1691 te->addr_read = -1;
1693 if (prot & PAGE_EXEC) {
1694 te->addr_code = address;
1695 } else {
1696 te->addr_code = -1;
1698 if (prot & PAGE_WRITE) {
1699 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1700 (pd & IO_MEM_ROMD)) {
1701 /* write access calls the I/O callback */
1702 te->addr_write = vaddr |
1703 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1704 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1705 !cpu_physical_memory_is_dirty(pd)) {
1706 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1707 } else {
1708 te->addr_write = address;
1710 } else {
1711 te->addr_write = -1;
1714 #if !defined(CONFIG_SOFTMMU)
1715 else {
1716 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1717 /* IO access: no mapping is done as it will be handled by the
1718 soft MMU */
1719 if (!(env->hflags & HF_SOFTMMU_MASK))
1720 ret = 2;
1721 } else {
1722 void *map_addr;
1724 if (vaddr >= MMAP_AREA_END) {
1725 ret = 2;
1726 } else {
1727 if (prot & PROT_WRITE) {
1728 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1729 #if defined(TARGET_HAS_SMC) || 1
1730 first_tb ||
1731 #endif
1732 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1733 !cpu_physical_memory_is_dirty(pd))) {
 734                     /* ROM: we behave as if code was inside */
1735 /* if code is present, we only map as read only and save the
1736 original mapping */
1737 VirtPageDesc *vp;
1739 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1740 vp->phys_addr = pd;
1741 vp->prot = prot;
1742 vp->valid_tag = virt_valid_tag;
1743 prot &= ~PAGE_WRITE;
1746 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1747 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1748 if (map_addr == MAP_FAILED) {
 1749                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1750 paddr, vaddr);
1755 #endif
1756 return ret;
1759 /* called from signal handler: invalidate the code and unprotect the
 1760    page. Return TRUE if the fault was successfully handled. */
1761 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1763 #if !defined(CONFIG_SOFTMMU)
1764 VirtPageDesc *vp;
1766 #if defined(DEBUG_TLB)
1767 printf("page_unprotect: addr=0x%08x\n", addr);
1768 #endif
1769 addr &= TARGET_PAGE_MASK;
1771 /* if it is not mapped, no need to worry here */
1772 if (addr >= MMAP_AREA_END)
1773 return 0;
1774 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1775 if (!vp)
1776 return 0;
1777 /* NOTE: in this case, validate_tag is _not_ tested as it
1778 validates only the code TLB */
1779 if (vp->valid_tag != virt_valid_tag)
1780 return 0;
1781 if (!(vp->prot & PAGE_WRITE))
1782 return 0;
1783 #if defined(DEBUG_TLB)
1784 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1785 addr, vp->phys_addr, vp->prot);
1786 #endif
1787 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1788 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1789 (unsigned long)addr, vp->prot);
1790 /* set the dirty bit */
1791 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1792 /* flush the code inside */
1793 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1794 return 1;
1795 #else
1796 return 0;
1797 #endif
1800 #else
1802 void tlb_flush(CPUState *env, int flush_global)
1806 void tlb_flush_page(CPUState *env, target_ulong addr)
1810 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1811 target_phys_addr_t paddr, int prot,
1812 int mmu_idx, int is_softmmu)
1814 return 0;
1817 /* dump memory mappings */
1818 void page_dump(FILE *f)
1820 unsigned long start, end;
1821 int i, j, prot, prot1;
1822 PageDesc *p;
1824 fprintf(f, "%-8s %-8s %-8s %s\n",
1825 "start", "end", "size", "prot");
1826 start = -1;
1827 end = -1;
1828 prot = 0;
1829 for(i = 0; i <= L1_SIZE; i++) {
1830 if (i < L1_SIZE)
1831 p = l1_map[i];
1832 else
1833 p = NULL;
1834 for(j = 0;j < L2_SIZE; j++) {
1835 if (!p)
1836 prot1 = 0;
1837 else
1838 prot1 = p[j].flags;
1839 if (prot1 != prot) {
1840 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1841 if (start != -1) {
1842 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1843 start, end, end - start,
1844 prot & PAGE_READ ? 'r' : '-',
1845 prot & PAGE_WRITE ? 'w' : '-',
1846 prot & PAGE_EXEC ? 'x' : '-');
1848 if (prot1 != 0)
1849 start = end;
1850 else
1851 start = -1;
1852 prot = prot1;
1854 if (!p)
1855 break;
1860 int page_get_flags(target_ulong address)
1862 PageDesc *p;
1864 p = page_find(address >> TARGET_PAGE_BITS);
1865 if (!p)
1866 return 0;
1867 return p->flags;
1870 /* modify the flags of a page and invalidate the code if
 1871    necessary. The flag PAGE_WRITE_ORG is positioned automatically
1872 depending on PAGE_WRITE */
1873 void page_set_flags(target_ulong start, target_ulong end, int flags)
1875 PageDesc *p;
1876 target_ulong addr;
1878 start = start & TARGET_PAGE_MASK;
1879 end = TARGET_PAGE_ALIGN(end);
1880 if (flags & PAGE_WRITE)
1881 flags |= PAGE_WRITE_ORG;
1882 spin_lock(&tb_lock);
1883 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1884 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1885 /* if the write protection is set, then we invalidate the code
1886 inside */
1887 if (!(p->flags & PAGE_WRITE) &&
1888 (flags & PAGE_WRITE) &&
1889 p->first_tb) {
1890 tb_invalidate_phys_page(addr, 0, NULL);
1892 p->flags = flags;
1894 spin_unlock(&tb_lock);
1897 int page_check_range(target_ulong start, target_ulong len, int flags)
1899 PageDesc *p;
1900 target_ulong end;
1901 target_ulong addr;
 1903     end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
1904 start = start & TARGET_PAGE_MASK;
1906 if( end < start )
1907 /* we've wrapped around */
1908 return -1;
1909 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1910 p = page_find(addr >> TARGET_PAGE_BITS);
1911 if( !p )
1912 return -1;
1913 if( !(p->flags & PAGE_VALID) )
1914 return -1;
1916 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1917 return -1;
1918 if (flags & PAGE_WRITE) {
1919 if (!(p->flags & PAGE_WRITE_ORG))
1920 return -1;
1921 /* unprotect the page if it was put read-only because it
1922 contains translated code */
1923 if (!(p->flags & PAGE_WRITE)) {
1924 if (!page_unprotect(addr, 0, NULL))
1925 return -1;
1927 return 0;
1930 return 0;
1933 /* called from signal handler: invalidate the code and unprotect the
 1934    page. Return TRUE if the fault was successfully handled. */
1935 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1937 unsigned int page_index, prot, pindex;
1938 PageDesc *p, *p1;
1939 target_ulong host_start, host_end, addr;
1941 host_start = address & qemu_host_page_mask;
1942 page_index = host_start >> TARGET_PAGE_BITS;
1943 p1 = page_find(page_index);
1944 if (!p1)
1945 return 0;
1946 host_end = host_start + qemu_host_page_size;
1947 p = p1;
1948 prot = 0;
1949 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1950 prot |= p->flags;
1951 p++;
1953 /* if the page was really writable, then we change its
1954 protection back to writable */
1955 if (prot & PAGE_WRITE_ORG) {
1956 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1957 if (!(p1[pindex].flags & PAGE_WRITE)) {
1958 mprotect((void *)g2h(host_start), qemu_host_page_size,
1959 (prot & PAGE_BITS) | PAGE_WRITE);
1960 p1[pindex].flags |= PAGE_WRITE;
1961 /* and since the content will be modified, we must invalidate
1962 the corresponding translated code. */
1963 tb_invalidate_phys_page(address, pc, puc);
1964 #ifdef DEBUG_TB_CHECK
1965 tb_invalidate_check(address);
1966 #endif
1967 return 1;
1970 return 0;
1973 static inline void tlb_set_dirty(CPUState *env,
1974 unsigned long addr, target_ulong vaddr)
1977 #endif /* defined(CONFIG_USER_ONLY) */
1979 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1980 int memory);
1981 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1982 int orig_memory);
1983 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1984 need_subpage) \
1985 do { \
1986 if (addr > start_addr) \
1987 start_addr2 = 0; \
1988 else { \
1989 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1990 if (start_addr2 > 0) \
1991 need_subpage = 1; \
1994 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
1995 end_addr2 = TARGET_PAGE_SIZE - 1; \
1996 else { \
1997 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1998 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1999 need_subpage = 1; \
2001 } while (0)
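/* Editor's note (not in the original source), worked example for
   CHECK_SUBPAGE: registering a region that begins 0x100 bytes into a page
   hits the else branch on the first loop iteration (addr == start_addr),
   giving start_addr2 == 0x100 > 0; if the region also ends inside that
   same page, end_addr2 < TARGET_PAGE_SIZE - 1 as well.  Either condition
   sets need_subpage, and cpu_register_physical_memory() below then routes
   the page through a subpage_t instead of a flat phys_offset. */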
2003 /* register physical memory. 'size' must be a multiple of the target
2004 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2005 io memory page */
2006 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2007 unsigned long size,
2008 unsigned long phys_offset)
2010 target_phys_addr_t addr, end_addr;
2011 PhysPageDesc *p;
2012 CPUState *env;
2013 unsigned long orig_size = size;
2014 void *subpage;
2016 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2017 end_addr = start_addr + (target_phys_addr_t)size;
2018 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2019 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2020 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2021 unsigned long orig_memory = p->phys_offset;
2022 target_phys_addr_t start_addr2, end_addr2;
2023 int need_subpage = 0;
2025 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2026 need_subpage);
2027 if (need_subpage) {
2028 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2029 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2030 &p->phys_offset, orig_memory);
2031 } else {
2032 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2033 >> IO_MEM_SHIFT];
2035 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2036 } else {
2037 p->phys_offset = phys_offset;
2038 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2039 (phys_offset & IO_MEM_ROMD))
2040 phys_offset += TARGET_PAGE_SIZE;
2042 } else {
2043 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2044 p->phys_offset = phys_offset;
2045 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2046 (phys_offset & IO_MEM_ROMD))
2047 phys_offset += TARGET_PAGE_SIZE;
2048 else {
2049 target_phys_addr_t start_addr2, end_addr2;
2050 int need_subpage = 0;
2052 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2053 end_addr2, need_subpage);
2055 if (need_subpage) {
2056 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2057 &p->phys_offset, IO_MEM_UNASSIGNED);
2058 subpage_register(subpage, start_addr2, end_addr2,
2059 phys_offset);
2065 /* since each CPU stores ram addresses in its TLB cache, we must
2066 reset the modified entries */
2067 /* XXX: slow ! */
2068 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2069 tlb_flush(env, 1);
2073 /* XXX: temporary until new memory mapping API */
2074 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2076 PhysPageDesc *p;
2078 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2079 if (!p)
2080 return IO_MEM_UNASSIGNED;
2081 return p->phys_offset;
2084 /* XXX: better than nothing */
2085 ram_addr_t qemu_ram_alloc(unsigned int size)
2087 ram_addr_t addr;
2088 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2089 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2090 size, phys_ram_size);
2091 abort();
2093 addr = phys_ram_alloc_offset;
2094 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2095 return addr;
2096 }
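/* qemu_ram_alloc() hands out page-aligned offsets into the single
   preallocated phys_ram_base area; callers pass the returned ram_addr_t
   (optionally OR'ed with IO_MEM_* flags) to cpu_register_physical_memory(). */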
2098 void qemu_ram_free(ram_addr_t addr)
2102 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2104 #ifdef DEBUG_UNASSIGNED
2105 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2106 #endif
2107 #ifdef TARGET_SPARC
2108 do_unassigned_access(addr, 0, 0, 0);
2109 #elif defined(TARGET_CRIS)
2110 do_unassigned_access(addr, 0, 0, 0);
2111 #endif
2112 return 0;
2115 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2117 #ifdef DEBUG_UNASSIGNED
2118 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2119 #endif
2120 #ifdef TARGET_SPARC
2121 do_unassigned_access(addr, 1, 0, 0);
2122 #elif defined(TARGET_CRIS)
2123 do_unassigned_access(addr, 1, 0, 0);
2124 #endif
2127 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2128 unassigned_mem_readb,
2129 unassigned_mem_readb,
2130 unassigned_mem_readb,
2133 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2134 unassigned_mem_writeb,
2135 unassigned_mem_writeb,
2136 unassigned_mem_writeb,
2137 };
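/* The notdirty handlers below back IO_MEM_NOTDIRTY: they are installed in
   the TLB for RAM pages whose dirty bits are not all set, so that the first
   write to such a page can invalidate any translated code derived from it
   and update phys_ram_dirty before the store is performed. */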
2139 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2141 unsigned long ram_addr;
2142 int dirty_flags;
2143 ram_addr = addr - (unsigned long)phys_ram_base;
2144 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2145 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2146 #if !defined(CONFIG_USER_ONLY)
2147 tb_invalidate_phys_page_fast(ram_addr, 1);
2148 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2149 #endif
2150 }
2151 stb_p((uint8_t *)(long)addr, val);
2152 #ifdef USE_KQEMU
2153 if (cpu_single_env->kqemu_enabled &&
2154 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2155 kqemu_modify_page(cpu_single_env, ram_addr);
2156 #endif
2157 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2158 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2159 /* we remove the notdirty callback only if the code has been
2160 flushed */
2161 if (dirty_flags == 0xff)
2162 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2165 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2167 unsigned long ram_addr;
2168 int dirty_flags;
2169 ram_addr = addr - (unsigned long)phys_ram_base;
2170 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2171 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2172 #if !defined(CONFIG_USER_ONLY)
2173 tb_invalidate_phys_page_fast(ram_addr, 2);
2174 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2175 #endif
2176 }
2177 stw_p((uint8_t *)(long)addr, val);
2178 #ifdef USE_KQEMU
2179 if (cpu_single_env->kqemu_enabled &&
2180 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2181 kqemu_modify_page(cpu_single_env, ram_addr);
2182 #endif
2183 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2184 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2185 /* we remove the notdirty callback only if the code has been
2186 flushed */
2187 if (dirty_flags == 0xff)
2188 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2191 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2193 unsigned long ram_addr;
2194 int dirty_flags;
2195 ram_addr = addr - (unsigned long)phys_ram_base;
2196 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2197 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2198 #if !defined(CONFIG_USER_ONLY)
2199 tb_invalidate_phys_page_fast(ram_addr, 4);
2200 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2201 #endif
2202 }
2203 stl_p((uint8_t *)(long)addr, val);
2204 #ifdef USE_KQEMU
2205 if (cpu_single_env->kqemu_enabled &&
2206 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2207 kqemu_modify_page(cpu_single_env, ram_addr);
2208 #endif
2209 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2210 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2211 /* we remove the notdirty callback only if the code has been
2212 flushed */
2213 if (dirty_flags == 0xff)
2214 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2217 static CPUReadMemoryFunc *error_mem_read[3] = {
2218 NULL, /* never used */
2219 NULL, /* never used */
2220 NULL, /* never used */
2223 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2224 notdirty_mem_writeb,
2225 notdirty_mem_writew,
2226 notdirty_mem_writel,
2229 #if defined(CONFIG_SOFTMMU)
2230 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2231 so these check for a hit then pass through to the normal out-of-line
2232 phys routines. */
2233 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2235 return ldub_phys(addr);
2238 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2240 return lduw_phys(addr);
2243 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2245 return ldl_phys(addr);
2248 /* Generate a debug exception if a watchpoint has been hit.
2249 Returns the real physical address of the access. addr will be a host
2250 address in case of a RAM location. */
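/* env->mem_write_vaddr holds the guest virtual address of the access that
   was redirected here; if it lies on the same page as a watchpoint, the
   watchpoint's addend is subtracted from 'addr' to recover the physical
   address, and an exact in-page offset match additionally records the hit
   and raises CPU_INTERRUPT_DEBUG. */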
2251 static target_ulong check_watchpoint(target_phys_addr_t addr)
2253 CPUState *env = cpu_single_env;
2254 target_ulong watch;
2255 target_ulong retaddr;
2256 int i;
2258 retaddr = addr;
2259 for (i = 0; i < env->nb_watchpoints; i++) {
2260 watch = env->watchpoint[i].vaddr;
2261 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2262 retaddr = addr - env->watchpoint[i].addend;
2263 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2264 cpu_single_env->watchpoint_hit = i + 1;
2265 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2266 break;
2267 }
2268 }
2269 }
2270 return retaddr;
2273 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2274 uint32_t val)
2276 addr = check_watchpoint(addr);
2277 stb_phys(addr, val);
2280 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2281 uint32_t val)
2283 addr = check_watchpoint(addr);
2284 stw_phys(addr, val);
2287 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2288 uint32_t val)
2290 addr = check_watchpoint(addr);
2291 stl_phys(addr, val);
2294 static CPUReadMemoryFunc *watch_mem_read[3] = {
2295 watch_mem_readb,
2296 watch_mem_readw,
2297 watch_mem_readl,
2300 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2301 watch_mem_writeb,
2302 watch_mem_writew,
2303 watch_mem_writel,
2305 #endif
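/* Sub-page support: when a registration does not cover a whole target page
   (for example a small MMIO region sharing a page with RAM), the page's
   phys_offset is redirected to a subpage_t, which keeps per-SUBPAGE_IDX
   read/write handler tables and forwards each access to the handlers
   registered for that slice of the page. */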
2307 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2308 unsigned int len)
2310 CPUReadMemoryFunc **mem_read;
2311 uint32_t ret;
2312 unsigned int idx;
2314 idx = SUBPAGE_IDX(addr - mmio->base);
2315 #if defined(DEBUG_SUBPAGE)
2316 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2317 mmio, len, addr, idx);
2318 #endif
2319 mem_read = mmio->mem_read[idx];
2320 ret = (*mem_read[len])(mmio->opaque[idx], addr);
2322 return ret;
2325 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2326 uint32_t value, unsigned int len)
2328 CPUWriteMemoryFunc **mem_write;
2329 unsigned int idx;
2331 idx = SUBPAGE_IDX(addr - mmio->base);
2332 #if defined(DEBUG_SUBPAGE)
2333 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2334 mmio, len, addr, idx, value);
2335 #endif
2336 mem_write = mmio->mem_write[idx];
2337 (*mem_write[len])(mmio->opaque[idx], addr, value);
2340 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2342 #if defined(DEBUG_SUBPAGE)
2343 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2344 #endif
2346 return subpage_readlen(opaque, addr, 0);
2349 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2350 uint32_t value)
2352 #if defined(DEBUG_SUBPAGE)
2353 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2354 #endif
2355 subpage_writelen(opaque, addr, value, 0);
2358 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2360 #if defined(DEBUG_SUBPAGE)
2361 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2362 #endif
2364 return subpage_readlen(opaque, addr, 1);
2367 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2368 uint32_t value)
2370 #if defined(DEBUG_SUBPAGE)
2371 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2372 #endif
2373 subpage_writelen(opaque, addr, value, 1);
2376 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2378 #if defined(DEBUG_SUBPAGE)
2379 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2380 #endif
2382 return subpage_readlen(opaque, addr, 2);
2385 static void subpage_writel (void *opaque,
2386 target_phys_addr_t addr, uint32_t value)
2388 #if defined(DEBUG_SUBPAGE)
2389 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2390 #endif
2391 subpage_writelen(opaque, addr, value, 2);
2394 static CPUReadMemoryFunc *subpage_read[] = {
2395 &subpage_readb,
2396 &subpage_readw,
2397 &subpage_readl,
2400 static CPUWriteMemoryFunc *subpage_write[] = {
2401 &subpage_writeb,
2402 &subpage_writew,
2403 &subpage_writel,
2406 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2407 int memory)
2409 int idx, eidx;
2411 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2412 return -1;
2413 idx = SUBPAGE_IDX(start);
2414 eidx = SUBPAGE_IDX(end);
2415 #if defined(DEBUG_SUBPAGE)
2416 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2417 mmio, start, end, idx, eidx, memory);
2418 #endif
2419 memory >>= IO_MEM_SHIFT;
2420 for (; idx <= eidx; idx++) {
2421 mmio->mem_read[idx] = io_mem_read[memory];
2422 mmio->mem_write[idx] = io_mem_write[memory];
2423 mmio->opaque[idx] = io_mem_opaque[memory];
2426 return 0;
2429 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2430 int orig_memory)
2432 subpage_t *mmio;
2433 int subpage_memory;
2435 mmio = qemu_mallocz(sizeof(subpage_t));
2436 if (mmio != NULL) {
2437 mmio->base = base;
2438 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2439 #if defined(DEBUG_SUBPAGE)
2440 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2441 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2442 #endif
2443 *phys = subpage_memory | IO_MEM_SUBPAGE;
2444 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2447 return mmio;
2448 }
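/* io_mem_init() installs the built-in handlers for the fixed IO_MEM_ROM,
   IO_MEM_UNASSIGNED and IO_MEM_NOTDIRTY slots, reserves the low indices by
   starting io_mem_nb at 5, registers the watchpoint handlers (softmmu only)
   and allocates the per-page dirty bitmap. */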
2450 static void io_mem_init(void)
2452 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2453 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2454 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2455 io_mem_nb = 5;
2457 #if defined(CONFIG_SOFTMMU)
2458 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2459 watch_mem_write, NULL);
2460 #endif
2461 /* alloc dirty bits array */
2462 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2463 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2466 /* mem_read and mem_write are arrays of functions containing the
2467 function to access byte (index 0), word (index 1) and dword (index
2468 2). All functions must be supplied. If io_index is positive, the
2469 corresponding io zone is modified. If it is zero or negative, a new io
2470 zone is allocated. The return value can be used with
2471 cpu_register_physical_memory(); -1 is returned on error. */
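/* Illustrative usage for a device with byte/word/long handlers; 'mydev_*',
   's' and 'base' are hypothetical names for this comment, not code from
   this file:
       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io); */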
2472 int cpu_register_io_memory(int io_index,
2473 CPUReadMemoryFunc **mem_read,
2474 CPUWriteMemoryFunc **mem_write,
2475 void *opaque)
2476 {
2477 int i;
2479 if (io_index <= 0) {
2480 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2481 return -1;
2482 io_index = io_mem_nb++;
2483 } else {
2484 if (io_index >= IO_MEM_NB_ENTRIES)
2485 return -1;
2486 }
2488 for(i = 0;i < 3; i++) {
2489 io_mem_read[io_index][i] = mem_read[i];
2490 io_mem_write[io_index][i] = mem_write[i];
2491 }
2492 io_mem_opaque[io_index] = opaque;
2493 return io_index << IO_MEM_SHIFT;
2494 }
2496 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2498 return io_mem_write[io_index >> IO_MEM_SHIFT];
2501 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2503 return io_mem_read[io_index >> IO_MEM_SHIFT];
2506 /* physical memory access (slow version, mainly for debug) */
2507 #if defined(CONFIG_USER_ONLY)
2508 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2509 int len, int is_write)
2510 {
2511 int l, flags;
2512 target_ulong page;
2513 void * p;
2515 while (len > 0) {
2516 page = addr & TARGET_PAGE_MASK;
2517 l = (page + TARGET_PAGE_SIZE) - addr;
2518 if (l > len)
2519 l = len;
2520 flags = page_get_flags(page);
2521 if (!(flags & PAGE_VALID))
2522 return;
2523 if (is_write) {
2524 if (!(flags & PAGE_WRITE))
2525 return;
2526 /* XXX: this code should not depend on lock_user */
2527 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2528 /* FIXME - should this return an error rather than just fail? */
2529 return;
2530 memcpy(p, buf, l);
2531 unlock_user(p, addr, l);
2532 } else {
2533 if (!(flags & PAGE_READ))
2534 return;
2535 /* XXX: this code should not depend on lock_user */
2536 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2537 /* FIXME - should this return an error rather than just fail? */
2538 return;
2539 memcpy(buf, p, l);
2540 unlock_user(p, addr, 0);
2541 }
2542 len -= l;
2543 buf += l;
2544 addr += l;
2545 }
2546 }
2548 #else
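/* System (softmmu) version: the transfer is split at target page
   boundaries; I/O pages go through the io_mem_read/io_mem_write tables in
   4-, 2- or 1-byte chunks depending on alignment and remaining length,
   while RAM pages are copied directly through phys_ram_base, with writes
   invalidating any translated code on the page and updating the dirty
   bitmap. */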
2549 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2550 int len, int is_write)
2551 {
2552 int l, io_index;
2553 uint8_t *ptr;
2554 uint32_t val;
2555 target_phys_addr_t page;
2556 unsigned long pd;
2557 PhysPageDesc *p;
2559 while (len > 0) {
2560 page = addr & TARGET_PAGE_MASK;
2561 l = (page + TARGET_PAGE_SIZE) - addr;
2562 if (l > len)
2563 l = len;
2564 p = phys_page_find(page >> TARGET_PAGE_BITS);
2565 if (!p) {
2566 pd = IO_MEM_UNASSIGNED;
2567 } else {
2568 pd = p->phys_offset;
2569 }
2571 if (is_write) {
2572 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2573 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2574 /* XXX: could force cpu_single_env to NULL to avoid
2575 potential bugs */
2576 if (l >= 4 && ((addr & 3) == 0)) {
2577 /* 32 bit write access */
2578 val = ldl_p(buf);
2579 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2580 l = 4;
2581 } else if (l >= 2 && ((addr & 1) == 0)) {
2582 /* 16 bit write access */
2583 val = lduw_p(buf);
2584 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2585 l = 2;
2586 } else {
2587 /* 8 bit write access */
2588 val = ldub_p(buf);
2589 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2590 l = 1;
2591 }
2592 } else {
2593 unsigned long addr1;
2594 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2595 /* RAM case */
2596 ptr = phys_ram_base + addr1;
2597 memcpy(ptr, buf, l);
2598 if (!cpu_physical_memory_is_dirty(addr1)) {
2599 /* invalidate code */
2600 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2601 /* set dirty bit */
2602 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2603 (0xff & ~CODE_DIRTY_FLAG);
2604 }
2605 }
2606 } else {
2607 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2608 !(pd & IO_MEM_ROMD)) {
2609 /* I/O case */
2610 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2611 if (l >= 4 && ((addr & 3) == 0)) {
2612 /* 32 bit read access */
2613 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2614 stl_p(buf, val);
2615 l = 4;
2616 } else if (l >= 2 && ((addr & 1) == 0)) {
2617 /* 16 bit read access */
2618 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2619 stw_p(buf, val);
2620 l = 2;
2621 } else {
2622 /* 8 bit read access */
2623 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2624 stb_p(buf, val);
2625 l = 1;
2626 }
2627 } else {
2628 /* RAM case */
2629 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2630 (addr & ~TARGET_PAGE_MASK);
2631 memcpy(buf, ptr, l);
2632 }
2633 }
2634 len -= l;
2635 buf += l;
2636 addr += l;
2637 }
2638 }
2640 /* used for ROM loading: can write to RAM and ROM */
2641 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2642 const uint8_t *buf, int len)
2644 int l;
2645 uint8_t *ptr;
2646 target_phys_addr_t page;
2647 unsigned long pd;
2648 PhysPageDesc *p;
2650 while (len > 0) {
2651 page = addr & TARGET_PAGE_MASK;
2652 l = (page + TARGET_PAGE_SIZE) - addr;
2653 if (l > len)
2654 l = len;
2655 p = phys_page_find(page >> TARGET_PAGE_BITS);
2656 if (!p) {
2657 pd = IO_MEM_UNASSIGNED;
2658 } else {
2659 pd = p->phys_offset;
2662 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2663 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2664 !(pd & IO_MEM_ROMD)) {
2665 /* do nothing */
2666 } else {
2667 unsigned long addr1;
2668 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2669 /* ROM/RAM case */
2670 ptr = phys_ram_base + addr1;
2671 memcpy(ptr, buf, l);
2673 len -= l;
2674 buf += l;
2675 addr += l;
2680 /* warning: addr must be aligned */
2681 uint32_t ldl_phys(target_phys_addr_t addr)
2683 int io_index;
2684 uint8_t *ptr;
2685 uint32_t val;
2686 unsigned long pd;
2687 PhysPageDesc *p;
2689 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2690 if (!p) {
2691 pd = IO_MEM_UNASSIGNED;
2692 } else {
2693 pd = p->phys_offset;
2696 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2697 !(pd & IO_MEM_ROMD)) {
2698 /* I/O case */
2699 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2700 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2701 } else {
2702 /* RAM case */
2703 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2704 (addr & ~TARGET_PAGE_MASK);
2705 val = ldl_p(ptr);
2707 return val;
2710 /* warning: addr must be aligned */
2711 uint64_t ldq_phys(target_phys_addr_t addr)
2713 int io_index;
2714 uint8_t *ptr;
2715 uint64_t val;
2716 unsigned long pd;
2717 PhysPageDesc *p;
2719 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2720 if (!p) {
2721 pd = IO_MEM_UNASSIGNED;
2722 } else {
2723 pd = p->phys_offset;
2726 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2727 !(pd & IO_MEM_ROMD)) {
2728 /* I/O case */
2729 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2730 #ifdef TARGET_WORDS_BIGENDIAN
2731 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2732 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2733 #else
2734 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2735 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2736 #endif
2737 } else {
2738 /* RAM case */
2739 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2740 (addr & ~TARGET_PAGE_MASK);
2741 val = ldq_p(ptr);
2743 return val;
2746 /* XXX: optimize */
2747 uint32_t ldub_phys(target_phys_addr_t addr)
2749 uint8_t val;
2750 cpu_physical_memory_read(addr, &val, 1);
2751 return val;
2754 /* XXX: optimize */
2755 uint32_t lduw_phys(target_phys_addr_t addr)
2757 uint16_t val;
2758 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2759 return tswap16(val);
2762 /* warning: addr must be aligned. The ram page is not marked as dirty
2763 and the code inside is not invalidated. It is useful if the dirty
2764 bits are used to track modified PTEs */
2765 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2767 int io_index;
2768 uint8_t *ptr;
2769 unsigned long pd;
2770 PhysPageDesc *p;
2772 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2773 if (!p) {
2774 pd = IO_MEM_UNASSIGNED;
2775 } else {
2776 pd = p->phys_offset;
2779 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2780 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2781 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2782 } else {
2783 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2784 (addr & ~TARGET_PAGE_MASK);
2785 stl_p(ptr, val);
2789 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2791 int io_index;
2792 uint8_t *ptr;
2793 unsigned long pd;
2794 PhysPageDesc *p;
2796 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2797 if (!p) {
2798 pd = IO_MEM_UNASSIGNED;
2799 } else {
2800 pd = p->phys_offset;
2803 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2804 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2805 #ifdef TARGET_WORDS_BIGENDIAN
2806 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2807 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2808 #else
2809 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2810 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2811 #endif
2812 } else {
2813 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2814 (addr & ~TARGET_PAGE_MASK);
2815 stq_p(ptr, val);
2819 /* warning: addr must be aligned */
2820 void stl_phys(target_phys_addr_t addr, uint32_t val)
2822 int io_index;
2823 uint8_t *ptr;
2824 unsigned long pd;
2825 PhysPageDesc *p;
2827 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2828 if (!p) {
2829 pd = IO_MEM_UNASSIGNED;
2830 } else {
2831 pd = p->phys_offset;
2834 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2835 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2836 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2837 } else {
2838 unsigned long addr1;
2839 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2840 /* RAM case */
2841 ptr = phys_ram_base + addr1;
2842 stl_p(ptr, val);
2843 if (!cpu_physical_memory_is_dirty(addr1)) {
2844 /* invalidate code */
2845 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2846 /* set dirty bit */
2847 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2848 (0xff & ~CODE_DIRTY_FLAG);
2853 /* XXX: optimize */
2854 void stb_phys(target_phys_addr_t addr, uint32_t val)
2856 uint8_t v = val;
2857 cpu_physical_memory_write(addr, &v, 1);
2860 /* XXX: optimize */
2861 void stw_phys(target_phys_addr_t addr, uint32_t val)
2863 uint16_t v = tswap16(val);
2864 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2867 /* XXX: optimize */
2868 void stq_phys(target_phys_addr_t addr, uint64_t val)
2870 val = tswap64(val);
2871 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2874 #endif
2876 /* virtual memory access for debug */
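/* Used by debug clients (e.g. the gdb stub) to access guest virtual
   memory: each page is translated with cpu_get_phys_page_debug() and the
   data is then transferred with cpu_physical_memory_rw(). */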
2877 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2878 uint8_t *buf, int len, int is_write)
2880 int l;
2881 target_phys_addr_t phys_addr;
2882 target_ulong page;
2884 while (len > 0) {
2885 page = addr & TARGET_PAGE_MASK;
2886 phys_addr = cpu_get_phys_page_debug(env, page);
2887 /* if no physical page mapped, return an error */
2888 if (phys_addr == -1)
2889 return -1;
2890 l = (page + TARGET_PAGE_SIZE) - addr;
2891 if (l > len)
2892 l = len;
2893 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2894 buf, l, is_write);
2895 len -= l;
2896 buf += l;
2897 addr += l;
2899 return 0;
2902 void dump_exec_info(FILE *f,
2903 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2905 int i, target_code_size, max_target_code_size;
2906 int direct_jmp_count, direct_jmp2_count, cross_page;
2907 TranslationBlock *tb;
2909 target_code_size = 0;
2910 max_target_code_size = 0;
2911 cross_page = 0;
2912 direct_jmp_count = 0;
2913 direct_jmp2_count = 0;
2914 for(i = 0; i < nb_tbs; i++) {
2915 tb = &tbs[i];
2916 target_code_size += tb->size;
2917 if (tb->size > max_target_code_size)
2918 max_target_code_size = tb->size;
2919 if (tb->page_addr[1] != -1)
2920 cross_page++;
2921 if (tb->tb_next_offset[0] != 0xffff) {
2922 direct_jmp_count++;
2923 if (tb->tb_next_offset[1] != 0xffff) {
2924 direct_jmp2_count++;
2925 }
2926 }
2927 }
2928 /* XXX: avoid using doubles ? */
2929 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2930 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2931 nb_tbs ? target_code_size / nb_tbs : 0,
2932 max_target_code_size);
2933 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2934 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2935 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2936 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2937 cross_page,
2938 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2939 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2940 direct_jmp_count,
2941 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2942 direct_jmp2_count,
2943 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2944 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2945 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2946 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2947 }
2949 #if !defined(CONFIG_USER_ONLY)
2951 #define MMUSUFFIX _cmmu
2952 #define GETPC() NULL
2953 #define env cpu_single_env
2954 #define SOFTMMU_CODE_ACCESS
2956 #define SHIFT 0
2957 #include "softmmu_template.h"
2959 #define SHIFT 1
2960 #include "softmmu_template.h"
2962 #define SHIFT 2
2963 #include "softmmu_template.h"
2965 #define SHIFT 3
2966 #include "softmmu_template.h"
2968 #undef env
2970 #endif