/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>
#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
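/* Illustrative note (not part of the original source): TARGET_PHYS_ADDR_SPACE_BITS
   bounds the physical address range tracked by l1_phys_map below; e.g. a value
   of 36 covers 2^36 bytes = 64 GB, while the 32-bit kqemu-compatible fallback
   covers 4 GB. */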
TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
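/* Illustrative note: phys_ram_dirty holds one byte of dirty flags per target
   page, indexed as phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] (see the
   notdirty write handlers and tlb_unprotect_code_phys below); a value of 0xff
   means the page is dirty for every client, including CODE_DIRTY_FLAG. */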
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
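/* Worked example (illustrative; assumes 4 KB target pages, i.e.
   TARGET_PAGE_BITS == 12, and L2_BITS == 10 -- its definition is not shown
   here): L1_BITS == 32 - 10 - 12 == 10, so both l1_map and each second-level
   table hold 1024 entries, and a 32-bit address splits as
   [10-bit L1 index][10-bit L2 index][12-bit page offset]. */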
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
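/* Illustrative example: SUBPAGE_IDX() keeps only the offset of an address
   inside its target page, so with 4 KB pages (an assumption, not fixed here)
   SUBPAGE_IDX(0x12345) == 0x345; it is used below to index the per-subpage
   mem_read/mem_write/opaque tables. */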
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
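/* Usage sketch (illustrative, mirroring how code_gen_alloc() below calls it):
   the POSIX variant rounds the range out to host page boundaries before
   mprotect(), so a buffer that does not start on a page boundary still ends
   up executable:

       static uint8_t buf[4096];   // hypothetical buffer, not from the source
       map_exec(buf, sizeof(buf)); // RWX over every host page overlapping buf
*/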
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
        startaddr = MIN(startaddr,
                        (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
        endaddr = MIN(endaddr,
                      (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
        page_set_flags(startaddr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc *page_find_alloc(target_ulong index)
{
    lp = &l1_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
}
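/* Typical lookup pattern (a sketch of how callers below use these helpers):

       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
       if (!p)
           return;   // page never touched: no TBs to worry about
       // otherwise p->first_tb lists the TBs intersecting this ram page
*/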
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
        /* allocate if not found */
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
        /* allocate if not found */
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#endif
    code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
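/* Sizing note (illustrative): code_gen_buffer_max_size leaves room for one
   maximal translation block below the end of the buffer, so the TB currently
   being generated always fits, and code_gen_max_blocks sizes the tbs[] array
   from the average block size; tb_alloc() below refuses to allocate once
   either limit is reached, which forces a tb_flush(). */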
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;

void cpu_exec_init(CPUState *env)
{
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    for(i = 0; i < L1_SIZE; i++) {
            for(j = 0; j < L2_SIZE; j++) {
                invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
{
    TranslationBlock *tb1;
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
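/* Illustrative note on the pointer tagging used by the page/jmp lists: the low
   2 bits of each stored TranslationBlock pointer encode which slot (0 or 1) of
   the referring TB the link came from, with the value 2 used as an
   end-of-list marker (see the jmp_first "fail safe" initialisation below),
   hence the recurring decode sequence:

       n1  = (long)tb1 & 3;                          // slot number / end marker
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   // real pointer
*/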
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
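/* Worked example (based on the visible callers rather than the partially
   shown body above): set_bits(bitmap, start, len) marks bits
   [start, start+len) in a byte-array bitmap, bit i living in bitmap[i >> 3]
   at position (i & 7). So set_bits(bitmap, 3, 6) sets bits 3..8:
   bitmap[0] |= 0xf8 and bitmap[1] |= 0x01. build_page_bitmap() below uses
   this with one bit per byte of guest code. */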
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
763 /* invalidate all TBs which intersect with the target physical page
764 starting in range [start;end[. NOTE: start and end must refer to
765 the same physical page. 'is_cpu_write_access' should be true if called
766 from a real cpu write access: the virtual CPU will exit the current
767 TB if code is modified inside this TB. */
768 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
769 int is_cpu_write_access
)
771 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
772 CPUState
*env
= cpu_single_env
;
774 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
775 target_ulong tb_start
, tb_end
;
776 target_ulong current_pc
, current_cs_base
;
778 p
= page_find(start
>> TARGET_PAGE_BITS
);
781 if (!p
->code_bitmap
&&
782 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
783 is_cpu_write_access
) {
784 /* build code bitmap */
785 build_page_bitmap(p
);
788 /* we remove all the TBs in the range [start, end[ */
789 /* XXX: see if in some cases it could be faster to invalidate all the code */
790 current_tb_not_found
= is_cpu_write_access
;
791 current_tb_modified
= 0;
792 current_tb
= NULL
; /* avoid warning */
793 current_pc
= 0; /* avoid warning */
794 current_cs_base
= 0; /* avoid warning */
795 current_flags
= 0; /* avoid warning */
799 tb
= (TranslationBlock
*)((long)tb
& ~3);
800 tb_next
= tb
->page_next
[n
];
801 /* NOTE: this is subtle as a TB may span two physical pages */
803 /* NOTE: tb_end may be after the end of the page, but
804 it is not a problem */
805 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
806 tb_end
= tb_start
+ tb
->size
;
808 tb_start
= tb
->page_addr
[1];
809 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
811 if (!(tb_end
<= start
|| tb_start
>= end
)) {
812 #ifdef TARGET_HAS_PRECISE_SMC
813 if (current_tb_not_found
) {
814 current_tb_not_found
= 0;
816 if (env
->mem_write_pc
) {
817 /* now we have a real cpu fault */
818 current_tb
= tb_find_pc(env
->mem_write_pc
);
821 if (current_tb
== tb
&&
822 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
823 /* If we are modifying the current TB, we must stop
824 its execution. We could be more precise by checking
825 that the modification is after the current PC, but it
826 would require a specialized function to partially
827 restore the CPU state */
829 current_tb_modified
= 1;
830 cpu_restore_state(current_tb
, env
,
831 env
->mem_write_pc
, NULL
);
832 #if defined(TARGET_I386)
833 current_flags
= env
->hflags
;
834 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
835 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
836 current_pc
= current_cs_base
+ env
->eip
;
838 #error unsupported CPU
841 #endif /* TARGET_HAS_PRECISE_SMC */
842 /* we need to do that to handle the case where a signal
843 occurs while doing tb_phys_invalidate() */
846 saved_tb
= env
->current_tb
;
847 env
->current_tb
= NULL
;
849 tb_phys_invalidate(tb
, -1);
851 env
->current_tb
= saved_tb
;
852 if (env
->interrupt_request
&& env
->current_tb
)
853 cpu_interrupt(env
, env
->interrupt_request
);
858 #if !defined(CONFIG_USER_ONLY)
859 /* if no code remaining, no need to continue to use slow writes */
861 invalidate_page_bitmap(p
);
862 if (is_cpu_write_access
) {
863 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
867 #ifdef TARGET_HAS_PRECISE_SMC
868 if (current_tb_modified
) {
869 /* we generate a block containing just the instruction
870 modifying the memory. It will ensure that it cannot modify
872 env
->current_tb
= NULL
;
873 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
875 cpu_resume_from_signal(env
, NULL
);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            tb_invalidate_phys_page_range(start, start + len, 1);
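/* Illustrative check (mirroring the code above, with hypothetical numbers):
   for a 4-byte write at page offset 0x104, offset >> 3 == 0x20 and
   offset & 7 == 4, so b holds bits 0x104..0x107 of the code bitmap; if any of
   the low (1 << 4) - 1 == 0xf bits is set, translated code overlaps the write
   and the slow tb_invalidate_phys_page_range() path is taken. */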
909 #if !defined(CONFIG_SOFTMMU)
910 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
911 unsigned long pc
, void *puc
)
913 int n
, current_flags
, current_tb_modified
;
914 target_ulong current_pc
, current_cs_base
;
916 TranslationBlock
*tb
, *current_tb
;
917 #ifdef TARGET_HAS_PRECISE_SMC
918 CPUState
*env
= cpu_single_env
;
921 addr
&= TARGET_PAGE_MASK
;
922 p
= page_find(addr
>> TARGET_PAGE_BITS
);
926 current_tb_modified
= 0;
928 current_pc
= 0; /* avoid warning */
929 current_cs_base
= 0; /* avoid warning */
930 current_flags
= 0; /* avoid warning */
931 #ifdef TARGET_HAS_PRECISE_SMC
933 current_tb
= tb_find_pc(pc
);
938 tb
= (TranslationBlock
*)((long)tb
& ~3);
939 #ifdef TARGET_HAS_PRECISE_SMC
940 if (current_tb
== tb
&&
941 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
942 /* If we are modifying the current TB, we must stop
943 its execution. We could be more precise by checking
944 that the modification is after the current PC, but it
945 would require a specialized function to partially
946 restore the CPU state */
948 current_tb_modified
= 1;
949 cpu_restore_state(current_tb
, env
, pc
, puc
);
950 #if defined(TARGET_I386)
951 current_flags
= env
->hflags
;
952 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
953 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
954 current_pc
= current_cs_base
+ env
->eip
;
956 #error unsupported CPU
959 #endif /* TARGET_HAS_PRECISE_SMC */
960 tb_phys_invalidate(tb
, addr
);
961 tb
= tb
->page_next
[n
];
964 #ifdef TARGET_HAS_PRECISE_SMC
965 if (current_tb_modified
) {
966 /* we generate a block containing just the instruction
967 modifying the memory. It will ensure that it cannot modify
969 env
->current_tb
= NULL
;
970 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
972 cpu_resume_from_signal(env
, puc
);
978 /* add the tb in the target page and protect it if necessary */
979 static inline void tb_alloc_page(TranslationBlock
*tb
,
980 unsigned int n
, target_ulong page_addr
)
983 TranslationBlock
*last_first_tb
;
985 tb
->page_addr
[n
] = page_addr
;
986 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
987 tb
->page_next
[n
] = p
->first_tb
;
988 last_first_tb
= p
->first_tb
;
989 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
990 invalidate_page_bitmap(p
);
992 #if defined(TARGET_HAS_SMC) || 1
994 #if defined(CONFIG_USER_ONLY)
995 if (p
->flags
& PAGE_WRITE
) {
1000 /* force the host page as non writable (writes will have a
1001 page fault + mprotect overhead) */
1002 page_addr
&= qemu_host_page_mask
;
1004 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1005 addr
+= TARGET_PAGE_SIZE
) {
1007 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1011 p2
->flags
&= ~PAGE_WRITE
;
1012 page_get_flags(addr
);
1014 mprotect(g2h(page_addr
), qemu_host_page_size
,
1015 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1016 #ifdef DEBUG_TB_INVALIDATE
1017 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1022 /* if some code is already present, then the pages are already
1023 protected. So we handle the case where only the first TB is
1024 allocated in a physical page */
1025 if (!last_first_tb
) {
1026 tlb_protect_code(page_addr
);
1030 #endif /* TARGET_HAS_SMC */
1033 /* Allocate a new translation block. Flush the translation buffer if
1034 too many translation blocks or too much generated code. */
1035 TranslationBlock
*tb_alloc(target_ulong pc
)
1037 TranslationBlock
*tb
;
1039 if (nb_tbs
>= code_gen_max_blocks
||
1040 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1042 tb
= &tbs
[nb_tbs
++];
1048 /* add a new TB and link it to the physical page tables. phys_page2 is
1049 (-1) to indicate that only one page contains the TB. */
1050 void tb_link_phys(TranslationBlock
*tb
,
1051 target_ulong phys_pc
, target_ulong phys_page2
)
1054 TranslationBlock
**ptb
;
1056 /* Grab the mmap lock to stop another thread invalidating this TB
1057 before we are done. */
1059 /* add in the physical hash table */
1060 h
= tb_phys_hash_func(phys_pc
);
1061 ptb
= &tb_phys_hash
[h
];
1062 tb
->phys_hash_next
= *ptb
;
1065 /* add in the page list */
1066 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1067 if (phys_page2
!= -1)
1068 tb_alloc_page(tb
, 1, phys_page2
);
1070 tb
->page_addr
[1] = -1;
1072 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1073 tb
->jmp_next
[0] = NULL
;
1074 tb
->jmp_next
[1] = NULL
;
1076 /* init original jump addresses */
1077 if (tb
->tb_next_offset
[0] != 0xffff)
1078 tb_reset_jump(tb
, 0);
1079 if (tb
->tb_next_offset
[1] != 0xffff)
1080 tb_reset_jump(tb
, 1);
1082 #ifdef DEBUG_TB_CHECK
1088 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1089 tb[1].tc_ptr. Return NULL if not found */
1090 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1092 int m_min
, m_max
, m
;
1094 TranslationBlock
*tb
;
1098 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1099 tc_ptr
>= (unsigned long)code_gen_ptr
)
1101 /* binary search (cf Knuth) */
1104 while (m_min
<= m_max
) {
1105 m
= (m_min
+ m_max
) >> 1;
1107 v
= (unsigned long)tb
->tc_ptr
;
1110 else if (tc_ptr
< v
) {
1119 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1121 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1123 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1126 tb1
= tb
->jmp_next
[n
];
1128 /* find head of list */
1131 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1134 tb1
= tb1
->jmp_next
[n1
];
1136 /* we are now sure now that tb jumps to tb1 */
1139 /* remove tb from the jmp_first list */
1140 ptb
= &tb_next
->jmp_first
;
1144 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1145 if (n1
== n
&& tb1
== tb
)
1147 ptb
= &tb1
->jmp_next
[n1
];
1149 *ptb
= tb
->jmp_next
[n
];
1150 tb
->jmp_next
[n
] = NULL
;
1152 /* suppress the jump to next tb in generated code */
1153 tb_reset_jump(tb
, n
);
1155 /* suppress jumps in the tb on which we could have jumped */
1156 tb_reset_jump_recursive(tb_next
);
1160 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1162 tb_reset_jump_recursive2(tb
, 0);
1163 tb_reset_jump_recursive2(tb
, 1);
1166 #if defined(TARGET_HAS_ICE)
1167 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1169 target_phys_addr_t addr
;
1171 ram_addr_t ram_addr
;
1174 addr
= cpu_get_phys_page_debug(env
, pc
);
1175 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1177 pd
= IO_MEM_UNASSIGNED
;
1179 pd
= p
->phys_offset
;
1181 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1182 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1186 /* Add a watchpoint. */
1187 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1191 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1192 if (addr
== env
->watchpoint
[i
].vaddr
)
1195 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1198 i
= env
->nb_watchpoints
++;
1199 env
->watchpoint
[i
].vaddr
= addr
;
1200 tlb_flush_page(env
, addr
);
1201 /* FIXME: This flush is needed because of the hack to make memory ops
1202 terminate the TB. It can be removed once the proper IO trap and
1203 re-execute bits are in. */
1208 /* Remove a watchpoint. */
1209 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1213 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1214 if (addr
== env
->watchpoint
[i
].vaddr
) {
1215 env
->nb_watchpoints
--;
1216 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1217 tlb_flush_page(env
, addr
);
1224 /* Remove all watchpoints. */
1225 void cpu_watchpoint_remove_all(CPUState
*env
) {
1228 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1229 tlb_flush_page(env
, env
->watchpoint
[i
].vaddr
);
1231 env
->nb_watchpoints
= 0;
1234 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1235 breakpoint is reached */
1236 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1238 #if defined(TARGET_HAS_ICE)
1241 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1242 if (env
->breakpoints
[i
] == pc
)
1246 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1248 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1250 breakpoint_invalidate(env
, pc
);
1257 /* remove all breakpoints */
1258 void cpu_breakpoint_remove_all(CPUState
*env
) {
1259 #if defined(TARGET_HAS_ICE)
1261 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1262 breakpoint_invalidate(env
, env
->breakpoints
[i
]);
1264 env
->nb_breakpoints
= 0;
1268 /* remove a breakpoint */
1269 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1271 #if defined(TARGET_HAS_ICE)
1273 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1274 if (env
->breakpoints
[i
] == pc
)
1279 env
->nb_breakpoints
--;
1280 if (i
< env
->nb_breakpoints
)
1281 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1283 breakpoint_invalidate(env
, pc
);
1290 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1291 CPU loop after each instruction */
1292 void cpu_single_step(CPUState
*env
, int enabled
)
1294 #if defined(TARGET_HAS_ICE)
1295 if (env
->singlestep_enabled
!= enabled
) {
1296 env
->singlestep_enabled
= enabled
;
        /* must flush all the translated code to avoid inconsistencies */
1298 /* XXX: only flush what is necessary */
1304 /* enable or disable low levels log */
1305 void cpu_set_log(int log_flags
)
1307 loglevel
= log_flags
;
1308 if (loglevel
&& !logfile
) {
1309 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1311 perror(logfilename
);
1314 #if !defined(CONFIG_SOFTMMU)
1315 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1317 static uint8_t logfile_buf
[4096];
1318 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1321 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1325 if (!loglevel
&& logfile
) {
1331 void cpu_set_log_filename(const char *filename
)
1333 logfilename
= strdup(filename
);
1338 cpu_set_log(loglevel
);
1341 /* mask must never be zero, except for A20 change call */
1342 void cpu_interrupt(CPUState
*env
, int mask
)
1344 TranslationBlock
*tb
;
1345 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1347 env
->interrupt_request
|= mask
;
1348 /* if the cpu is currently executing code, we must unlink it and
1349 all the potentially executing TB */
1350 tb
= env
->current_tb
;
1351 if (tb
&& !testandset(&interrupt_lock
)) {
1352 env
->current_tb
= NULL
;
1353 tb_reset_jump_recursive(tb
);
1354 resetlock(&interrupt_lock
);
1358 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1360 env
->interrupt_request
&= ~mask
;
1363 CPULogItem cpu_log_items
[] = {
1364 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1365 "show generated host assembly code for each compiled TB" },
1366 { CPU_LOG_TB_IN_ASM
, "in_asm",
1367 "show target assembly code for each compiled TB" },
1368 { CPU_LOG_TB_OP
, "op",
1369 "show micro ops for each compiled TB" },
1370 { CPU_LOG_TB_OP_OPT
, "op_opt",
1373 "before eflags optimization and "
1375 "after liveness analysis" },
1376 { CPU_LOG_INT
, "int",
1377 "show interrupts/exceptions in short format" },
1378 { CPU_LOG_EXEC
, "exec",
1379 "show trace before each executed TB (lots of logs)" },
1380 { CPU_LOG_TB_CPU
, "cpu",
1381 "show CPU state before block translation" },
1383 { CPU_LOG_PCALL
, "pcall",
1384 "show protected mode far calls/returns/exceptions" },
1387 { CPU_LOG_IOPORT
, "ioport",
1388 "show all i/o ports accesses" },
1393 static int cmp1(const char *s1
, int n
, const char *s2
)
1395 if (strlen(s2
) != n
)
1397 return memcmp(s1
, s2
, n
) == 0;
1400 /* takes a comma separated list of log masks. Return 0 if error. */
1401 int cpu_str_to_log_mask(const char *str
)
1410 p1
= strchr(p
, ',');
1413 if(cmp1(p
,p1
-p
,"all")) {
1414 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1418 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1419 if (cmp1(p
, p1
- p
, item
->name
))
1433 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1440 fprintf(stderr
, "qemu: fatal: ");
1441 vfprintf(stderr
, fmt
, ap
);
1442 fprintf(stderr
, "\n");
1444 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1446 cpu_dump_state(env
, stderr
, fprintf
, 0);
1449 fprintf(logfile
, "qemu: fatal: ");
1450 vfprintf(logfile
, fmt
, ap2
);
1451 fprintf(logfile
, "\n");
1453 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1455 cpu_dump_state(env
, logfile
, fprintf
, 0);
1465 CPUState
*cpu_copy(CPUState
*env
)
1467 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1468 /* preserve chaining and index */
1469 CPUState
*next_cpu
= new_env
->next_cpu
;
1470 int cpu_index
= new_env
->cpu_index
;
1471 memcpy(new_env
, env
, sizeof(CPUState
));
1472 new_env
->next_cpu
= next_cpu
;
1473 new_env
->cpu_index
= cpu_index
;
1477 #if !defined(CONFIG_USER_ONLY)
1479 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1483 /* Discard jump cache entries for any tb which might potentially
1484 overlap the flushed page. */
1485 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1486 memset (&env
->tb_jmp_cache
[i
], 0,
1487 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1489 i
= tb_jmp_cache_hash_page(addr
);
1490 memset (&env
->tb_jmp_cache
[i
], 0,
1491 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1494 /* NOTE: if flush_global is true, also flush global entries (not
1496 void tlb_flush(CPUState
*env
, int flush_global
)
1500 #if defined(DEBUG_TLB)
1501 printf("tlb_flush:\n");
1503 /* must reset current TB so that interrupts cannot modify the
1504 links while we are modifying them */
1505 env
->current_tb
= NULL
;
1507 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1508 env
->tlb_table
[0][i
].addr_read
= -1;
1509 env
->tlb_table
[0][i
].addr_write
= -1;
1510 env
->tlb_table
[0][i
].addr_code
= -1;
1511 env
->tlb_table
[1][i
].addr_read
= -1;
1512 env
->tlb_table
[1][i
].addr_write
= -1;
1513 env
->tlb_table
[1][i
].addr_code
= -1;
1514 #if (NB_MMU_MODES >= 3)
1515 env
->tlb_table
[2][i
].addr_read
= -1;
1516 env
->tlb_table
[2][i
].addr_write
= -1;
1517 env
->tlb_table
[2][i
].addr_code
= -1;
1518 #if (NB_MMU_MODES == 4)
1519 env
->tlb_table
[3][i
].addr_read
= -1;
1520 env
->tlb_table
[3][i
].addr_write
= -1;
1521 env
->tlb_table
[3][i
].addr_code
= -1;
1526 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1528 #if !defined(CONFIG_SOFTMMU)
1529 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1532 if (env
->kqemu_enabled
) {
1533 kqemu_flush(env
, flush_global
);
1539 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1541 if (addr
== (tlb_entry
->addr_read
&
1542 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1543 addr
== (tlb_entry
->addr_write
&
1544 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1545 addr
== (tlb_entry
->addr_code
&
1546 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1547 tlb_entry
->addr_read
= -1;
1548 tlb_entry
->addr_write
= -1;
1549 tlb_entry
->addr_code
= -1;
1553 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1557 #if defined(DEBUG_TLB)
1558 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1560 /* must reset current TB so that interrupts cannot modify the
1561 links while we are modifying them */
1562 env
->current_tb
= NULL
;
1564 addr
&= TARGET_PAGE_MASK
;
1565 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1566 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1567 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1568 #if (NB_MMU_MODES >= 3)
1569 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1570 #if (NB_MMU_MODES == 4)
1571 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1575 tlb_flush_jmp_cache(env
, addr
);
1577 #if !defined(CONFIG_SOFTMMU)
1578 if (addr
< MMAP_AREA_END
)
1579 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1582 if (env
->kqemu_enabled
) {
1583 kqemu_flush_page(env
, addr
);
1588 /* update the TLBs so that writes to code in the virtual page 'addr'
1590 static void tlb_protect_code(ram_addr_t ram_addr
)
1592 cpu_physical_memory_reset_dirty(ram_addr
,
1593 ram_addr
+ TARGET_PAGE_SIZE
,
1597 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1598 tested for self modifying code */
1599 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1602 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1605 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1606 unsigned long start
, unsigned long length
)
1609 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1610 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1611 if ((addr
- start
) < length
) {
1612 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1617 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1621 unsigned long length
, start1
;
1625 start
&= TARGET_PAGE_MASK
;
1626 end
= TARGET_PAGE_ALIGN(end
);
1628 length
= end
- start
;
1631 len
= length
>> TARGET_PAGE_BITS
;
1633 /* XXX: should not depend on cpu context */
1635 if (env
->kqemu_enabled
) {
1638 for(i
= 0; i
< len
; i
++) {
1639 kqemu_set_notdirty(env
, addr
);
1640 addr
+= TARGET_PAGE_SIZE
;
1644 mask
= ~dirty_flags
;
1645 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1646 for(i
= 0; i
< len
; i
++)
1649 /* we modify the TLB cache so that the dirty bit will be set again
1650 when accessing the range */
1651 start1
= start
+ (unsigned long)phys_ram_base
;
1652 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1653 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1654 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1655 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1656 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1657 #if (NB_MMU_MODES >= 3)
1658 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1659 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1660 #if (NB_MMU_MODES == 4)
1661 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1662 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1667 #if !defined(CONFIG_SOFTMMU)
1668 /* XXX: this is expensive */
1674 for(i
= 0; i
< L1_SIZE
; i
++) {
1677 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1678 for(j
= 0; j
< L2_SIZE
; j
++) {
1679 if (p
->valid_tag
== virt_valid_tag
&&
1680 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1681 (p
->prot
& PROT_WRITE
)) {
1682 if (addr
< MMAP_AREA_END
) {
1683 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1684 p
->prot
& ~PROT_WRITE
);
1687 addr
+= TARGET_PAGE_SIZE
;
1696 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1698 ram_addr_t ram_addr
;
1700 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1701 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1702 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1703 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1704 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1709 /* update the TLB according to the current state of the dirty bits */
1710 void cpu_tlb_update_dirty(CPUState
*env
)
1713 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1714 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1715 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1716 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1717 #if (NB_MMU_MODES >= 3)
1718 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1719 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1720 #if (NB_MMU_MODES == 4)
1721 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1722 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1727 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1728 unsigned long start
)
1731 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1732 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1733 if (addr
== start
) {
1734 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1739 /* update the TLB corresponding to virtual page vaddr and phys addr
1740 addr so that it is no longer dirty */
1741 static inline void tlb_set_dirty(CPUState
*env
,
1742 unsigned long addr
, target_ulong vaddr
)
1746 addr
&= TARGET_PAGE_MASK
;
1747 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1748 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1749 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1750 #if (NB_MMU_MODES >= 3)
1751 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1752 #if (NB_MMU_MODES == 4)
1753 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1758 /* add a new TLB entry. At most one entry for a given virtual address
1759 is permitted. Return 0 if OK or 2 if the page could not be mapped
1760 (can only happen in non SOFTMMU mode for I/O pages or pages
1761 conflicting with the host address space). */
1762 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1763 target_phys_addr_t paddr
, int prot
,
1764 int mmu_idx
, int is_softmmu
)
1769 target_ulong address
;
1770 target_phys_addr_t addend
;
1775 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1777 pd
= IO_MEM_UNASSIGNED
;
1779 pd
= p
->phys_offset
;
1781 #if defined(DEBUG_TLB)
1782 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1783 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1787 #if !defined(CONFIG_SOFTMMU)
1791 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1792 /* IO memory case */
1793 address
= vaddr
| pd
;
1796 /* standard memory */
1798 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1801 /* Make accesses to pages with watchpoints go via the
1802 watchpoint trap routines. */
1803 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1804 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1805 if (address
& ~TARGET_PAGE_MASK
) {
1806 env
->watchpoint
[i
].addend
= 0;
1807 address
= vaddr
| io_mem_watch
;
1809 env
->watchpoint
[i
].addend
= pd
- paddr
+
1810 (unsigned long) phys_ram_base
;
1811 /* TODO: Figure out how to make read watchpoints coexist
1813 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1818 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1820 te
= &env
->tlb_table
[mmu_idx
][index
];
1821 te
->addend
= addend
;
1822 if (prot
& PAGE_READ
) {
1823 te
->addr_read
= address
;
1828 if (prot
& PAGE_EXEC
) {
1829 te
->addr_code
= address
;
1833 if (prot
& PAGE_WRITE
) {
1834 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1835 (pd
& IO_MEM_ROMD
)) {
1836 /* write access calls the I/O callback */
1837 te
->addr_write
= vaddr
|
1838 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1839 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1840 !cpu_physical_memory_is_dirty(pd
)) {
1841 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1843 te
->addr_write
= address
;
1846 te
->addr_write
= -1;
1849 #if !defined(CONFIG_SOFTMMU)
1851 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1852 /* IO access: no mapping is done as it will be handled by the
1854 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1859 if (vaddr
>= MMAP_AREA_END
) {
1862 if (prot
& PROT_WRITE
) {
1863 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1864 #if defined(TARGET_HAS_SMC) || 1
1867 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1868 !cpu_physical_memory_is_dirty(pd
))) {
1869 /* ROM: we do as if code was inside */
1870 /* if code is present, we only map as read only and save the
1874 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1877 vp
->valid_tag
= virt_valid_tag
;
1878 prot
&= ~PAGE_WRITE
;
1881 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1882 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1883 if (map_addr
== MAP_FAILED
) {
1884 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1896 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1898 #if !defined(CONFIG_SOFTMMU)
1901 #if defined(DEBUG_TLB)
1902 printf("page_unprotect: addr=0x%08x\n", addr
);
1904 addr
&= TARGET_PAGE_MASK
;
1906 /* if it is not mapped, no need to worry here */
1907 if (addr
>= MMAP_AREA_END
)
1909 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1912 /* NOTE: in this case, validate_tag is _not_ tested as it
1913 validates only the code TLB */
1914 if (vp
->valid_tag
!= virt_valid_tag
)
1916 if (!(vp
->prot
& PAGE_WRITE
))
1918 #if defined(DEBUG_TLB)
1919 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1920 addr
, vp
->phys_addr
, vp
->prot
);
1922 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1923 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1924 (unsigned long)addr
, vp
->prot
);
1925 /* set the dirty bit */
1926 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1927 /* flush the code inside */
1928 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1937 void tlb_flush(CPUState
*env
, int flush_global
)
1941 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1945 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1946 target_phys_addr_t paddr
, int prot
,
1947 int mmu_idx
, int is_softmmu
)
1952 /* dump memory mappings */
1953 void page_dump(FILE *f
)
1955 unsigned long start
, end
;
1956 int i
, j
, prot
, prot1
;
1959 fprintf(f
, "%-8s %-8s %-8s %s\n",
1960 "start", "end", "size", "prot");
1964 for(i
= 0; i
<= L1_SIZE
; i
++) {
1969 for(j
= 0;j
< L2_SIZE
; j
++) {
1974 if (prot1
!= prot
) {
1975 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1977 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1978 start
, end
, end
- start
,
1979 prot
& PAGE_READ
? 'r' : '-',
1980 prot
& PAGE_WRITE
? 'w' : '-',
1981 prot
& PAGE_EXEC
? 'x' : '-');
1995 int page_get_flags(target_ulong address
)
1999 p
= page_find(address
>> TARGET_PAGE_BITS
);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
2008 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2013 /* mmap_lock should already be held. */
2014 start
= start
& TARGET_PAGE_MASK
;
2015 end
= TARGET_PAGE_ALIGN(end
);
2016 if (flags
& PAGE_WRITE
)
2017 flags
|= PAGE_WRITE_ORG
;
2018 spin_lock(&tb_lock
);
2019 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2020 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2021 /* if the write protection is set, then we invalidate the code
2023 if (!(p
->flags
& PAGE_WRITE
) &&
2024 (flags
& PAGE_WRITE
) &&
2026 tb_invalidate_phys_page(addr
, 0, NULL
);
2030 spin_unlock(&tb_lock
);
2033 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2040 start
= start
& TARGET_PAGE_MASK
;
2043 /* we've wrapped around */
2045 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2046 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2049 if( !(p
->flags
& PAGE_VALID
) )
2052 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2054 if (flags
& PAGE_WRITE
) {
2055 if (!(p
->flags
& PAGE_WRITE_ORG
))
2057 /* unprotect the page if it was put read-only because it
2058 contains translated code */
2059 if (!(p
->flags
& PAGE_WRITE
)) {
2060 if (!page_unprotect(addr
, 0, NULL
))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2071 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2073 unsigned int page_index
, prot
, pindex
;
2075 target_ulong host_start
, host_end
, addr
;
2077 /* Technically this isn't safe inside a signal handler. However we
2078 know this only ever happens in a synchronous SEGV handler, so in
2079 practice it seems to be ok. */
2082 host_start
= address
& qemu_host_page_mask
;
2083 page_index
= host_start
>> TARGET_PAGE_BITS
;
2084 p1
= page_find(page_index
);
2089 host_end
= host_start
+ qemu_host_page_size
;
2092 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2096 /* if the page was really writable, then we change its
2097 protection back to writable */
2098 if (prot
& PAGE_WRITE_ORG
) {
2099 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2100 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2101 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2102 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2103 p1
[pindex
].flags
|= PAGE_WRITE
;
2104 /* and since the content will be modified, we must invalidate
2105 the corresponding translated code. */
2106 tb_invalidate_phys_page(address
, pc
, puc
);
2107 #ifdef DEBUG_TB_CHECK
2108 tb_invalidate_check(address
);
2118 static inline void tlb_set_dirty(CPUState
*env
,
2119 unsigned long addr
, target_ulong vaddr
)
2122 #endif /* defined(CONFIG_USER_ONLY) */
2124 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2126 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2127 ram_addr_t orig_memory
);
2128 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2131 if (addr > start_addr) \
2134 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2135 if (start_addr2 > 0) \
2139 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2140 end_addr2 = TARGET_PAGE_SIZE - 1; \
2142 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2143 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2148 /* register physical memory. 'size' must be a multiple of the target
2149 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2151 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2153 ram_addr_t phys_offset
)
2155 target_phys_addr_t addr
, end_addr
;
2158 ram_addr_t orig_size
= size
;
2162 /* XXX: should not depend on cpu context */
2164 if (env
->kqemu_enabled
) {
2165 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2168 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2169 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2170 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2171 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2172 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2173 ram_addr_t orig_memory
= p
->phys_offset
;
2174 target_phys_addr_t start_addr2
, end_addr2
;
2175 int need_subpage
= 0;
2177 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2179 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2180 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2181 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2182 &p
->phys_offset
, orig_memory
);
2184 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2187 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2189 p
->phys_offset
= phys_offset
;
2190 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2191 (phys_offset
& IO_MEM_ROMD
))
2192 phys_offset
+= TARGET_PAGE_SIZE
;
2195 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2196 p
->phys_offset
= phys_offset
;
2197 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2198 (phys_offset
& IO_MEM_ROMD
))
2199 phys_offset
+= TARGET_PAGE_SIZE
;
2201 target_phys_addr_t start_addr2
, end_addr2
;
2202 int need_subpage
= 0;
2204 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2205 end_addr2
, need_subpage
);
2207 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2208 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2209 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2210 subpage_register(subpage
, start_addr2
, end_addr2
,
2217 /* since each CPU stores ram addresses in its TLB cache, we must
2218 reset the modified entries */
2220 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2225 /* XXX: temporary until new memory mapping API */
2226 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2230 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2232 return IO_MEM_UNASSIGNED
;
2233 return p
->phys_offset
;
2236 /* XXX: better than nothing */
2237 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2240 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2241 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
"\n",
2242 (uint64_t)size
, (uint64_t)phys_ram_size
);
2245 addr
= phys_ram_alloc_offset
;
2246 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2250 void qemu_ram_free(ram_addr_t addr
)
2254 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2256 #ifdef DEBUG_UNASSIGNED
2257 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2260 do_unassigned_access(addr
, 0, 0, 0);
2262 do_unassigned_access(addr
, 0, 0, 0);
2267 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2269 #ifdef DEBUG_UNASSIGNED
2270 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2273 do_unassigned_access(addr
, 1, 0, 0);
2275 do_unassigned_access(addr
, 1, 0, 0);
2279 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2280 unassigned_mem_readb
,
2281 unassigned_mem_readb
,
2282 unassigned_mem_readb
,
2285 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2286 unassigned_mem_writeb
,
2287 unassigned_mem_writeb
,
2288 unassigned_mem_writeb
,
2291 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2293 unsigned long ram_addr
;
2295 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2296 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2297 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2298 #if !defined(CONFIG_USER_ONLY)
2299 tb_invalidate_phys_page_fast(ram_addr
, 1);
2300 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2303 stb_p((uint8_t *)(long)addr
, val
);
2305 if (cpu_single_env
->kqemu_enabled
&&
2306 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2307 kqemu_modify_page(cpu_single_env
, ram_addr
);
2309 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2310 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2311 /* we remove the notdirty callback only if the code has been
2313 if (dirty_flags
== 0xff)
2314 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2317 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2319 unsigned long ram_addr
;
2321 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2322 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2323 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2324 #if !defined(CONFIG_USER_ONLY)
2325 tb_invalidate_phys_page_fast(ram_addr
, 2);
2326 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2329 stw_p((uint8_t *)(long)addr
, val
);
2331 if (cpu_single_env
->kqemu_enabled
&&
2332 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2333 kqemu_modify_page(cpu_single_env
, ram_addr
);
2335 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2336 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2337 /* we remove the notdirty callback only if the code has been
2339 if (dirty_flags
== 0xff)
2340 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
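
/* Illustrative sketch (not part of the original file, not compiled): the
   per-page dirty byte set above is what device models poll through the
   dirty-bitmap helpers in cpu-all.h (cpu_physical_memory_get_dirty() and
   cpu_physical_memory_reset_dirty() are assumed here). A display adapter,
   for example, only redraws pages whose VGA_DIRTY_FLAG is set and then
   rearms the flag. The framebuffer offset and size are placeholders. */
#if 0
static void example_poll_dirty(ram_addr_t fb_offset, ram_addr_t fb_size)
{
    ram_addr_t page;

    for (page = fb_offset; page < fb_offset + fb_size; page += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    /* rearm dirty tracking for the whole framebuffer */
    cpu_physical_memory_reset_dirty(fb_offset, fb_offset + fb_size,
                                    VGA_DIRTY_FLAG);
}
#endif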
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
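
/* Illustrative sketch (not part of the original file, not compiled): the
   watch_mem_* handlers above only fire for pages that the TLB code has
   redirected to io_mem_watch, which happens once a debugger (e.g. the gdb
   stub) installs a watchpoint. The cpu_watchpoint_insert(env, vaddr) call
   below is assumed to match the helper defined earlier in this file. */
#if 0
static void example_watch_guest_var(CPUState *env, target_ulong vaddr)
{
    /* trap guest writes to vaddr; hits are reported via CPU_INTERRUPT_DEBUG
       in check_watchpoint() above */
    cpu_watchpoint_insert(env, vaddr);
}
#endif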
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
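
/* Illustrative sketch (not part of the original file, not compiled): a
   device model registers its byte/word/long handlers as described in the
   comment above and maps the returned token over a page of guest physical
   address space. The device struct, handler bodies and base address are
   made-up placeholders. */
#if 0
typedef struct ExampleDev { uint32_t reg; } ExampleDev;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDev *d = opaque;
    return d->reg;
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDev *d = opaque;
    d->reg = val;
}

/* only 32 bit access implemented; NULL entries mark the missing widths */
static CPUReadMemoryFunc *example_read[3] = {
    NULL, NULL, example_readl,
};
static CPUWriteMemoryFunc *example_write[3] = {
    NULL, NULL, example_writel,
};

static void example_init(ExampleDev *d)
{
    int io;

    /* io_index == 0: allocate a fresh I/O zone */
    io = cpu_register_io_memory(0, example_read, example_write, d);
    /* back one guest page at 0xf0000000 with these handlers */
    cpu_register_physical_memory(0xf0000000, TARGET_PAGE_SIZE, io);
}
#endif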
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
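
/* Illustrative sketch (not part of the original file, not compiled):
   callers normally go through the cpu_physical_memory_read()/write()
   wrappers around cpu_physical_memory_rw(), e.g. a DMA engine fetching a
   descriptor out of guest memory. The descriptor layout and address are
   made-up placeholders. */
#if 0
static void example_dma_fetch(target_phys_addr_t desc_addr)
{
    uint8_t desc[16];

    /* read 16 bytes of guest physical memory (RAM or MMIO) */
    cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
    /* ... decode the descriptor, then write back a status byte ... */
    cpu_physical_memory_write(desc_addr + 15, &desc[15], 1);
}
#endif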
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
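
/* Illustrative sketch (not part of the original file, not compiled):
   cpu_memory_rw_debug() is the entry point a debugger front end such as the
   gdb stub uses to read or write guest *virtual* memory without going
   through the TLB fast path. The wrapper name and buffer size below are
   made-up placeholders. */
#if 0
static int example_read_guest_insn(CPUState *env, target_ulong pc,
                                   uint8_t *buf, int size)
{
    /* is_write == 0: read 'size' bytes starting at virtual address pc */
    return cpu_memory_rw_debug(env, pc, buf, size, 0);
}
#endif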
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif