/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
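/* Illustration (a sketch, not part of the build): with the usual
   TARGET_PAGE_BITS = 12 and L2_BITS = 10 on a 32 bit guest, L1_BITS is
   32 - 10 - 12 = 10, so a page index splits into a 10 bit L1 slot and a
   10 bit L2 slot:

       target_ulong index = addr >> TARGET_PAGE_BITS;
       PageDesc *l2 = l1_map[index >> L2_BITS];      // first level
       PageDesc *p  = l2 + (index & (L2_SIZE - 1));  // second level
*/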
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
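/* Example (illustrative only): map_exec(buf + 100, 50) with a 4096 byte
   host page rounds 'start' down and 'end' up to page boundaries, so the
   whole host page containing the 50 byte range becomes read/write/execute:

       static uint8_t buf[8192];
       map_exec(buf + 100, 50);   // mprotect()s one full host page
*/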
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
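/* Illustration (a sketch): for targets with more than 32 physical address
   bits, l1_phys_map gains an extra indirection, so a lookup walks up to
   three levels before reaching the PhysPageDesc:

       PhysPageDesc *pd = phys_page_find(paddr >> TARGET_PAGE_BITS);
       ram_addr_t off = pd ? pd->phys_offset : IO_MEM_UNASSIGNED;

   The low 12 bits of phys_offset carry the io_index, the remaining bits
   the offset of the page inside host RAM. */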
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
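/* Example (illustrative arithmetic): with the default 32 MB buffer and a
   CODE_GEN_AVG_BLOCK_SIZE of e.g. 128 bytes, code_gen_max_blocks comes out
   at 32 * 1024 * 1024 / 128 = 262144 TB descriptors; the flush threshold
   is the buffer size minus the worst-case size of a single block, so one
   more block can always be generated before tb_flush() is forced. */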
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
    io_mem_init();
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
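/* Illustration (a sketch): the page_next[] and jmp_first/jmp_next[] lists
   store a small tag in the two low bits of each TranslationBlock pointer.
   For page lists the tag is the page slot (0 or 1) the TB occupies; for
   jump lists tag 2 marks the list head. Unpacking is always:

       n1  = (long)tb1 & 3;                          // tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   // real pointer

   This works because TranslationBlock structures are at least 4 byte
   aligned, so the two low bits of a real pointer are always free. */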
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
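/* Worked example (illustrative): set_bits(tab, 3, 7) must set bits 3..9.
   Bits 3..7 live in tab[0] and bits 8..9 in tab[1], so the first byte is
   OR'ed with mask 0xf8 (0xff << 3) and the partial last byte with mask
   0x03 (~(0xff << 2)); the middle loop would fill any full 0xff bytes in
   between for longer ranges. */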
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
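/* Illustration (a sketch): for a 2 byte write at page offset 0x123 the
   fast path tests exactly two bits of the code bitmap:

       offset = start & ~TARGET_PAGE_MASK;              // 0x123
       b = p->code_bitmap[offset >> 3] >> (offset & 7); // byte 36, shift 3
       if (b & ((1 << len) - 1))                        // any of 'len' bits set?
           ... full range invalidate ...

   Only when a tested bit is set does the expensive
   tb_invalidate_phys_page_range() run. */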
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
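/* Usage sketch (illustrative): how a front end publishes a freshly
   translated block, computing the second physical page only when the
   generated code crosses a page boundary (this mirrors tb_gen_code()
   above):

       virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
       phys_page2 = -1;
       if ((pc & TARGET_PAGE_MASK) != virt_page2)
           phys_page2 = get_phys_addr_code(env, virt_page2);
       tb_link_phys(tb, phys_pc, phys_page2);
*/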
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
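/* Illustration (a sketch): mapping a host fault address back to the guest
   TB that produced it, e.g. from a SIGSEGV handler. 'host_pc' stands for
   the host PC taken from the signal context (name invented for the
   example):

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
       if (tb)
           cpu_restore_state(tb, env, (unsigned long)host_pc, NULL);

   The binary search works because tbs[] entries are allocated in
   increasing tc_ptr order between flushes. */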
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
1187 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1191 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1192 if (addr
== env
->watchpoint
[i
].vaddr
)
1195 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1198 i
= env
->nb_watchpoints
++;
1199 env
->watchpoint
[i
].vaddr
= addr
;
1200 tlb_flush_page(env
, addr
);
1201 /* FIXME: This flush is needed because of the hack to make memory ops
1202 terminate the TB. It can be removed once the proper IO trap and
1203 re-execute bits are in. */
1208 /* Remove a watchpoint. */
1209 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1213 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1214 if (addr
== env
->watchpoint
[i
].vaddr
) {
1215 env
->nb_watchpoints
--;
1216 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1217 tlb_flush_page(env
, addr
);
1224 /* Remove all watchpoints. */
1225 void cpu_watchpoint_remove_all(CPUState
*env
) {
1228 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1229 tlb_flush_page(env
, env
->watchpoint
[i
].vaddr
);
1231 env
->nb_watchpoints
= 0;
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
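/* Usage example (illustrative): enabling the "in_asm" and "exec" traces
   from monitor or command line handling code:

       cpu_set_log_filename("/tmp/qemu.log");
       cpu_set_log(cpu_str_to_log_mask("in_asm,exec"));

   cpu_str_to_log_mask() (defined below) parses the comma separated list
   against the cpu_log_items[] table. */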
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif

    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
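/* Illustration (a sketch): TB_JMP_PAGE_SIZE consecutive tb_jmp_cache slots
   cover one guest page, so flushing a page clears its slot run and the run
   of the page just below it, because a TB that ends on the flushed page
   may have started on the previous one:

       i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
       memset(&env->tb_jmp_cache[i], 0,
              TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
*/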
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
*tlb_entry
, target_ulong addr
)
1552 if (addr
== (tlb_entry
->addr_read
&
1553 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1554 addr
== (tlb_entry
->addr_write
&
1555 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1556 addr
== (tlb_entry
->addr_code
&
1557 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1558 tlb_entry
->addr_read
= -1;
1559 tlb_entry
->addr_write
= -1;
1560 tlb_entry
->addr_code
= -1;
1564 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1568 #if defined(DEBUG_TLB)
1569 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1571 /* must reset current TB so that interrupts cannot modify the
1572 links while we are modifying them */
1573 env
->current_tb
= NULL
;
1575 addr
&= TARGET_PAGE_MASK
;
1576 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1577 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1578 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1579 #if (NB_MMU_MODES >= 3)
1580 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1581 #if (NB_MMU_MODES == 4)
1582 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1586 tlb_flush_jmp_cache(env
, addr
);
1588 #if !defined(CONFIG_SOFTMMU)
1589 if (addr
< MMAP_AREA_END
)
1590 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1593 if (env
->kqemu_enabled
) {
1594 kqemu_flush_page(env
, addr
);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
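/* Illustration (a sketch): phys_ram_dirty keeps one byte of dirty flags per
   target page, and clearing one flag for a range leaves the other per-page
   flags (e.g. the code dirty bit) untouched:

       mask = ~dirty_flags;                             // e.g. ~CODE_DIRTY_FLAG
       p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
       for(i = 0; i < len; i++)
           p[i] &= mask;
*/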
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    PageDesc *p1;
                    TranslationBlock *first_tb;

                    p1 = page_find(pd >> TARGET_PAGE_BITS);
                    first_tb = p1 ? p1->first_tb : NULL;
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
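/* Illustration (a sketch): after 'addend -= vaddr', a RAM access on a TLB
   hit reduces to a single add, because te->addend maps a guest virtual
   address straight to its host buffer location:

       uint8_t *host = (uint8_t *)(guest_vaddr + te->addend);

   ('guest_vaddr' and 'host' are illustrative names, not variables from
   this file.) */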
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
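/* Usage example (illustrative): mapping 8 MB of RAM at guest physical 0
   and one ROM page at the top of the address space, where 'rom_offset'
   stands for an offset previously returned by qemu_ram_alloc() (name
   invented for the example):

       cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                    0 | IO_MEM_RAM);
       cpu_register_physical_memory(0xfffff000, TARGET_PAGE_SIZE,
                                    rom_offset | IO_MEM_ROM);
*/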
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
2300 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2302 unsigned long ram_addr
;
2304 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2305 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2306 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2307 #if !defined(CONFIG_USER_ONLY)
2308 tb_invalidate_phys_page_fast(ram_addr
, 1);
2309 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2312 stb_p((uint8_t *)(long)addr
, val
);
2314 if (cpu_single_env
->kqemu_enabled
&&
2315 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2316 kqemu_modify_page(cpu_single_env
, ram_addr
);
2318 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2319 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2320 /* we remove the notdirty callback only if the code has been
2322 if (dirty_flags
== 0xff)
2323 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
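
/* Editor's note: an illustrative sketch, not in the original source, of
   how the per-page dirty byte written above is consumed.  Each target
   page has one uint8_t in phys_ram_dirty: CODE_DIRTY_FLAG is one bit of
   it, and the remaining bits serve clients such as the VGA refresh code.
   The helper name "example_page_needs_refresh" is invented. */
#if 0
static int example_page_needs_refresh(ram_addr_t ram_addr, int client_flag)
{
    /* a page is dirty for a client while its flag bit is still set */
    return phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & client_flag;
}
#endif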
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
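
/* Editor's note: an illustrative sketch, not in the original source.
   Subpages exist so that physical-memory registration can cope with
   regions that are not page-aligned: when a device covers only part of
   a target page, the page is backed by a subpage_t that dispatches each
   access to the right handler.  The address and helper name below are
   invented. */
#if 0
static void example_map_partial_page(int dev_io_index)
{
    /* map a device into the first 0x80 bytes of a page; the registration
       code transparently creates a subpage and routes the remaining
       bytes to IO_MEM_UNASSIGNED */
    cpu_register_physical_memory(0x10000000, 0x80, dev_io_index);
}
#endif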
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
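
/* Editor's note: an illustrative sketch, not in the original source, of
   a typical caller: device DMA and the monitor use this entry point to
   copy between a host buffer and guest physical memory.  The address and
   function name are invented. */
#if 0
static void example_dma_to_guest(const uint8_t *host_buf, int size)
{
    /* cpu_physical_memory_write() is the rw wrapper with is_write = 1;
       here it copies host_buf into guest RAM/MMIO at 0x100000 */
    cpu_physical_memory_write(0x100000, host_buf, size);
}
#endif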
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: read the two 32-bit halves in target byte order */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
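
/* Editor's note: an illustrative sketch, not in the original source.
   The ld*_phys/st*_phys helpers give aligned, fixed-width access to
   guest physical memory and byte-swap into target order; a typical use
   is walking guest page tables from MMU or device code.  The address
   and helper name below are invented. */
#if 0
static void example_patch_word(target_phys_addr_t pa)
{
    uint32_t w = ldl_phys(pa);   /* read a 32-bit word, target order */
    stl_phys(pa, w | 1);         /* write it back with bit 0 set */
}
#endif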
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
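
/* Editor's note: an illustrative sketch, not in the original source.
   The _notdirty variants deliberately skip the dirty-bitmap update, so
   a software-MMU target can set accessed/dirty bits in a guest PTE
   without that store itself being treated as a guest write.  The PTE
   bit layout below is invented. */
#if 0
static void example_set_pte_dirty(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x40);  /* hypothetical D bit */
}
#endif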
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
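
/* Editor's note: an illustrative sketch, not in the original source, of
   a gdbstub-style caller: translate through the guest MMU one page at a
   time, then fall back on physical access.  The helper name is
   invented. */
#if 0
static int example_peek_guest_u32(CPUState *env, target_ulong va, uint32_t *out)
{
    /* is_write = 0: read 4 bytes from the guest virtual address space */
    if (cpu_memory_rw_debug(env, va, (uint8_t *)out, 4, 0) < 0)
        return -1;   /* page not mapped */
    return 0;
}
#endif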
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif