2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 static TranslationBlock
*tbs
;
88 int code_gen_max_blocks
;
89 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
102 #define code_gen_section \
103 __attribute__((aligned (32)))
106 uint8_t code_gen_prologue
[1024] code_gen_section
;
107 static uint8_t *code_gen_buffer
;
108 static unsigned long code_gen_buffer_size
;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size
;
111 uint8_t *code_gen_ptr
;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size
;
116 uint8_t *phys_ram_base
;
117 uint8_t *phys_ram_dirty
;
118 static int in_migration
;
119 static ram_addr_t phys_ram_alloc_offset
= 0;
123 /* current CPU in the current thread. It is only valid inside
125 CPUState
*cpu_single_env
;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
146 typedef struct PhysPageDesc
{
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset
;
149 ram_addr_t region_offset
;
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
166 unsigned long qemu_real_host_page_size
;
167 unsigned long qemu_host_page_bits
;
168 unsigned long qemu_host_page_size
;
169 unsigned long qemu_host_page_mask
;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc
*l1_map
[L1_SIZE
];
173 static PhysPageDesc
**l1_phys_map
;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
180 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
181 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
182 static int io_mem_nb
;
183 static int io_mem_watch
;
187 static const char *logfilename
= "/tmp/qemu.log";
190 static int log_append
= 0;
193 static int tlb_flush_count
;
194 static int tb_flush_count
;
195 static int tb_phys_invalidate_count
;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t
{
199 target_phys_addr_t base
;
200 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
201 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
202 void *opaque
[TARGET_PAGE_SIZE
][2][4];
203 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
207 static void map_exec(void *addr
, long size
)
210 VirtualProtect(addr
, size
,
211 PAGE_EXECUTE_READWRITE
, &old_protect
);
215 static void map_exec(void *addr
, long size
)
217 unsigned long start
, end
, page_size
;
219 page_size
= getpagesize();
220 start
= (unsigned long)addr
;
221 start
&= ~(page_size
- 1);
223 end
= (unsigned long)addr
+ size
;
224 end
+= page_size
- 1;
225 end
&= ~(page_size
- 1);
227 mprotect((void *)start
, end
- start
,
228 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
238 SYSTEM_INFO system_info
;
240 GetSystemInfo(&system_info
);
241 qemu_real_host_page_size
= system_info
.dwPageSize
;
244 qemu_real_host_page_size
= getpagesize();
246 if (qemu_host_page_size
== 0)
247 qemu_host_page_size
= qemu_real_host_page_size
;
248 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
249 qemu_host_page_size
= TARGET_PAGE_SIZE
;
250 qemu_host_page_bits
= 0;
251 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
252 qemu_host_page_bits
++;
253 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
254 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
255 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr
, endaddr
;
264 last_brk
= (unsigned long)sbrk(0);
265 f
= fopen("/proc/self/maps", "r");
268 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
270 startaddr
= MIN(startaddr
,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
272 endaddr
= MIN(endaddr
,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
274 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
275 TARGET_PAGE_ALIGN(endaddr
),
286 static inline PageDesc
**page_l1_map(target_ulong index
)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
294 return &l1_map
[index
>> L2_BITS
];
297 static inline PageDesc
*page_find_alloc(target_ulong index
)
300 lp
= page_l1_map(index
);
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
309 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
310 /* Don't use qemu_malloc because it may recurse. */
311 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
312 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
315 if (addr
== (target_ulong
)addr
) {
316 page_set_flags(addr
& TARGET_PAGE_MASK
,
317 TARGET_PAGE_ALIGN(addr
+ len
),
321 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
325 return p
+ (index
& (L2_SIZE
- 1));
328 static inline PageDesc
*page_find(target_ulong index
)
331 lp
= page_l1_map(index
);
338 return p
+ (index
& (L2_SIZE
- 1));
341 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
346 p
= (void **)l1_phys_map
;
347 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
349 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
350 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
352 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
355 /* allocate if not found */
358 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
359 memset(p
, 0, sizeof(void *) * L1_SIZE
);
363 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
367 /* allocate if not found */
370 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
372 for (i
= 0; i
< L2_SIZE
; i
++)
373 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
375 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
378 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
380 return phys_page_find_alloc(index
, 0);
383 #if !defined(CONFIG_USER_ONLY)
384 static void tlb_protect_code(ram_addr_t ram_addr
);
385 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
387 #define mmap_lock() do { } while(0)
388 #define mmap_unlock() do { } while(0)
391 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393 #if defined(CONFIG_USER_ONLY)
394 /* Currently it is not recommanded to allocate big chunks of data in
395 user mode. It will change when a dedicated libc will be used */
396 #define USE_STATIC_CODE_GEN_BUFFER
399 #ifdef USE_STATIC_CODE_GEN_BUFFER
400 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
403 static void code_gen_alloc(unsigned long tb_size
)
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
406 code_gen_buffer
= static_code_gen_buffer
;
407 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
408 map_exec(code_gen_buffer
, code_gen_buffer_size
);
410 code_gen_buffer_size
= tb_size
;
411 if (code_gen_buffer_size
== 0) {
412 #if defined(CONFIG_USER_ONLY)
413 /* in user mode, phys_ram_size is not meaningful */
414 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
416 /* XXX: needs ajustments */
417 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
420 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
421 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
422 /* The code gen buffer location may have constraints depending on
423 the host cpu and OS */
424 #if defined(__linux__)
429 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
430 #if defined(__x86_64__)
432 /* Cannot map more than that */
433 if (code_gen_buffer_size
> (800 * 1024 * 1024))
434 code_gen_buffer_size
= (800 * 1024 * 1024);
435 #elif defined(__sparc_v9__)
436 // Map the buffer below 2G, so we can use direct calls and branches
438 start
= (void *) 0x60000000UL
;
439 if (code_gen_buffer_size
> (512 * 1024 * 1024))
440 code_gen_buffer_size
= (512 * 1024 * 1024);
441 #elif defined(__arm__)
442 /* Map the buffer below 32M, so we can use direct calls and branches */
444 start
= (void *) 0x01000000UL
;
445 if (code_gen_buffer_size
> 16 * 1024 * 1024)
446 code_gen_buffer_size
= 16 * 1024 * 1024;
448 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
449 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
451 if (code_gen_buffer
== MAP_FAILED
) {
452 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
456 #elif defined(__FreeBSD__)
460 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
461 #if defined(__x86_64__)
462 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
463 * 0x40000000 is free */
465 addr
= (void *)0x40000000;
466 /* Cannot map more than that */
467 if (code_gen_buffer_size
> (800 * 1024 * 1024))
468 code_gen_buffer_size
= (800 * 1024 * 1024);
470 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
471 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
473 if (code_gen_buffer
== MAP_FAILED
) {
474 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
479 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
480 if (!code_gen_buffer
) {
481 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
484 map_exec(code_gen_buffer
, code_gen_buffer_size
);
486 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
487 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
488 code_gen_buffer_max_size
= code_gen_buffer_size
-
489 code_gen_max_block_size();
490 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
491 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
494 /* Must be called before using the QEMU cpus. 'tb_size' is the size
495 (in bytes) allocated to the translation buffer. Zero means default
497 void cpu_exec_init_all(unsigned long tb_size
)
500 code_gen_alloc(tb_size
);
501 code_gen_ptr
= code_gen_buffer
;
503 #if !defined(CONFIG_USER_ONLY)
508 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
510 #define CPU_COMMON_SAVE_VERSION 1
512 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
514 CPUState
*env
= opaque
;
516 qemu_put_be32s(f
, &env
->halted
);
517 qemu_put_be32s(f
, &env
->interrupt_request
);
520 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
522 CPUState
*env
= opaque
;
524 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
527 qemu_get_be32s(f
, &env
->halted
);
528 qemu_get_be32s(f
, &env
->interrupt_request
);
535 void cpu_exec_init(CPUState
*env
)
540 env
->next_cpu
= NULL
;
543 while (*penv
!= NULL
) {
544 penv
= (CPUState
**)&(*penv
)->next_cpu
;
547 env
->cpu_index
= cpu_index
;
548 TAILQ_INIT(&env
->breakpoints
);
549 TAILQ_INIT(&env
->watchpoints
);
551 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
552 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
553 cpu_common_save
, cpu_common_load
, env
);
554 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
555 cpu_save
, cpu_load
, env
);
559 static inline void invalidate_page_bitmap(PageDesc
*p
)
561 if (p
->code_bitmap
) {
562 qemu_free(p
->code_bitmap
);
563 p
->code_bitmap
= NULL
;
565 p
->code_write_count
= 0;
568 /* set to NULL all the 'first_tb' fields in all PageDescs */
569 static void page_flush_tb(void)
574 for(i
= 0; i
< L1_SIZE
; i
++) {
577 for(j
= 0; j
< L2_SIZE
; j
++) {
579 invalidate_page_bitmap(p
);
586 /* flush all the translation blocks */
587 /* XXX: tb_flush is currently not thread safe */
588 void tb_flush(CPUState
*env1
)
591 #if defined(DEBUG_FLUSH)
592 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
593 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
595 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
597 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
598 cpu_abort(env1
, "Internal error: code buffer overflow\n");
602 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
603 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
606 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
609 code_gen_ptr
= code_gen_buffer
;
610 /* XXX: flush processor icache at this point if cache flush is
615 #ifdef DEBUG_TB_CHECK
617 static void tb_invalidate_check(target_ulong address
)
619 TranslationBlock
*tb
;
621 address
&= TARGET_PAGE_MASK
;
622 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
623 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
624 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
625 address
>= tb
->pc
+ tb
->size
)) {
626 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
627 address
, (long)tb
->pc
, tb
->size
);
633 /* verify that all the pages have correct rights for code */
634 static void tb_page_check(void)
636 TranslationBlock
*tb
;
637 int i
, flags1
, flags2
;
639 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
640 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
641 flags1
= page_get_flags(tb
->pc
);
642 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
643 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
644 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
645 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
651 static void tb_jmp_check(TranslationBlock
*tb
)
653 TranslationBlock
*tb1
;
656 /* suppress any remaining jumps to this TB */
660 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
663 tb1
= tb1
->jmp_next
[n1
];
665 /* check end of list */
667 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
673 /* invalidate one TB */
674 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
677 TranslationBlock
*tb1
;
681 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
684 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
688 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
690 TranslationBlock
*tb1
;
696 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
698 *ptb
= tb1
->page_next
[n1
];
701 ptb
= &tb1
->page_next
[n1
];
705 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
707 TranslationBlock
*tb1
, **ptb
;
710 ptb
= &tb
->jmp_next
[n
];
713 /* find tb(n) in circular list */
717 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
718 if (n1
== n
&& tb1
== tb
)
721 ptb
= &tb1
->jmp_first
;
723 ptb
= &tb1
->jmp_next
[n1
];
726 /* now we can suppress tb(n) from the list */
727 *ptb
= tb
->jmp_next
[n
];
729 tb
->jmp_next
[n
] = NULL
;
733 /* reset the jump entry 'n' of a TB so that it is not chained to
735 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
737 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
740 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
745 target_phys_addr_t phys_pc
;
746 TranslationBlock
*tb1
, *tb2
;
748 /* remove the TB from the hash list */
749 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
750 h
= tb_phys_hash_func(phys_pc
);
751 tb_remove(&tb_phys_hash
[h
], tb
,
752 offsetof(TranslationBlock
, phys_hash_next
));
754 /* remove the TB from the page list */
755 if (tb
->page_addr
[0] != page_addr
) {
756 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
757 tb_page_remove(&p
->first_tb
, tb
);
758 invalidate_page_bitmap(p
);
760 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
761 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
762 tb_page_remove(&p
->first_tb
, tb
);
763 invalidate_page_bitmap(p
);
766 tb_invalidated_flag
= 1;
768 /* remove the TB from the hash list */
769 h
= tb_jmp_cache_hash_func(tb
->pc
);
770 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
771 if (env
->tb_jmp_cache
[h
] == tb
)
772 env
->tb_jmp_cache
[h
] = NULL
;
775 /* suppress this TB from the two jump lists */
776 tb_jmp_remove(tb
, 0);
777 tb_jmp_remove(tb
, 1);
779 /* suppress any remaining jumps to this TB */
785 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
786 tb2
= tb1
->jmp_next
[n1
];
787 tb_reset_jump(tb1
, n1
);
788 tb1
->jmp_next
[n1
] = NULL
;
791 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
793 tb_phys_invalidate_count
++;
796 static inline void set_bits(uint8_t *tab
, int start
, int len
)
802 mask
= 0xff << (start
& 7);
803 if ((start
& ~7) == (end
& ~7)) {
805 mask
&= ~(0xff << (end
& 7));
810 start
= (start
+ 8) & ~7;
812 while (start
< end1
) {
817 mask
= ~(0xff << (end
& 7));
823 static void build_page_bitmap(PageDesc
*p
)
825 int n
, tb_start
, tb_end
;
826 TranslationBlock
*tb
;
828 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
835 tb
= (TranslationBlock
*)((long)tb
& ~3);
836 /* NOTE: this is subtle as a TB may span two physical pages */
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
841 tb_end
= tb_start
+ tb
->size
;
842 if (tb_end
> TARGET_PAGE_SIZE
)
843 tb_end
= TARGET_PAGE_SIZE
;
846 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
848 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
849 tb
= tb
->page_next
[n
];
853 TranslationBlock
*tb_gen_code(CPUState
*env
,
854 target_ulong pc
, target_ulong cs_base
,
855 int flags
, int cflags
)
857 TranslationBlock
*tb
;
859 target_ulong phys_pc
, phys_page2
, virt_page2
;
862 phys_pc
= get_phys_addr_code(env
, pc
);
865 /* flush must be done */
867 /* cannot fail at this point */
869 /* Don't forget to invalidate previous TB info. */
870 tb_invalidated_flag
= 1;
872 tc_ptr
= code_gen_ptr
;
874 tb
->cs_base
= cs_base
;
877 cpu_gen_code(env
, tb
, &code_gen_size
);
878 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
880 /* check next page if needed */
881 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
883 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
884 phys_page2
= get_phys_addr_code(env
, virt_page2
);
886 tb_link_phys(tb
, phys_pc
, phys_page2
);
890 /* invalidate all TBs which intersect with the target physical page
891 starting in range [start;end[. NOTE: start and end must refer to
892 the same physical page. 'is_cpu_write_access' should be true if called
893 from a real cpu write access: the virtual CPU will exit the current
894 TB if code is modified inside this TB. */
895 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
896 int is_cpu_write_access
)
898 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
899 CPUState
*env
= cpu_single_env
;
900 target_ulong tb_start
, tb_end
;
903 #ifdef TARGET_HAS_PRECISE_SMC
904 int current_tb_not_found
= is_cpu_write_access
;
905 TranslationBlock
*current_tb
= NULL
;
906 int current_tb_modified
= 0;
907 target_ulong current_pc
= 0;
908 target_ulong current_cs_base
= 0;
909 int current_flags
= 0;
910 #endif /* TARGET_HAS_PRECISE_SMC */
912 p
= page_find(start
>> TARGET_PAGE_BITS
);
915 if (!p
->code_bitmap
&&
916 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
917 is_cpu_write_access
) {
918 /* build code bitmap */
919 build_page_bitmap(p
);
922 /* we remove all the TBs in the range [start, end[ */
923 /* XXX: see if in some cases it could be faster to invalidate all the code */
927 tb
= (TranslationBlock
*)((long)tb
& ~3);
928 tb_next
= tb
->page_next
[n
];
929 /* NOTE: this is subtle as a TB may span two physical pages */
931 /* NOTE: tb_end may be after the end of the page, but
932 it is not a problem */
933 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
934 tb_end
= tb_start
+ tb
->size
;
936 tb_start
= tb
->page_addr
[1];
937 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
939 if (!(tb_end
<= start
|| tb_start
>= end
)) {
940 #ifdef TARGET_HAS_PRECISE_SMC
941 if (current_tb_not_found
) {
942 current_tb_not_found
= 0;
944 if (env
->mem_io_pc
) {
945 /* now we have a real cpu fault */
946 current_tb
= tb_find_pc(env
->mem_io_pc
);
949 if (current_tb
== tb
&&
950 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
951 /* If we are modifying the current TB, we must stop
952 its execution. We could be more precise by checking
953 that the modification is after the current PC, but it
954 would require a specialized function to partially
955 restore the CPU state */
957 current_tb_modified
= 1;
958 cpu_restore_state(current_tb
, env
,
959 env
->mem_io_pc
, NULL
);
960 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 /* we need to do that to handle the case where a signal
965 occurs while doing tb_phys_invalidate() */
968 saved_tb
= env
->current_tb
;
969 env
->current_tb
= NULL
;
971 tb_phys_invalidate(tb
, -1);
973 env
->current_tb
= saved_tb
;
974 if (env
->interrupt_request
&& env
->current_tb
)
975 cpu_interrupt(env
, env
->interrupt_request
);
980 #if !defined(CONFIG_USER_ONLY)
981 /* if no code remaining, no need to continue to use slow writes */
983 invalidate_page_bitmap(p
);
984 if (is_cpu_write_access
) {
985 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
989 #ifdef TARGET_HAS_PRECISE_SMC
990 if (current_tb_modified
) {
991 /* we generate a block containing just the instruction
992 modifying the memory. It will ensure that it cannot modify
994 env
->current_tb
= NULL
;
995 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
996 cpu_resume_from_signal(env
, NULL
);
1001 /* len must be <= 8 and start must be a multiple of len */
1002 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1009 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1010 cpu_single_env
->mem_io_vaddr
, len
,
1011 cpu_single_env
->eip
,
1012 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1016 p
= page_find(start
>> TARGET_PAGE_BITS
);
1019 if (p
->code_bitmap
) {
1020 offset
= start
& ~TARGET_PAGE_MASK
;
1021 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1022 if (b
& ((1 << len
) - 1))
1026 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1030 #if !defined(CONFIG_SOFTMMU)
1031 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1032 unsigned long pc
, void *puc
)
1034 TranslationBlock
*tb
;
1037 #ifdef TARGET_HAS_PRECISE_SMC
1038 TranslationBlock
*current_tb
= NULL
;
1039 CPUState
*env
= cpu_single_env
;
1040 int current_tb_modified
= 0;
1041 target_ulong current_pc
= 0;
1042 target_ulong current_cs_base
= 0;
1043 int current_flags
= 0;
1046 addr
&= TARGET_PAGE_MASK
;
1047 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1051 #ifdef TARGET_HAS_PRECISE_SMC
1052 if (tb
&& pc
!= 0) {
1053 current_tb
= tb_find_pc(pc
);
1056 while (tb
!= NULL
) {
1058 tb
= (TranslationBlock
*)((long)tb
& ~3);
1059 #ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb
== tb
&&
1061 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1062 /* If we are modifying the current TB, we must stop
1063 its execution. We could be more precise by checking
1064 that the modification is after the current PC, but it
1065 would require a specialized function to partially
1066 restore the CPU state */
1068 current_tb_modified
= 1;
1069 cpu_restore_state(current_tb
, env
, pc
, puc
);
1070 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1073 #endif /* TARGET_HAS_PRECISE_SMC */
1074 tb_phys_invalidate(tb
, addr
);
1075 tb
= tb
->page_next
[n
];
1078 #ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_modified
) {
1080 /* we generate a block containing just the instruction
1081 modifying the memory. It will ensure that it cannot modify
1083 env
->current_tb
= NULL
;
1084 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1085 cpu_resume_from_signal(env
, puc
);
1091 /* add the tb in the target page and protect it if necessary */
1092 static inline void tb_alloc_page(TranslationBlock
*tb
,
1093 unsigned int n
, target_ulong page_addr
)
1096 TranslationBlock
*last_first_tb
;
1098 tb
->page_addr
[n
] = page_addr
;
1099 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1100 tb
->page_next
[n
] = p
->first_tb
;
1101 last_first_tb
= p
->first_tb
;
1102 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1103 invalidate_page_bitmap(p
);
1105 #if defined(TARGET_HAS_SMC) || 1
1107 #if defined(CONFIG_USER_ONLY)
1108 if (p
->flags
& PAGE_WRITE
) {
1113 /* force the host page as non writable (writes will have a
1114 page fault + mprotect overhead) */
1115 page_addr
&= qemu_host_page_mask
;
1117 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1118 addr
+= TARGET_PAGE_SIZE
) {
1120 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1124 p2
->flags
&= ~PAGE_WRITE
;
1125 page_get_flags(addr
);
1127 mprotect(g2h(page_addr
), qemu_host_page_size
,
1128 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1129 #ifdef DEBUG_TB_INVALIDATE
1130 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1135 /* if some code is already present, then the pages are already
1136 protected. So we handle the case where only the first TB is
1137 allocated in a physical page */
1138 if (!last_first_tb
) {
1139 tlb_protect_code(page_addr
);
1143 #endif /* TARGET_HAS_SMC */
1146 /* Allocate a new translation block. Flush the translation buffer if
1147 too many translation blocks or too much generated code. */
1148 TranslationBlock
*tb_alloc(target_ulong pc
)
1150 TranslationBlock
*tb
;
1152 if (nb_tbs
>= code_gen_max_blocks
||
1153 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1155 tb
= &tbs
[nb_tbs
++];
1161 void tb_free(TranslationBlock
*tb
)
1163 /* In practice this is mostly used for single use temporary TB
1164 Ignore the hard cases and just back up if this TB happens to
1165 be the last one generated. */
1166 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1167 code_gen_ptr
= tb
->tc_ptr
;
1172 /* add a new TB and link it to the physical page tables. phys_page2 is
1173 (-1) to indicate that only one page contains the TB. */
1174 void tb_link_phys(TranslationBlock
*tb
,
1175 target_ulong phys_pc
, target_ulong phys_page2
)
1178 TranslationBlock
**ptb
;
1180 /* Grab the mmap lock to stop another thread invalidating this TB
1181 before we are done. */
1183 /* add in the physical hash table */
1184 h
= tb_phys_hash_func(phys_pc
);
1185 ptb
= &tb_phys_hash
[h
];
1186 tb
->phys_hash_next
= *ptb
;
1189 /* add in the page list */
1190 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1191 if (phys_page2
!= -1)
1192 tb_alloc_page(tb
, 1, phys_page2
);
1194 tb
->page_addr
[1] = -1;
1196 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1197 tb
->jmp_next
[0] = NULL
;
1198 tb
->jmp_next
[1] = NULL
;
1200 /* init original jump addresses */
1201 if (tb
->tb_next_offset
[0] != 0xffff)
1202 tb_reset_jump(tb
, 0);
1203 if (tb
->tb_next_offset
[1] != 0xffff)
1204 tb_reset_jump(tb
, 1);
1206 #ifdef DEBUG_TB_CHECK
1212 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1213 tb[1].tc_ptr. Return NULL if not found */
1214 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1216 int m_min
, m_max
, m
;
1218 TranslationBlock
*tb
;
1222 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1223 tc_ptr
>= (unsigned long)code_gen_ptr
)
1225 /* binary search (cf Knuth) */
1228 while (m_min
<= m_max
) {
1229 m
= (m_min
+ m_max
) >> 1;
1231 v
= (unsigned long)tb
->tc_ptr
;
1234 else if (tc_ptr
< v
) {
1243 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1245 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1247 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1250 tb1
= tb
->jmp_next
[n
];
1252 /* find head of list */
1255 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1258 tb1
= tb1
->jmp_next
[n1
];
1260 /* we are now sure now that tb jumps to tb1 */
1263 /* remove tb from the jmp_first list */
1264 ptb
= &tb_next
->jmp_first
;
1268 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1269 if (n1
== n
&& tb1
== tb
)
1271 ptb
= &tb1
->jmp_next
[n1
];
1273 *ptb
= tb
->jmp_next
[n
];
1274 tb
->jmp_next
[n
] = NULL
;
1276 /* suppress the jump to next tb in generated code */
1277 tb_reset_jump(tb
, n
);
1279 /* suppress jumps in the tb on which we could have jumped */
1280 tb_reset_jump_recursive(tb_next
);
1284 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1286 tb_reset_jump_recursive2(tb
, 0);
1287 tb_reset_jump_recursive2(tb
, 1);
1290 #if defined(TARGET_HAS_ICE)
1291 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1293 target_phys_addr_t addr
;
1295 ram_addr_t ram_addr
;
1298 addr
= cpu_get_phys_page_debug(env
, pc
);
1299 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1301 pd
= IO_MEM_UNASSIGNED
;
1303 pd
= p
->phys_offset
;
1305 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1306 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1310 /* Add a watchpoint. */
1311 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1312 int flags
, CPUWatchpoint
**watchpoint
)
1314 target_ulong len_mask
= ~(len
- 1);
1317 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1318 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1319 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1320 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1323 wp
= qemu_malloc(sizeof(*wp
));
1328 wp
->len_mask
= len_mask
;
1331 /* keep all GDB-injected watchpoints in front */
1333 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1335 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1337 tlb_flush_page(env
, addr
);
1344 /* Remove a specific watchpoint. */
1345 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1348 target_ulong len_mask
= ~(len
- 1);
1351 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1352 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1353 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1354 cpu_watchpoint_remove_by_ref(env
, wp
);
1361 /* Remove a specific watchpoint by reference. */
1362 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1364 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1366 tlb_flush_page(env
, watchpoint
->vaddr
);
1368 qemu_free(watchpoint
);
1371 /* Remove all matching watchpoints. */
1372 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1374 CPUWatchpoint
*wp
, *next
;
1376 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1377 if (wp
->flags
& mask
)
1378 cpu_watchpoint_remove_by_ref(env
, wp
);
1382 /* Add a breakpoint. */
1383 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1384 CPUBreakpoint
**breakpoint
)
1386 #if defined(TARGET_HAS_ICE)
1389 bp
= qemu_malloc(sizeof(*bp
));
1396 /* keep all GDB-injected breakpoints in front */
1398 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1400 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1402 breakpoint_invalidate(env
, pc
);
1412 /* Remove a specific breakpoint. */
1413 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1415 #if defined(TARGET_HAS_ICE)
1418 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1419 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1420 cpu_breakpoint_remove_by_ref(env
, bp
);
1430 /* Remove a specific breakpoint by reference. */
1431 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1433 #if defined(TARGET_HAS_ICE)
1434 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1436 breakpoint_invalidate(env
, breakpoint
->pc
);
1438 qemu_free(breakpoint
);
1442 /* Remove all matching breakpoints. */
1443 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1445 #if defined(TARGET_HAS_ICE)
1446 CPUBreakpoint
*bp
, *next
;
1448 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1449 if (bp
->flags
& mask
)
1450 cpu_breakpoint_remove_by_ref(env
, bp
);
1455 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1456 CPU loop after each instruction */
1457 void cpu_single_step(CPUState
*env
, int enabled
)
1459 #if defined(TARGET_HAS_ICE)
1460 if (env
->singlestep_enabled
!= enabled
) {
1461 env
->singlestep_enabled
= enabled
;
1462 /* must flush all the translated code to avoid inconsistancies */
1463 /* XXX: only flush what is necessary */
1469 /* enable or disable low levels log */
1470 void cpu_set_log(int log_flags
)
1472 loglevel
= log_flags
;
1473 if (loglevel
&& !logfile
) {
1474 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1476 perror(logfilename
);
1479 #if !defined(CONFIG_SOFTMMU)
1480 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1482 static char logfile_buf
[4096];
1483 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1486 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1490 if (!loglevel
&& logfile
) {
1496 void cpu_set_log_filename(const char *filename
)
1498 logfilename
= strdup(filename
);
1503 cpu_set_log(loglevel
);
1506 /* mask must never be zero, except for A20 change call */
1507 void cpu_interrupt(CPUState
*env
, int mask
)
1509 #if !defined(USE_NPTL)
1510 TranslationBlock
*tb
;
1511 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1515 old_mask
= env
->interrupt_request
;
1516 /* FIXME: This is probably not threadsafe. A different thread could
1517 be in the middle of a read-modify-write operation. */
1518 env
->interrupt_request
|= mask
;
1519 #if defined(USE_NPTL)
1520 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1521 problem and hope the cpu will stop of its own accord. For userspace
1522 emulation this often isn't actually as bad as it sounds. Often
1523 signals are used primarily to interrupt blocking syscalls. */
1526 env
->icount_decr
.u16
.high
= 0xffff;
1527 #ifndef CONFIG_USER_ONLY
1528 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1529 an async event happened and we need to process it. */
1531 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1532 cpu_abort(env
, "Raised interrupt while not in I/O function");
1536 tb
= env
->current_tb
;
1537 /* if the cpu is currently executing code, we must unlink it and
1538 all the potentially executing TB */
1539 if (tb
&& !testandset(&interrupt_lock
)) {
1540 env
->current_tb
= NULL
;
1541 tb_reset_jump_recursive(tb
);
1542 resetlock(&interrupt_lock
);
1548 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1550 env
->interrupt_request
&= ~mask
;
1553 const CPULogItem cpu_log_items
[] = {
1554 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1555 "show generated host assembly code for each compiled TB" },
1556 { CPU_LOG_TB_IN_ASM
, "in_asm",
1557 "show target assembly code for each compiled TB" },
1558 { CPU_LOG_TB_OP
, "op",
1559 "show micro ops for each compiled TB" },
1560 { CPU_LOG_TB_OP_OPT
, "op_opt",
1563 "before eflags optimization and "
1565 "after liveness analysis" },
1566 { CPU_LOG_INT
, "int",
1567 "show interrupts/exceptions in short format" },
1568 { CPU_LOG_EXEC
, "exec",
1569 "show trace before each executed TB (lots of logs)" },
1570 { CPU_LOG_TB_CPU
, "cpu",
1571 "show CPU state before block translation" },
1573 { CPU_LOG_PCALL
, "pcall",
1574 "show protected mode far calls/returns/exceptions" },
1577 { CPU_LOG_IOPORT
, "ioport",
1578 "show all i/o ports accesses" },
/* Compare the first 'n' characters of 's1' against the whole of the
   NUL-terminated string 's2'.  Returns non-zero only when 's2' has
   exactly length 'n' and the bytes match. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1590 /* takes a comma separated list of log masks. Return 0 if error. */
1591 int cpu_str_to_log_mask(const char *str
)
1593 const CPULogItem
*item
;
1600 p1
= strchr(p
, ',');
1603 if(cmp1(p
,p1
-p
,"all")) {
1604 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1608 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1609 if (cmp1(p
, p1
- p
, item
->name
))
1623 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1630 fprintf(stderr
, "qemu: fatal: ");
1631 vfprintf(stderr
, fmt
, ap
);
1632 fprintf(stderr
, "\n");
1634 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1636 cpu_dump_state(env
, stderr
, fprintf
, 0);
1639 fprintf(logfile
, "qemu: fatal: ");
1640 vfprintf(logfile
, fmt
, ap2
);
1641 fprintf(logfile
, "\n");
1643 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1645 cpu_dump_state(env
, logfile
, fprintf
, 0);
1655 CPUState
*cpu_copy(CPUState
*env
)
1657 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1658 /* preserve chaining and index */
1659 CPUState
*next_cpu
= new_env
->next_cpu
;
1660 int cpu_index
= new_env
->cpu_index
;
1661 memcpy(new_env
, env
, sizeof(CPUState
));
1662 new_env
->next_cpu
= next_cpu
;
1663 new_env
->cpu_index
= cpu_index
;
1667 #if !defined(CONFIG_USER_ONLY)
1669 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1673 /* Discard jump cache entries for any tb which might potentially
1674 overlap the flushed page. */
1675 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1676 memset (&env
->tb_jmp_cache
[i
], 0,
1677 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1679 i
= tb_jmp_cache_hash_page(addr
);
1680 memset (&env
->tb_jmp_cache
[i
], 0,
1681 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1684 /* NOTE: if flush_global is true, also flush global entries (not
1686 void tlb_flush(CPUState
*env
, int flush_global
)
1690 #if defined(DEBUG_TLB)
1691 printf("tlb_flush:\n");
1693 /* must reset current TB so that interrupts cannot modify the
1694 links while we are modifying them */
1695 env
->current_tb
= NULL
;
1697 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1698 env
->tlb_table
[0][i
].addr_read
= -1;
1699 env
->tlb_table
[0][i
].addr_write
= -1;
1700 env
->tlb_table
[0][i
].addr_code
= -1;
1701 env
->tlb_table
[1][i
].addr_read
= -1;
1702 env
->tlb_table
[1][i
].addr_write
= -1;
1703 env
->tlb_table
[1][i
].addr_code
= -1;
1704 #if (NB_MMU_MODES >= 3)
1705 env
->tlb_table
[2][i
].addr_read
= -1;
1706 env
->tlb_table
[2][i
].addr_write
= -1;
1707 env
->tlb_table
[2][i
].addr_code
= -1;
1708 #if (NB_MMU_MODES == 4)
1709 env
->tlb_table
[3][i
].addr_read
= -1;
1710 env
->tlb_table
[3][i
].addr_write
= -1;
1711 env
->tlb_table
[3][i
].addr_code
= -1;
1716 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1719 if (env
->kqemu_enabled
) {
1720 kqemu_flush(env
, flush_global
);
1726 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1728 if (addr
== (tlb_entry
->addr_read
&
1729 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1730 addr
== (tlb_entry
->addr_write
&
1731 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1732 addr
== (tlb_entry
->addr_code
&
1733 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1734 tlb_entry
->addr_read
= -1;
1735 tlb_entry
->addr_write
= -1;
1736 tlb_entry
->addr_code
= -1;
1740 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1744 #if defined(DEBUG_TLB)
1745 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1747 /* must reset current TB so that interrupts cannot modify the
1748 links while we are modifying them */
1749 env
->current_tb
= NULL
;
1751 addr
&= TARGET_PAGE_MASK
;
1752 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1753 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1754 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1755 #if (NB_MMU_MODES >= 3)
1756 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1757 #if (NB_MMU_MODES == 4)
1758 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1762 tlb_flush_jmp_cache(env
, addr
);
1765 if (env
->kqemu_enabled
) {
1766 kqemu_flush_page(env
, addr
);
1771 /* update the TLBs so that writes to code in the virtual page 'addr'
1773 static void tlb_protect_code(ram_addr_t ram_addr
)
1775 cpu_physical_memory_reset_dirty(ram_addr
,
1776 ram_addr
+ TARGET_PAGE_SIZE
,
1780 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1781 tested for self modifying code */
1782 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1785 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1788 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1789 unsigned long start
, unsigned long length
)
1792 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1793 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1794 if ((addr
- start
) < length
) {
1795 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1800 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1804 unsigned long length
, start1
;
1808 start
&= TARGET_PAGE_MASK
;
1809 end
= TARGET_PAGE_ALIGN(end
);
1811 length
= end
- start
;
1814 len
= length
>> TARGET_PAGE_BITS
;
1816 /* XXX: should not depend on cpu context */
1818 if (env
->kqemu_enabled
) {
1821 for(i
= 0; i
< len
; i
++) {
1822 kqemu_set_notdirty(env
, addr
);
1823 addr
+= TARGET_PAGE_SIZE
;
1827 mask
= ~dirty_flags
;
1828 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1829 for(i
= 0; i
< len
; i
++)
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
1834 start1
= start
+ (unsigned long)phys_ram_base
;
1835 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1836 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1837 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1838 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1839 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1840 #if (NB_MMU_MODES >= 3)
1841 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1842 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1843 #if (NB_MMU_MODES == 4)
1844 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1845 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1851 int cpu_physical_memory_set_dirty_tracking(int enable
)
1853 in_migration
= enable
;
1857 int cpu_physical_memory_get_dirty_tracking(void)
1859 return in_migration
;
1862 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1865 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1868 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1870 ram_addr_t ram_addr
;
1872 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1873 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1874 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1875 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1876 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1881 /* update the TLB according to the current state of the dirty bits */
1882 void cpu_tlb_update_dirty(CPUState
*env
)
1885 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1886 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1887 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1888 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1889 #if (NB_MMU_MODES >= 3)
1890 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1891 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1892 #if (NB_MMU_MODES == 4)
1893 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1894 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1899 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1901 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1902 tlb_entry
->addr_write
= vaddr
;
1905 /* update the TLB corresponding to virtual page vaddr
1906 so that it is no longer dirty */
1907 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1911 vaddr
&= TARGET_PAGE_MASK
;
1912 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1913 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1914 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1915 #if (NB_MMU_MODES >= 3)
1916 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1917 #if (NB_MMU_MODES == 4)
1918 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1923 /* add a new TLB entry. At most one entry for a given virtual address
1924 is permitted. Return 0 if OK or 2 if the page could not be mapped
1925 (can only happen in non SOFTMMU mode for I/O pages or pages
1926 conflicting with the host address space). */
1927 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1928 target_phys_addr_t paddr
, int prot
,
1929 int mmu_idx
, int is_softmmu
)
1934 target_ulong address
;
1935 target_ulong code_address
;
1936 target_phys_addr_t addend
;
1940 target_phys_addr_t iotlb
;
1942 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1944 pd
= IO_MEM_UNASSIGNED
;
1946 pd
= p
->phys_offset
;
1948 #if defined(DEBUG_TLB)
1949 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1950 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1955 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1956 /* IO memory case (romd handled later) */
1957 address
|= TLB_MMIO
;
1959 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1960 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1962 iotlb
= pd
& TARGET_PAGE_MASK
;
1963 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1964 iotlb
|= IO_MEM_NOTDIRTY
;
1966 iotlb
|= IO_MEM_ROM
;
1968 /* IO handlers are currently passed a phsical address.
1969 It would be nice to pass an offset from the base address
1970 of that region. This would avoid having to special case RAM,
1971 and avoid full address decoding in every device.
1972 We can't use the high bits of pd for this because
1973 IO_MEM_ROMD uses these as a ram address. */
1974 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
1976 iotlb
+= p
->region_offset
;
1982 code_address
= address
;
1983 /* Make accesses to pages with watchpoints go via the
1984 watchpoint trap routines. */
1985 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1986 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
1987 iotlb
= io_mem_watch
+ paddr
;
1988 /* TODO: The memory case can be optimized by not trapping
1989 reads of pages with a write breakpoint. */
1990 address
|= TLB_MMIO
;
1994 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1995 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
1996 te
= &env
->tlb_table
[mmu_idx
][index
];
1997 te
->addend
= addend
- vaddr
;
1998 if (prot
& PAGE_READ
) {
1999 te
->addr_read
= address
;
2004 if (prot
& PAGE_EXEC
) {
2005 te
->addr_code
= code_address
;
2009 if (prot
& PAGE_WRITE
) {
2010 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2011 (pd
& IO_MEM_ROMD
)) {
2012 /* Write access calls the I/O callback. */
2013 te
->addr_write
= address
| TLB_MMIO
;
2014 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2015 !cpu_physical_memory_is_dirty(pd
)) {
2016 te
->addr_write
= address
| TLB_NOTDIRTY
;
2018 te
->addr_write
= address
;
2021 te
->addr_write
= -1;
2028 void tlb_flush(CPUState
*env
, int flush_global
)
2032 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2036 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2037 target_phys_addr_t paddr
, int prot
,
2038 int mmu_idx
, int is_softmmu
)
2043 /* dump memory mappings */
2044 void page_dump(FILE *f
)
2046 unsigned long start
, end
;
2047 int i
, j
, prot
, prot1
;
2050 fprintf(f
, "%-8s %-8s %-8s %s\n",
2051 "start", "end", "size", "prot");
2055 for(i
= 0; i
<= L1_SIZE
; i
++) {
2060 for(j
= 0;j
< L2_SIZE
; j
++) {
2065 if (prot1
!= prot
) {
2066 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2068 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2069 start
, end
, end
- start
,
2070 prot
& PAGE_READ
? 'r' : '-',
2071 prot
& PAGE_WRITE
? 'w' : '-',
2072 prot
& PAGE_EXEC
? 'x' : '-');
2086 int page_get_flags(target_ulong address
)
2090 p
= page_find(address
>> TARGET_PAGE_BITS
);
2096 /* modify the flags of a page and invalidate the code if
2097 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2098 depending on PAGE_WRITE */
2099 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2104 /* mmap_lock should already be held. */
2105 start
= start
& TARGET_PAGE_MASK
;
2106 end
= TARGET_PAGE_ALIGN(end
);
2107 if (flags
& PAGE_WRITE
)
2108 flags
|= PAGE_WRITE_ORG
;
2109 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2110 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2111 /* We may be called for host regions that are outside guest
2115 /* if the write protection is set, then we invalidate the code
2117 if (!(p
->flags
& PAGE_WRITE
) &&
2118 (flags
& PAGE_WRITE
) &&
2120 tb_invalidate_phys_page(addr
, 0, NULL
);
2126 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2132 if (start
+ len
< start
)
2133 /* we've wrapped around */
2136 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2137 start
= start
& TARGET_PAGE_MASK
;
2139 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2140 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2143 if( !(p
->flags
& PAGE_VALID
) )
2146 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2148 if (flags
& PAGE_WRITE
) {
2149 if (!(p
->flags
& PAGE_WRITE_ORG
))
2151 /* unprotect the page if it was put read-only because it
2152 contains translated code */
2153 if (!(p
->flags
& PAGE_WRITE
)) {
2154 if (!page_unprotect(addr
, 0, NULL
))
2163 /* called from signal handler: invalidate the code and unprotect the
2164 page. Return TRUE if the fault was succesfully handled. */
2165 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2167 unsigned int page_index
, prot
, pindex
;
2169 target_ulong host_start
, host_end
, addr
;
2171 /* Technically this isn't safe inside a signal handler. However we
2172 know this only ever happens in a synchronous SEGV handler, so in
2173 practice it seems to be ok. */
2176 host_start
= address
& qemu_host_page_mask
;
2177 page_index
= host_start
>> TARGET_PAGE_BITS
;
2178 p1
= page_find(page_index
);
2183 host_end
= host_start
+ qemu_host_page_size
;
2186 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2190 /* if the page was really writable, then we change its
2191 protection back to writable */
2192 if (prot
& PAGE_WRITE_ORG
) {
2193 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2194 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2195 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2196 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2197 p1
[pindex
].flags
|= PAGE_WRITE
;
2198 /* and since the content will be modified, we must invalidate
2199 the corresponding translated code. */
2200 tb_invalidate_phys_page(address
, pc
, puc
);
2201 #ifdef DEBUG_TB_CHECK
2202 tb_invalidate_check(address
);
2212 static inline void tlb_set_dirty(CPUState
*env
,
2213 unsigned long addr
, target_ulong vaddr
)
2216 #endif /* defined(CONFIG_USER_ONLY) */
2218 #if !defined(CONFIG_USER_ONLY)
2220 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2221 ram_addr_t memory
, ram_addr_t region_offset
);
2222 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2223 ram_addr_t orig_memory
, ram_addr_t region_offset
);
/* Compute the sub-page byte range [start_addr2, end_addr2] that the
   registration starting at 'start_addr' (length 'orig_size') covers
   inside the page containing 'addr', and set need_subpage when the
   page is only partially covered. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2244 /* register physical memory. 'size' must be a multiple of the target
2245 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2246 io memory page. The address used when calling the IO function is
2247 the offset from the start of the region, plus region_offset. Both
2248 start_region and regon_offset are rounded down to a page boundary
2249 before calculating this offset. This should not be a problem unless
2250 the low bits of start_addr and region_offset differ. */
2251 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2253 ram_addr_t phys_offset
,
2254 ram_addr_t region_offset
)
2256 target_phys_addr_t addr
, end_addr
;
2259 ram_addr_t orig_size
= size
;
2263 /* XXX: should not depend on cpu context */
2265 if (env
->kqemu_enabled
) {
2266 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2270 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2272 region_offset
&= TARGET_PAGE_MASK
;
2273 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2274 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2275 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2276 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2277 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2278 ram_addr_t orig_memory
= p
->phys_offset
;
2279 target_phys_addr_t start_addr2
, end_addr2
;
2280 int need_subpage
= 0;
2282 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2284 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2285 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2286 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2287 &p
->phys_offset
, orig_memory
,
2290 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2293 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2295 p
->region_offset
= 0;
2297 p
->phys_offset
= phys_offset
;
2298 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2299 (phys_offset
& IO_MEM_ROMD
))
2300 phys_offset
+= TARGET_PAGE_SIZE
;
2303 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2304 p
->phys_offset
= phys_offset
;
2305 p
->region_offset
= region_offset
;
2306 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2307 (phys_offset
& IO_MEM_ROMD
)) {
2308 phys_offset
+= TARGET_PAGE_SIZE
;
2310 target_phys_addr_t start_addr2
, end_addr2
;
2311 int need_subpage
= 0;
2313 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2314 end_addr2
, need_subpage
);
2316 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2317 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2318 &p
->phys_offset
, IO_MEM_UNASSIGNED
,
2320 subpage_register(subpage
, start_addr2
, end_addr2
,
2321 phys_offset
, region_offset
);
2322 p
->region_offset
= 0;
2326 region_offset
+= TARGET_PAGE_SIZE
;
2329 /* since each CPU stores ram addresses in its TLB cache, we must
2330 reset the modified entries */
2332 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2337 /* XXX: temporary until new memory mapping API */
2338 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2342 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2344 return IO_MEM_UNASSIGNED
;
2345 return p
->phys_offset
;
2348 /* XXX: better than nothing */
2349 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2352 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2353 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
")\n",
2354 (uint64_t)size
, (uint64_t)phys_ram_size
);
2357 addr
= phys_ram_alloc_offset
;
2358 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2362 void qemu_ram_free(ram_addr_t addr
)
2366 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2368 #ifdef DEBUG_UNASSIGNED
2369 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2371 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2372 do_unassigned_access(addr
, 0, 0, 0, 1);
2377 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
2379 #ifdef DEBUG_UNASSIGNED
2380 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2382 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2383 do_unassigned_access(addr
, 0, 0, 0, 2);
2388 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
2390 #ifdef DEBUG_UNASSIGNED
2391 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2393 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2394 do_unassigned_access(addr
, 0, 0, 0, 4);
2399 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2401 #ifdef DEBUG_UNASSIGNED
2402 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2404 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2405 do_unassigned_access(addr
, 1, 0, 0, 1);
2409 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2411 #ifdef DEBUG_UNASSIGNED
2412 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2414 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2415 do_unassigned_access(addr
, 1, 0, 0, 2);
2419 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2421 #ifdef DEBUG_UNASSIGNED
2422 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2424 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2425 do_unassigned_access(addr
, 1, 0, 0, 4);
2429 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2430 unassigned_mem_readb
,
2431 unassigned_mem_readw
,
2432 unassigned_mem_readl
,
2435 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2436 unassigned_mem_writeb
,
2437 unassigned_mem_writew
,
2438 unassigned_mem_writel
,
2441 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2445 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2446 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2447 #if !defined(CONFIG_USER_ONLY)
2448 tb_invalidate_phys_page_fast(ram_addr
, 1);
2449 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2452 stb_p(phys_ram_base
+ ram_addr
, val
);
2454 if (cpu_single_env
->kqemu_enabled
&&
2455 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2456 kqemu_modify_page(cpu_single_env
, ram_addr
);
2458 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2459 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2460 /* we remove the notdirty callback only if the code has been
2462 if (dirty_flags
== 0xff)
2463 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2466 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2470 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2471 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2472 #if !defined(CONFIG_USER_ONLY)
2473 tb_invalidate_phys_page_fast(ram_addr
, 2);
2474 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2477 stw_p(phys_ram_base
+ ram_addr
, val
);
2479 if (cpu_single_env
->kqemu_enabled
&&
2480 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2481 kqemu_modify_page(cpu_single_env
, ram_addr
);
2483 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2484 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2485 /* we remove the notdirty callback only if the code has been
2487 if (dirty_flags
== 0xff)
2488 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2491 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2495 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2496 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2497 #if !defined(CONFIG_USER_ONLY)
2498 tb_invalidate_phys_page_fast(ram_addr
, 4);
2499 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2502 stl_p(phys_ram_base
+ ram_addr
, val
);
2504 if (cpu_single_env
->kqemu_enabled
&&
2505 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2506 kqemu_modify_page(cpu_single_env
, ram_addr
);
2508 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2509 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2510 /* we remove the notdirty callback only if the code has been
2512 if (dirty_flags
== 0xff)
2513 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2516 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2517 NULL
, /* never used */
2518 NULL
, /* never used */
2519 NULL
, /* never used */
2522 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2523 notdirty_mem_writeb
,
2524 notdirty_mem_writew
,
2525 notdirty_mem_writel
,
2528 /* Generate a debug exception if a watchpoint has been hit. */
2529 static void check_watchpoint(int offset
, int len_mask
, int flags
)
2531 CPUState
*env
= cpu_single_env
;
2532 target_ulong pc
, cs_base
;
2533 TranslationBlock
*tb
;
2538 if (env
->watchpoint_hit
) {
2539 /* We re-entered the check after replacing the TB. Now raise
2540 * the debug interrupt so that is will trigger after the
2541 * current instruction. */
2542 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2545 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2546 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2547 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
2548 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
2549 wp
->flags
|= BP_WATCHPOINT_HIT
;
2550 if (!env
->watchpoint_hit
) {
2551 env
->watchpoint_hit
= wp
;
2552 tb
= tb_find_pc(env
->mem_io_pc
);
2554 cpu_abort(env
, "check_watchpoint: could not find TB for "
2555 "pc=%p", (void *)env
->mem_io_pc
);
2557 cpu_restore_state(tb
, env
, env
->mem_io_pc
, NULL
);
2558 tb_phys_invalidate(tb
, -1);
2559 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2560 env
->exception_index
= EXCP_DEBUG
;
2562 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2563 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
2565 cpu_resume_from_signal(env
, NULL
);
2568 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
/* Watchpoint-trapping memory accessors: each checks for a watchpoint hit
   (with the width-appropriate length mask ~0x0 / ~0x1 / ~0x3) and then
   forwards to the normal physical-memory load/store helper.
   NOTE(review): the comment at 2573-2574 is missing its closing line, and
   the table initializers at 2615/2621 are missing their member lines —
   extraction dropped interior lines (gaps in embedded numbering). */
2573 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2574 so these check for a hit then pass through to the normal out-of-line
2576 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2578 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
2579 return ldub_phys(addr
);
2582 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2584 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
2585 return lduw_phys(addr
);
2588 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2590 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
2591 return ldl_phys(addr
);
2594 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2597 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
2598 stb_phys(addr
, val
);
2601 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2604 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
2605 stw_phys(addr
, val
);
2608 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2611 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
2612 stl_phys(addr
, val
);
/* Dispatch tables registered with cpu_register_io_memory() in io_mem_init(). */
2615 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2621 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
/* Generic sub-page read: look up the per-subpage handler for this address
   and width (len: 0=byte, 1=word, 2=long) and invoke it with the opaque
   pointer and region offset recorded by subpage_register().
   Index [0] selects the read side of the opaque/region_offset tables.
   NOTE(review): local declarations and the return statement are missing
   from this extraction (gaps in embedded numbering). */
2627 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2633 idx
= SUBPAGE_IDX(addr
);
2634 #if defined(DEBUG_SUBPAGE)
2635 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2636 mmio
, len
, addr
, idx
);
2638 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
],
2639 addr
+ mmio
->region_offset
[idx
][0][len
]);
/* Generic sub-page write: mirror of subpage_readlen, dispatching to the
   registered write handler for this address/width; index [1] selects the
   write side of the opaque/region_offset tables.
   NOTE(review): local declarations and the trailing argument/brace lines
   are missing from this extraction. */
2644 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2645 uint32_t value
, unsigned int len
)
2649 idx
= SUBPAGE_IDX(addr
);
2650 #if defined(DEBUG_SUBPAGE)
2651 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2652 mmio
, len
, addr
, idx
, value
);
2654 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
],
2655 addr
+ mmio
->region_offset
[idx
][1][len
],
/* Width-specific sub-page accessors: thin wrappers that forward to
   subpage_readlen/subpage_writelen with len = 0 (byte), 1 (word), 2 (long).
   The trailing subpage_read[]/subpage_write[] tables collect them for
   registration via cpu_register_io_memory() in subpage_init().
   NOTE(review): #endif lines and the table member lines are missing from
   this extraction (gaps in embedded numbering). */
2659 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2661 #if defined(DEBUG_SUBPAGE)
2662 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2665 return subpage_readlen(opaque
, addr
, 0);
2668 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2671 #if defined(DEBUG_SUBPAGE)
2672 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2674 subpage_writelen(opaque
, addr
, value
, 0);
2677 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2679 #if defined(DEBUG_SUBPAGE)
2680 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2683 return subpage_readlen(opaque
, addr
, 1);
2686 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2689 #if defined(DEBUG_SUBPAGE)
2690 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2692 subpage_writelen(opaque
, addr
, value
, 1);
2695 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2697 #if defined(DEBUG_SUBPAGE)
2698 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2701 return subpage_readlen(opaque
, addr
, 2);
2704 static void subpage_writel (void *opaque
,
2705 target_phys_addr_t addr
, uint32_t value
)
2707 #if defined(DEBUG_SUBPAGE)
2708 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2710 subpage_writelen(opaque
, addr
, value
, 2);
2713 static CPUReadMemoryFunc
*subpage_read
[] = {
2719 static CPUWriteMemoryFunc
*subpage_write
[] = {
/* Bind the I/O handlers of 'memory' (an io-memory index, pre-shift) to the
   sub-page slots covering [start, end] within one target page, recording
   'region_offset' for each slot so accesses are relocated correctly.
   Bounds are validated against TARGET_PAGE_SIZE; the inner loop copies
   per-width (i = 0..3) read and write handler pointers plus the shared
   opaque pointer into the subpage dispatch tables used by
   subpage_readlen()/subpage_writelen().
   NOTE(review): local declarations (idx, eidx, i), early-return body,
   #endif and closing braces are missing from this extraction. */
2725 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2726 ram_addr_t memory
, ram_addr_t region_offset
)
2731 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2733 idx
= SUBPAGE_IDX(start
);
2734 eidx
= SUBPAGE_IDX(end
);
2735 #if defined(DEBUG_SUBPAGE)
2736 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2737 mmio
, start
, end
, idx
, eidx
, memory
);
/* Convert from encoded phys_offset form to a raw io_mem table index. */
2739 memory
>>= IO_MEM_SHIFT
;
2740 for (; idx
<= eidx
; idx
++) {
2741 for (i
= 0; i
< 4; i
++) {
2742 if (io_mem_read
[memory
][i
]) {
2743 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2744 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2745 mmio
->region_offset
[idx
][0][i
] = region_offset
;
2747 if (io_mem_write
[memory
][i
]) {
2748 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2749 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2750 mmio
->region_offset
[idx
][1][i
] = region_offset
;
/* Allocate and register a new sub-page container for the page at 'base'.
   Allocates a zeroed subpage_t, registers it as a fresh io-memory zone
   (io_index 0 => allocate new), writes the encoded phys_offset
   (subpage_memory | IO_MEM_SUBPAGE) back through *phys, and pre-populates
   the whole page with 'orig_memory' handlers via subpage_register().
   NOTE(review): local declarations, #endif and the return statement are
   missing from this extraction. */
2758 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2759 ram_addr_t orig_memory
, ram_addr_t region_offset
)
2764 mmio
= qemu_mallocz(sizeof(subpage_t
));
2767 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2768 #if defined(DEBUG_SUBPAGE)
2769 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2770 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2772 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2773 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
,
/* One-time setup of the built-in io-memory zones:
   ROM (write side unassigned, reads never go through handlers),
   UNASSIGNED (both sides unassigned), NOTDIRTY (dirty-tracking writes),
   plus a dynamically allocated zone for watchpoint trapping
   (io_mem_watch).  Finally allocates and fully marks the per-page dirty
   bitmap (one byte per target page, initialised to 0xff = all dirty). */
2780 static void io_mem_init(void)
2782 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2783 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2784 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2787 io_mem_watch
= cpu_register_io_memory(0, watch_mem_read
,
2788 watch_mem_write
, NULL
);
2789 /* alloc dirty bits array */
2790 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2791 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
/* Register (or update) an io-memory zone: copies the three per-width
   read/write handlers into the global io_mem_read/io_mem_write tables and
   stores the shared opaque pointer.  If any width slot is NULL on either
   side, the returned index is tagged with IO_MEM_SUBWIDTH.
   NOTE(review): the 'opaque' parameter line, early-return bodies and
   closing braces are missing from this extraction. */
2794 /* mem_read and mem_write are arrays of functions containing the
2795 function to access byte (index 0), word (index 1) and dword (index
2796 2). Functions can be omitted with a NULL function pointer. The
2797 registered functions may be modified dynamically later.
2798 If io_index is non zero, the corresponding io zone is
2799 modified. If it is zero, a new io zone is allocated. The return
2800 value can be used with cpu_register_physical_memory(). (-1) is
2801 returned if error. */
2802 int cpu_register_io_memory(int io_index
,
2803 CPUReadMemoryFunc
**mem_read
,
2804 CPUWriteMemoryFunc
**mem_write
,
2807 int i
, subwidth
= 0;
/* io_index <= 0 requests allocation of a fresh zone. */
2809 if (io_index
<= 0) {
2810 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2812 io_index
= io_mem_nb
++;
2814 if (io_index
>= IO_MEM_NB_ENTRIES
)
2818 for(i
= 0;i
< 3; i
++) {
2819 if (!mem_read
[i
] || !mem_write
[i
])
2820 subwidth
= IO_MEM_SUBWIDTH
;
2821 io_mem_read
[io_index
][i
] = mem_read
[i
];
2822 io_mem_write
[io_index
][i
] = mem_write
[i
];
2824 io_mem_opaque
[io_index
] = opaque
;
/* Encode the table index back into phys_offset form. */
2825 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
/* Accessors returning the registered handler arrays for an encoded
   io-memory index (the caller passes the IO_MEM_SHIFT-encoded value,
   which is decoded here). */
2828 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2830 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2833 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2835 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2838 #endif /* !defined(CONFIG_USER_ONLY) */
/* User-mode-only variant: physical addresses coincide with guest virtual
   addresses, so the copy is done page-by-page through lock_user/
   unlock_user after checking the page's PAGE_VALID/PAGE_READ/PAGE_WRITE
   flags.  NOTE(review): the per-page loop header, memcpy calls and early
   returns are missing from this extraction (gaps in embedded numbering). */
2840 /* physical memory access (slow version, mainly for debug) */
2841 #if defined(CONFIG_USER_ONLY)
2842 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2843 int len
, int is_write
)
2850 page
= addr
& TARGET_PAGE_MASK
;
/* l = number of bytes remaining in this page. */
2851 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2854 flags
= page_get_flags(page
);
2855 if (!(flags
& PAGE_VALID
))
2858 if (!(flags
& PAGE_WRITE
))
2860 /* XXX: this code should not depend on lock_user */
2861 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2862 /* FIXME - should this return an error rather than just fail? */
2865 unlock_user(p
, addr
, l
);
2867 if (!(flags
& PAGE_READ
))
2869 /* XXX: this code should not depend on lock_user */
2870 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2871 /* FIXME - should this return an error rather than just fail? */
2874 unlock_user(p
, addr
, 0);
/* Full-system variant: slow page-by-page physical memory read/write.
   For each page, looks up the phys_offset via phys_page_find(); I/O pages
   are dispatched through io_mem_read/io_mem_write at the widest aligned
   access size (4/2/1 bytes), RAM pages are memcpy'd via phys_ram_base.
   Writes to RAM also invalidate any translated code on the page and set
   the page's dirty bits (all but CODE_DIRTY_FLAG).
   NOTE(review): the outer while loop, local declarations (l, io_index,
   val, pd, p, ptr), endianness conversion lines and closing braces are
   missing from this extraction (gaps in embedded numbering). */
2883 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2884 int len
, int is_write
)
2889 target_phys_addr_t page
;
2894 page
= addr
& TARGET_PAGE_MASK
;
2895 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2898 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2900 pd
= IO_MEM_UNASSIGNED
;
2902 pd
= p
->phys_offset
;
/* --- write path --- */
2906 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2907 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2909 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
2910 /* XXX: could force cpu_single_env to NULL to avoid
2912 if (l
>= 4 && ((addr
& 3) == 0)) {
2913 /* 32 bit write access */
2915 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2917 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2918 /* 16 bit write access */
2920 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2923 /* 8 bit write access */
2925 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2929 unsigned long addr1
;
2930 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
/* Direct RAM write, then keep TB cache and dirty bitmap coherent. */
2932 ptr
= phys_ram_base
+ addr1
;
2933 memcpy(ptr
, buf
, l
);
2934 if (!cpu_physical_memory_is_dirty(addr1
)) {
2935 /* invalidate code */
2936 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2938 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2939 (0xff & ~CODE_DIRTY_FLAG
);
/* --- read path --- */
2943 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2944 !(pd
& IO_MEM_ROMD
)) {
2946 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2948 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
2949 if (l
>= 4 && ((addr
& 3) == 0)) {
2950 /* 32 bit read access */
2951 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2954 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2955 /* 16 bit read access */
2956 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2960 /* 8 bit read access */
2961 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
/* Plain RAM / ROMD read. */
2967 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2968 (addr
& ~TARGET_PAGE_MASK
);
2969 memcpy(buf
, ptr
, l
);
/* ROM-loader write path: like the write side of cpu_physical_memory_rw
   but allows writing into ROM and ROMD pages.  Pages that are neither
   RAM, ROM nor ROMD are skipped (the 'do nothing' branch); the copy goes
   straight through phys_ram_base with no dirty-bit update.
   NOTE(review): the outer while loop, local declarations and closing
   braces are missing from this extraction. */
2978 /* used for ROM loading : can write in RAM and ROM */
2979 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2980 const uint8_t *buf
, int len
)
2984 target_phys_addr_t page
;
2989 page
= addr
& TARGET_PAGE_MASK
;
2990 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2993 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2995 pd
= IO_MEM_UNASSIGNED
;
2997 pd
= p
->phys_offset
;
3000 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
3001 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
3002 !(pd
& IO_MEM_ROMD
)) {
3005 unsigned long addr1
;
3006 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3008 ptr
= phys_ram_base
+ addr1
;
3009 memcpy(ptr
, buf
, l
);
/* 32-bit aligned physical load: dispatch to the io handler for I/O pages
   (anything above IO_MEM_ROM that is not ROMD), otherwise read directly
   from RAM via phys_ram_base.  NOTE(review): local declarations, the
   ldl_p RAM load and the return are missing from this extraction. */
3018 /* warning: addr must be aligned */
3019 uint32_t ldl_phys(target_phys_addr_t addr
)
3027 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3029 pd
= IO_MEM_UNASSIGNED
;
3031 pd
= p
->phys_offset
;
3034 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3035 !(pd
& IO_MEM_ROMD
)) {
3037 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3039 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3040 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3043 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3044 (addr
& ~TARGET_PAGE_MASK
);
/* 64-bit aligned physical load.  For I/O pages the 64-bit value is built
   from two 32-bit handler reads, ordered by target endianness
   (TARGET_WORDS_BIGENDIAN: high word first).  RAM pages are read directly
   through phys_ram_base.  NOTE(review): local declarations, #else/#endif,
   the ldq_p RAM load and the return are missing from this extraction. */
3050 /* warning: addr must be aligned */
3051 uint64_t ldq_phys(target_phys_addr_t addr
)
3059 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3061 pd
= IO_MEM_UNASSIGNED
;
3063 pd
= p
->phys_offset
;
3066 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3067 !(pd
& IO_MEM_ROMD
)) {
3069 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3071 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3072 #ifdef TARGET_WORDS_BIGENDIAN
3073 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
3074 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
3076 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3077 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
3081 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3082 (addr
& ~TARGET_PAGE_MASK
);
/* Unaligned-safe 8- and 16-bit physical loads, implemented on top of
   cpu_physical_memory_read; lduw_phys byte-swaps via tswap16 when host
   and target endianness differ.  NOTE(review): local 'val' declarations
   and ldub's return are missing from this extraction. */
3089 uint32_t ldub_phys(target_phys_addr_t addr
)
3092 cpu_physical_memory_read(addr
, &val
, 1);
3097 uint32_t lduw_phys(target_phys_addr_t addr
)
3100 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
3101 return tswap16(val
);
/* 32-bit aligned physical store that deliberately does NOT mark the page
   dirty or invalidate translated code — used when the dirty bitmap
   tracks guest PTE modifications.  Exception: during migration
   (in_migration) dirty state must still be kept coherent.
   NOTE(review): local declarations, the stl_p store and closing braces
   are missing from this extraction. */
3104 /* warning: addr must be aligned. The ram page is not masked as dirty
3105 and the code inside is not invalidated. It is useful if the dirty
3106 bits are used to track modified PTEs */
3107 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
3114 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3116 pd
= IO_MEM_UNASSIGNED
;
3118 pd
= p
->phys_offset
;
3121 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3122 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3124 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3125 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3127 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3128 ptr
= phys_ram_base
+ addr1
;
3131 if (unlikely(in_migration
)) {
3132 if (!cpu_physical_memory_is_dirty(addr1
)) {
3133 /* invalidate code */
3134 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3136 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3137 (0xff & ~CODE_DIRTY_FLAG
);
/* 64-bit variant of stl_phys_notdirty: no dirty-bit update.  I/O pages
   get two 32-bit handler writes ordered by target endianness
   (TARGET_WORDS_BIGENDIAN: high word at addr, low word at addr+4).
   NOTE(review): local declarations, #else/#endif, the stq_p RAM store
   and closing braces are missing from this extraction; unlike the 32-bit
   variant, no in_migration handling is visible here. */
3143 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
3150 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3152 pd
= IO_MEM_UNASSIGNED
;
3154 pd
= p
->phys_offset
;
3157 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3158 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3160 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3161 #ifdef TARGET_WORDS_BIGENDIAN
3162 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
3163 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
3165 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3166 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
3169 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3170 (addr
& ~TARGET_PAGE_MASK
);
/* 32-bit aligned physical store (normal dirty-tracking variant): I/O pages
   dispatch through the width-2 write handler; RAM pages are written
   directly, then translated code on the page is invalidated and the
   page's dirty bits are set (all but CODE_DIRTY_FLAG).
   NOTE(review): local declarations, the stl_p store and closing braces
   are missing from this extraction. */
3175 /* warning: addr must be aligned */
3176 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
3183 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3185 pd
= IO_MEM_UNASSIGNED
;
3187 pd
= p
->phys_offset
;
3190 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3191 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3193 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3194 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3196 unsigned long addr1
;
3197 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3199 ptr
= phys_ram_base
+ addr1
;
3201 if (!cpu_physical_memory_is_dirty(addr1
)) {
3202 /* invalidate code */
3203 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3205 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3206 (0xff & ~CODE_DIRTY_FLAG
);
/* Unaligned-safe 8/16/64-bit physical stores built on
   cpu_physical_memory_write; stw_phys byte-swaps via tswap16.
   NOTE(review): stb_phys's local 'v' declaration and stq_phys's target
   byte-swap line are missing from this extraction. */
3212 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3215 cpu_physical_memory_write(addr
, &v
, 1);
3219 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3221 uint16_t v
= tswap16(val
);
3222 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3226 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3229 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
/* Debugger memory access on guest VIRTUAL addresses: translates each page
   via cpu_get_phys_page_debug() and forwards to cpu_physical_memory_rw().
   Returns an error when a page has no physical mapping (phys_addr == -1).
   NOTE(review): the outer while loop, the return statements and the
   trailing arguments of the cpu_physical_memory_rw call are missing from
   this extraction. */
3234 /* virtual memory access for debug */
3235 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3236 uint8_t *buf
, int len
, int is_write
)
3239 target_phys_addr_t phys_addr
;
3243 page
= addr
& TARGET_PAGE_MASK
;
3244 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3245 /* if no physical page mapped, return an error */
3246 if (phys_addr
== -1)
3248 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3251 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
/* icount/deterministic mode: a device I/O happened mid-TB, which is not
   allowed.  Find the TB containing 'retaddr', restore CPU state to the
   faulting insn, count how many insns already executed (n), apply
   per-target delay-slot fixups (MIPS / SH4), then regenerate the TB with
   cflags = n | CF_LAST_IO so the I/O insn ends the block, and restart
   execution.  NOTE(review): local declarations (n, cflags, flags), the
   tb==NULL guard around cpu_abort, the pc = tb->pc line and closing
   braces are missing from this extraction. */
3260 /* in deterministic execution mode, instructions doing device I/Os
3261 must be at the end of the TB */
3262 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
3264 TranslationBlock
*tb
;
3266 target_ulong pc
, cs_base
;
3269 tb
= tb_find_pc((unsigned long)retaddr
);
3271 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
3274 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
3275 cpu_restore_state(tb
, env
, (unsigned long)retaddr
, NULL
);
3276 /* Calculate how many instructions had been executed before the fault
3278 n
= n
- env
->icount_decr
.u16
.low
;
3279 /* Generate a new TB ending on the I/O insn. */
3281 /* On MIPS and SH, delay slot instructions can only be restarted if
3282 they were already the first instruction in the TB. If this is not
3283 the first instruction in a TB then re-execute the preceding
3285 #if defined(TARGET_MIPS)
3286 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
3287 env
->active_tc
.PC
-= 4;
3288 env
->icount_decr
.u16
.low
++;
3289 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
3291 #elif defined(TARGET_SH4)
3292 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
3295 env
->icount_decr
.u16
.low
++;
3296 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
3299 /* This should never happen. */
3300 if (n
> CF_COUNT_MASK
)
3301 cpu_abort(env
, "TB too big during recompile");
3303 cflags
= n
| CF_LAST_IO
;
3305 cs_base
= tb
->cs_base
;
3307 tb_phys_invalidate(tb
, -1);
3308 /* FIXME: In theory this could raise an exception. In practice
3309 we have already translated the block once so it's probably ok. */
3310 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
3311 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3312 the first in the TB) then we end up generating a whole new TB and
3313 repeating the fault, which is horribly inefficient.
3314 Better would be to execute just this insn uncached, or generate a
3316 cpu_resume_from_signal(env
, NULL
);
/* Print translation-cache statistics through the caller-supplied
   cpu_fprintf: code buffer usage, TB counts and sizes, cross-page TBs,
   direct-jump chaining counts, and flush/invalidate counters, then
   delegates TCG-level stats to tcg_dump_info().
   NOTE(review): the cross_page initialisation, the tb = &tbs[i] loop
   body line, several increment statements and closing braces are missing
   from this extraction (gaps in embedded numbering). */
3319 void dump_exec_info(FILE *f
,
3320 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
3322 int i
, target_code_size
, max_target_code_size
;
3323 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
3324 TranslationBlock
*tb
;
3326 target_code_size
= 0;
3327 max_target_code_size
= 0;
3329 direct_jmp_count
= 0;
3330 direct_jmp2_count
= 0;
/* Accumulate per-TB statistics over all live TBs. */
3331 for(i
= 0; i
< nb_tbs
; i
++) {
3333 target_code_size
+= tb
->size
;
3334 if (tb
->size
> max_target_code_size
)
3335 max_target_code_size
= tb
->size
;
3336 if (tb
->page_addr
[1] != -1)
3338 if (tb
->tb_next_offset
[0] != 0xffff) {
3340 if (tb
->tb_next_offset
[1] != 0xffff) {
3341 direct_jmp2_count
++;
3345 /* XXX: avoid using doubles ? */
3346 cpu_fprintf(f
, "Translation buffer state:\n");
3347 cpu_fprintf(f
, "gen code size %ld/%ld\n",
3348 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
3349 cpu_fprintf(f
, "TB count %d/%d\n",
3350 nb_tbs
, code_gen_max_blocks
);
3351 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
3352 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3353 max_target_code_size
);
3354 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3355 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3356 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3357 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3359 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3360 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3362 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3364 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3365 cpu_fprintf(f
, "\nStatistics:\n");
3366 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3367 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3368 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3369 tcg_dump_info(f
, cpu_fprintf
);
3372 #if !defined(CONFIG_USER_ONLY)
3374 #define MMUSUFFIX _cmmu
3375 #define GETPC() NULL
3376 #define env cpu_single_env
3377 #define SOFTMMU_CODE_ACCESS
3380 #include "softmmu_template.h"
3383 #include "softmmu_template.h"
3386 #include "softmmu_template.h"
3389 #include "softmmu_template.h"