/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
/* Pool of TranslationBlock descriptors; storage is allocated in
   code_gen_alloc() with room for code_gen_max_blocks entries. */
87 static TranslationBlock
*tbs
;
88 int code_gen_max_blocks
;
/* Hash table from guest physical PC to TB, used to locate an existing
   translation for a given physical address (see tb_phys_hash_func()). */
89 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
102 #define code_gen_section \
103 __attribute__((aligned (32)))
106 uint8_t code_gen_prologue
[1024] code_gen_section
;
107 static uint8_t *code_gen_buffer
;
108 static unsigned long code_gen_buffer_size
;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size
;
111 uint8_t *code_gen_ptr
;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size
;
116 uint8_t *phys_ram_base
;
117 uint8_t *phys_ram_dirty
;
118 static int in_migration
;
119 static ram_addr_t phys_ram_alloc_offset
= 0;
123 /* current CPU in the current thread. It is only valid inside
125 CPUState
*cpu_single_env
;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
/* Per guest-RAM-page bookkeeping for translated code. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
/* one bit per byte of the page holding translated code; built lazily by
   build_page_bitmap() once code_write_count passes the SMC threshold */
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
/* Physical page descriptor for the softmmu physical memory map. */
146 typedef struct PhysPageDesc
{
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset
;
149 ram_addr_t region_offset
;
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
166 unsigned long qemu_real_host_page_size
;
167 unsigned long qemu_host_page_bits
;
168 unsigned long qemu_host_page_size
;
169 unsigned long qemu_host_page_mask
;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc
*l1_map
[L1_SIZE
];
173 static PhysPageDesc
**l1_phys_map
;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
180 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
181 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
182 static int io_mem_nb
;
183 static int io_mem_watch
;
187 static const char *logfilename
= "/tmp/qemu.log";
190 static int log_append
= 0;
193 static int tlb_flush_count
;
194 static int tb_flush_count
;
195 static int tb_phys_invalidate_count
;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t
{
199 target_phys_addr_t base
;
200 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
201 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
202 void *opaque
[TARGET_PAGE_SIZE
][2][4];
203 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
/* Win32 variant: mark [addr, addr + size) executable (and still
   readable/writable) via VirtualProtect.
   NOTE(review): the enclosing #ifdef _WIN32 and the local old_protect
   declaration were lost in extraction -- confirm against upstream. */
207 static void map_exec(void *addr
, long size
)
210 VirtualProtect(addr
, size
,
211 PAGE_EXECUTE_READWRITE
, &old_protect
);
215 static void map_exec(void *addr
, long size
)
217 unsigned long start
, end
, page_size
;
219 page_size
= getpagesize();
220 start
= (unsigned long)addr
;
221 start
&= ~(page_size
- 1);
223 end
= (unsigned long)addr
+ size
;
224 end
+= page_size
- 1;
225 end
&= ~(page_size
- 1);
227 mprotect((void *)start
, end
- start
,
228 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
238 SYSTEM_INFO system_info
;
240 GetSystemInfo(&system_info
);
241 qemu_real_host_page_size
= system_info
.dwPageSize
;
244 qemu_real_host_page_size
= getpagesize();
246 if (qemu_host_page_size
== 0)
247 qemu_host_page_size
= qemu_real_host_page_size
;
248 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
249 qemu_host_page_size
= TARGET_PAGE_SIZE
;
250 qemu_host_page_bits
= 0;
251 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
252 qemu_host_page_bits
++;
253 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
254 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
255 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr
, endaddr
;
264 last_brk
= (unsigned long)sbrk(0);
265 f
= fopen("/proc/self/maps", "r");
268 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
270 startaddr
= MIN(startaddr
,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
272 endaddr
= MIN(endaddr
,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
274 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
275 TARGET_PAGE_ALIGN(endaddr
),
286 static inline PageDesc
**page_l1_map(target_ulong index
)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
294 return &l1_map
[index
>> L2_BITS
];
297 static inline PageDesc
*page_find_alloc(target_ulong index
)
300 lp
= page_l1_map(index
);
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
309 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
310 /* Don't use qemu_malloc because it may recurse. */
311 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
312 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
315 if (addr
== (target_ulong
)addr
) {
316 page_set_flags(addr
& TARGET_PAGE_MASK
,
317 TARGET_PAGE_ALIGN(addr
+ len
),
321 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
325 return p
+ (index
& (L2_SIZE
- 1));
328 static inline PageDesc
*page_find(target_ulong index
)
331 lp
= page_l1_map(index
);
338 return p
+ (index
& (L2_SIZE
- 1));
341 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
346 p
= (void **)l1_phys_map
;
347 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
349 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
350 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
352 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
355 /* allocate if not found */
358 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
359 memset(p
, 0, sizeof(void *) * L1_SIZE
);
363 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
367 /* allocate if not found */
370 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
372 for (i
= 0; i
< L2_SIZE
; i
++)
373 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
375 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
378 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
380 return phys_page_find_alloc(index
, 0);
383 #if !defined(CONFIG_USER_ONLY)
384 static void tlb_protect_code(ram_addr_t ram_addr
);
385 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
387 #define mmap_lock() do { } while(0)
388 #define mmap_unlock() do { } while(0)
391 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393 #if defined(CONFIG_USER_ONLY)
394 /* Currently it is not recommended to allocate big chunks of data in
395 user mode. It will change when a dedicated libc is used */
396 #define USE_STATIC_CODE_GEN_BUFFER
399 #ifdef USE_STATIC_CODE_GEN_BUFFER
400 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
403 static void code_gen_alloc(unsigned long tb_size
)
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
406 code_gen_buffer
= static_code_gen_buffer
;
407 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
408 map_exec(code_gen_buffer
, code_gen_buffer_size
);
410 code_gen_buffer_size
= tb_size
;
411 if (code_gen_buffer_size
== 0) {
412 #if defined(CONFIG_USER_ONLY)
413 /* in user mode, phys_ram_size is not meaningful */
414 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
416 /* XXX: needs ajustments */
417 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
420 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
421 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
422 /* The code gen buffer location may have constraints depending on
423 the host cpu and OS */
424 #if defined(__linux__)
429 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
430 #if defined(__x86_64__)
432 /* Cannot map more than that */
433 if (code_gen_buffer_size
> (800 * 1024 * 1024))
434 code_gen_buffer_size
= (800 * 1024 * 1024);
435 #elif defined(__sparc_v9__)
436 // Map the buffer below 2G, so we can use direct calls and branches
438 start
= (void *) 0x60000000UL
;
439 if (code_gen_buffer_size
> (512 * 1024 * 1024))
440 code_gen_buffer_size
= (512 * 1024 * 1024);
441 #elif defined(__arm__)
442 /* Map the buffer below 32M, so we can use direct calls and branches */
444 start
= (void *) 0x01000000UL
;
445 if (code_gen_buffer_size
> 16 * 1024 * 1024)
446 code_gen_buffer_size
= 16 * 1024 * 1024;
448 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
449 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
451 if (code_gen_buffer
== MAP_FAILED
) {
452 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
456 #elif defined(__FreeBSD__)
460 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
461 #if defined(__x86_64__)
462 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
463 * 0x40000000 is free */
465 addr
= (void *)0x40000000;
466 /* Cannot map more than that */
467 if (code_gen_buffer_size
> (800 * 1024 * 1024))
468 code_gen_buffer_size
= (800 * 1024 * 1024);
470 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
471 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
473 if (code_gen_buffer
== MAP_FAILED
) {
474 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
479 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
480 if (!code_gen_buffer
) {
481 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
484 map_exec(code_gen_buffer
, code_gen_buffer_size
);
486 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
487 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
488 code_gen_buffer_max_size
= code_gen_buffer_size
-
489 code_gen_max_block_size();
490 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
491 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
494 /* Must be called before using the QEMU cpus. 'tb_size' is the size
495 (in bytes) allocated to the translation buffer. Zero means default
497 void cpu_exec_init_all(unsigned long tb_size
)
500 code_gen_alloc(tb_size
);
501 code_gen_ptr
= code_gen_buffer
;
503 #if !defined(CONFIG_USER_ONLY)
508 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
510 #define CPU_COMMON_SAVE_VERSION 1
512 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
514 CPUState
*env
= opaque
;
516 qemu_put_be32s(f
, &env
->halted
);
517 qemu_put_be32s(f
, &env
->interrupt_request
);
520 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
522 CPUState
*env
= opaque
;
524 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
527 qemu_get_be32s(f
, &env
->halted
);
528 qemu_get_be32s(f
, &env
->interrupt_request
);
535 void cpu_exec_init(CPUState
*env
)
540 env
->next_cpu
= NULL
;
543 while (*penv
!= NULL
) {
544 penv
= (CPUState
**)&(*penv
)->next_cpu
;
547 env
->cpu_index
= cpu_index
;
548 TAILQ_INIT(&env
->breakpoints
);
549 TAILQ_INIT(&env
->watchpoints
);
551 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
552 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
553 cpu_common_save
, cpu_common_load
, env
);
554 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
555 cpu_save
, cpu_load
, env
);
559 static inline void invalidate_page_bitmap(PageDesc
*p
)
561 if (p
->code_bitmap
) {
562 qemu_free(p
->code_bitmap
);
563 p
->code_bitmap
= NULL
;
565 p
->code_write_count
= 0;
568 /* set to NULL all the 'first_tb' fields in all PageDescs */
569 static void page_flush_tb(void)
574 for(i
= 0; i
< L1_SIZE
; i
++) {
577 for(j
= 0; j
< L2_SIZE
; j
++) {
579 invalidate_page_bitmap(p
);
586 /* flush all the translation blocks */
587 /* XXX: tb_flush is currently not thread safe */
/* Discards every translation: sanity-checks the code buffer bounds,
   clears each CPU's tb_jmp_cache, empties the physical hash table and
   rewinds code_gen_ptr so generation restarts at the buffer start. */
588 void tb_flush(CPUState
*env1
)
591 #if defined(DEBUG_FLUSH)
592 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
593 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
595 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
/* generated code must never have run past the buffer end */
597 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
598 cpu_abort(env1
, "Internal error: code buffer overflow\n");
/* invalidate the per-CPU virtual-PC -> TB lookup caches */
602 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
603 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
606 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
609 code_gen_ptr
= code_gen_buffer
;
610 /* XXX: flush processor icache at this point if cache flush is
615 #ifdef DEBUG_TB_CHECK
617 static void tb_invalidate_check(target_ulong address
)
619 TranslationBlock
*tb
;
621 address
&= TARGET_PAGE_MASK
;
622 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
623 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
624 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
625 address
>= tb
->pc
+ tb
->size
)) {
626 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
627 address
, (long)tb
->pc
, tb
->size
);
633 /* verify that all the pages have correct rights for code */
634 static void tb_page_check(void)
636 TranslationBlock
*tb
;
637 int i
, flags1
, flags2
;
639 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
640 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
641 flags1
= page_get_flags(tb
->pc
);
642 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
643 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
644 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
645 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
651 static void tb_jmp_check(TranslationBlock
*tb
)
653 TranslationBlock
*tb1
;
656 /* suppress any remaining jumps to this TB */
660 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
663 tb1
= tb1
->jmp_next
[n1
];
665 /* check end of list */
667 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
673 /* invalidate one TB */
674 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
677 TranslationBlock
*tb1
;
681 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
684 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
688 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
690 TranslationBlock
*tb1
;
696 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
698 *ptb
= tb1
->page_next
[n1
];
701 ptb
= &tb1
->page_next
[n1
];
705 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
707 TranslationBlock
*tb1
, **ptb
;
710 ptb
= &tb
->jmp_next
[n
];
713 /* find tb(n) in circular list */
717 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
718 if (n1
== n
&& tb1
== tb
)
721 ptb
= &tb1
->jmp_first
;
723 ptb
= &tb1
->jmp_next
[n1
];
726 /* now we can suppress tb(n) from the list */
727 *ptb
= tb
->jmp_next
[n
];
729 tb
->jmp_next
[n
] = NULL
;
733 /* reset the jump entry 'n' of a TB so that it is not chained to
735 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
737 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
740 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
745 target_phys_addr_t phys_pc
;
746 TranslationBlock
*tb1
, *tb2
;
748 /* remove the TB from the hash list */
749 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
750 h
= tb_phys_hash_func(phys_pc
);
751 tb_remove(&tb_phys_hash
[h
], tb
,
752 offsetof(TranslationBlock
, phys_hash_next
));
754 /* remove the TB from the page list */
755 if (tb
->page_addr
[0] != page_addr
) {
756 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
757 tb_page_remove(&p
->first_tb
, tb
);
758 invalidate_page_bitmap(p
);
760 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
761 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
762 tb_page_remove(&p
->first_tb
, tb
);
763 invalidate_page_bitmap(p
);
766 tb_invalidated_flag
= 1;
768 /* remove the TB from the hash list */
769 h
= tb_jmp_cache_hash_func(tb
->pc
);
770 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
771 if (env
->tb_jmp_cache
[h
] == tb
)
772 env
->tb_jmp_cache
[h
] = NULL
;
775 /* suppress this TB from the two jump lists */
776 tb_jmp_remove(tb
, 0);
777 tb_jmp_remove(tb
, 1);
779 /* suppress any remaining jumps to this TB */
785 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
786 tb2
= tb1
->jmp_next
[n1
];
787 tb_reset_jump(tb1
, n1
);
788 tb1
->jmp_next
[n1
] = NULL
;
791 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
793 tb_phys_invalidate_count
++;
/* Set 'len' bits starting at bit index 'start' in the bitmap 'tab'.
 * Bit i lives in tab[i >> 3] at position (i & 7).  The caller
 * guarantees start + len stays within the bitmap (ranges are clamped
 * to TARGET_PAGE_SIZE before calling).
 */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* the whole run fits inside one byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;                 /* partial leading byte */
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {          /* full middle bytes */
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {              /* partial trailing byte */
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
823 static void build_page_bitmap(PageDesc
*p
)
825 int n
, tb_start
, tb_end
;
826 TranslationBlock
*tb
;
828 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
835 tb
= (TranslationBlock
*)((long)tb
& ~3);
836 /* NOTE: this is subtle as a TB may span two physical pages */
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
841 tb_end
= tb_start
+ tb
->size
;
842 if (tb_end
> TARGET_PAGE_SIZE
)
843 tb_end
= TARGET_PAGE_SIZE
;
846 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
848 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
849 tb
= tb
->page_next
[n
];
853 TranslationBlock
*tb_gen_code(CPUState
*env
,
854 target_ulong pc
, target_ulong cs_base
,
855 int flags
, int cflags
)
857 TranslationBlock
*tb
;
859 target_ulong phys_pc
, phys_page2
, virt_page2
;
862 phys_pc
= get_phys_addr_code(env
, pc
);
865 /* flush must be done */
867 /* cannot fail at this point */
869 /* Don't forget to invalidate previous TB info. */
870 tb_invalidated_flag
= 1;
872 tc_ptr
= code_gen_ptr
;
874 tb
->cs_base
= cs_base
;
877 cpu_gen_code(env
, tb
, &code_gen_size
);
878 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
880 /* check next page if needed */
881 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
883 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
884 phys_page2
= get_phys_addr_code(env
, virt_page2
);
886 tb_link_phys(tb
, phys_pc
, phys_page2
);
890 /* invalidate all TBs which intersect with the target physical page
891 starting in range [start;end[. NOTE: start and end must refer to
892 the same physical page. 'is_cpu_write_access' should be true if called
893 from a real cpu write access: the virtual CPU will exit the current
894 TB if code is modified inside this TB. */
895 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
896 int is_cpu_write_access
)
898 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
899 CPUState
*env
= cpu_single_env
;
900 target_ulong tb_start
, tb_end
;
903 #ifdef TARGET_HAS_PRECISE_SMC
904 int current_tb_not_found
= is_cpu_write_access
;
905 TranslationBlock
*current_tb
= NULL
;
906 int current_tb_modified
= 0;
907 target_ulong current_pc
= 0;
908 target_ulong current_cs_base
= 0;
909 int current_flags
= 0;
910 #endif /* TARGET_HAS_PRECISE_SMC */
912 p
= page_find(start
>> TARGET_PAGE_BITS
);
915 if (!p
->code_bitmap
&&
916 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
917 is_cpu_write_access
) {
918 /* build code bitmap */
919 build_page_bitmap(p
);
922 /* we remove all the TBs in the range [start, end[ */
923 /* XXX: see if in some cases it could be faster to invalidate all the code */
927 tb
= (TranslationBlock
*)((long)tb
& ~3);
928 tb_next
= tb
->page_next
[n
];
929 /* NOTE: this is subtle as a TB may span two physical pages */
931 /* NOTE: tb_end may be after the end of the page, but
932 it is not a problem */
933 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
934 tb_end
= tb_start
+ tb
->size
;
936 tb_start
= tb
->page_addr
[1];
937 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
939 if (!(tb_end
<= start
|| tb_start
>= end
)) {
940 #ifdef TARGET_HAS_PRECISE_SMC
941 if (current_tb_not_found
) {
942 current_tb_not_found
= 0;
944 if (env
->mem_io_pc
) {
945 /* now we have a real cpu fault */
946 current_tb
= tb_find_pc(env
->mem_io_pc
);
949 if (current_tb
== tb
&&
950 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
951 /* If we are modifying the current TB, we must stop
952 its execution. We could be more precise by checking
953 that the modification is after the current PC, but it
954 would require a specialized function to partially
955 restore the CPU state */
957 current_tb_modified
= 1;
958 cpu_restore_state(current_tb
, env
,
959 env
->mem_io_pc
, NULL
);
960 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 /* we need to do that to handle the case where a signal
965 occurs while doing tb_phys_invalidate() */
968 saved_tb
= env
->current_tb
;
969 env
->current_tb
= NULL
;
971 tb_phys_invalidate(tb
, -1);
973 env
->current_tb
= saved_tb
;
974 if (env
->interrupt_request
&& env
->current_tb
)
975 cpu_interrupt(env
, env
->interrupt_request
);
980 #if !defined(CONFIG_USER_ONLY)
981 /* if no code remaining, no need to continue to use slow writes */
983 invalidate_page_bitmap(p
);
984 if (is_cpu_write_access
) {
985 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
989 #ifdef TARGET_HAS_PRECISE_SMC
990 if (current_tb_modified
) {
991 /* we generate a block containing just the instruction
992 modifying the memory. It will ensure that it cannot modify
994 env
->current_tb
= NULL
;
995 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
996 cpu_resume_from_signal(env
, NULL
);
1001 /* len must be <= 8 and start must be a multiple of len */
1002 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1009 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1010 cpu_single_env
->mem_io_vaddr
, len
,
1011 cpu_single_env
->eip
,
1012 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1016 p
= page_find(start
>> TARGET_PAGE_BITS
);
1019 if (p
->code_bitmap
) {
1020 offset
= start
& ~TARGET_PAGE_MASK
;
1021 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1022 if (b
& ((1 << len
) - 1))
1026 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1030 #if !defined(CONFIG_SOFTMMU)
1031 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1032 unsigned long pc
, void *puc
)
1034 TranslationBlock
*tb
;
1037 #ifdef TARGET_HAS_PRECISE_SMC
1038 TranslationBlock
*current_tb
= NULL
;
1039 CPUState
*env
= cpu_single_env
;
1040 int current_tb_modified
= 0;
1041 target_ulong current_pc
= 0;
1042 target_ulong current_cs_base
= 0;
1043 int current_flags
= 0;
1046 addr
&= TARGET_PAGE_MASK
;
1047 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1051 #ifdef TARGET_HAS_PRECISE_SMC
1052 if (tb
&& pc
!= 0) {
1053 current_tb
= tb_find_pc(pc
);
1056 while (tb
!= NULL
) {
1058 tb
= (TranslationBlock
*)((long)tb
& ~3);
1059 #ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb
== tb
&&
1061 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1062 /* If we are modifying the current TB, we must stop
1063 its execution. We could be more precise by checking
1064 that the modification is after the current PC, but it
1065 would require a specialized function to partially
1066 restore the CPU state */
1068 current_tb_modified
= 1;
1069 cpu_restore_state(current_tb
, env
, pc
, puc
);
1070 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1073 #endif /* TARGET_HAS_PRECISE_SMC */
1074 tb_phys_invalidate(tb
, addr
);
1075 tb
= tb
->page_next
[n
];
1078 #ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_modified
) {
1080 /* we generate a block containing just the instruction
1081 modifying the memory. It will ensure that it cannot modify
1083 env
->current_tb
= NULL
;
1084 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1085 cpu_resume_from_signal(env
, puc
);
1091 /* add the tb in the target page and protect it if necessary */
1092 static inline void tb_alloc_page(TranslationBlock
*tb
,
1093 unsigned int n
, target_ulong page_addr
)
1096 TranslationBlock
*last_first_tb
;
1098 tb
->page_addr
[n
] = page_addr
;
1099 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1100 tb
->page_next
[n
] = p
->first_tb
;
1101 last_first_tb
= p
->first_tb
;
1102 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1103 invalidate_page_bitmap(p
);
1105 #if defined(TARGET_HAS_SMC) || 1
1107 #if defined(CONFIG_USER_ONLY)
1108 if (p
->flags
& PAGE_WRITE
) {
1113 /* force the host page as non writable (writes will have a
1114 page fault + mprotect overhead) */
1115 page_addr
&= qemu_host_page_mask
;
1117 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1118 addr
+= TARGET_PAGE_SIZE
) {
1120 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1124 p2
->flags
&= ~PAGE_WRITE
;
1125 page_get_flags(addr
);
1127 mprotect(g2h(page_addr
), qemu_host_page_size
,
1128 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1129 #ifdef DEBUG_TB_INVALIDATE
1130 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1135 /* if some code is already present, then the pages are already
1136 protected. So we handle the case where only the first TB is
1137 allocated in a physical page */
1138 if (!last_first_tb
) {
1139 tlb_protect_code(page_addr
);
1143 #endif /* TARGET_HAS_SMC */
1146 /* Allocate a new translation block. Flush the translation buffer if
1147 too many translation blocks or too much generated code. */
/* NOTE(review): despite the comment above, this function does not flush
   by itself; when the TB pool or code buffer is exhausted it bails out
   (the early-return path was lost in extraction -- presumably returns
   NULL) and the caller is expected to tb_flush() and retry. */
1148 TranslationBlock
*tb_alloc(target_ulong pc
)
1150 TranslationBlock
*tb
;
/* out of TB descriptors, or generated code close to the buffer end */
1152 if (nb_tbs
>= code_gen_max_blocks
||
1153 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
/* take the next descriptor from the preallocated pool */
1155 tb
= &tbs
[nb_tbs
++];
1161 void tb_free(TranslationBlock
*tb
)
1163 /* In practice this is mostly used for single use temporary TB
1164 Ignore the hard cases and just back up if this TB happens to
1165 be the last one generated. */
1166 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1167 code_gen_ptr
= tb
->tc_ptr
;
1172 /* add a new TB and link it to the physical page tables. phys_page2 is
1173 (-1) to indicate that only one page contains the TB. */
1174 void tb_link_phys(TranslationBlock
*tb
,
1175 target_ulong phys_pc
, target_ulong phys_page2
)
1178 TranslationBlock
**ptb
;
1180 /* Grab the mmap lock to stop another thread invalidating this TB
1181 before we are done. */
1183 /* add in the physical hash table */
1184 h
= tb_phys_hash_func(phys_pc
);
1185 ptb
= &tb_phys_hash
[h
];
1186 tb
->phys_hash_next
= *ptb
;
1189 /* add in the page list */
1190 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1191 if (phys_page2
!= -1)
1192 tb_alloc_page(tb
, 1, phys_page2
);
1194 tb
->page_addr
[1] = -1;
1196 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1197 tb
->jmp_next
[0] = NULL
;
1198 tb
->jmp_next
[1] = NULL
;
1200 /* init original jump addresses */
1201 if (tb
->tb_next_offset
[0] != 0xffff)
1202 tb_reset_jump(tb
, 0);
1203 if (tb
->tb_next_offset
[1] != 0xffff)
1204 tb_reset_jump(tb
, 1);
1206 #ifdef DEBUG_TB_CHECK
1212 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1213 tb[1].tc_ptr. Return NULL if not found */
1214 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1216 int m_min
, m_max
, m
;
1218 TranslationBlock
*tb
;
1222 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1223 tc_ptr
>= (unsigned long)code_gen_ptr
)
1225 /* binary search (cf Knuth) */
1228 while (m_min
<= m_max
) {
1229 m
= (m_min
+ m_max
) >> 1;
1231 v
= (unsigned long)tb
->tc_ptr
;
1234 else if (tc_ptr
< v
) {
1243 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1245 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1247 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1250 tb1
= tb
->jmp_next
[n
];
1252 /* find head of list */
1255 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1258 tb1
= tb1
->jmp_next
[n1
];
1260 /* we are now sure now that tb jumps to tb1 */
1263 /* remove tb from the jmp_first list */
1264 ptb
= &tb_next
->jmp_first
;
1268 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1269 if (n1
== n
&& tb1
== tb
)
1271 ptb
= &tb1
->jmp_next
[n1
];
1273 *ptb
= tb
->jmp_next
[n
];
1274 tb
->jmp_next
[n
] = NULL
;
1276 /* suppress the jump to next tb in generated code */
1277 tb_reset_jump(tb
, n
);
1279 /* suppress jumps in the tb on which we could have jumped */
1280 tb_reset_jump_recursive(tb_next
);
1284 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1286 tb_reset_jump_recursive2(tb
, 0);
1287 tb_reset_jump_recursive2(tb
, 1);
1290 #if defined(TARGET_HAS_ICE)
1291 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1293 target_phys_addr_t addr
;
1295 ram_addr_t ram_addr
;
1298 addr
= cpu_get_phys_page_debug(env
, pc
);
1299 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1301 pd
= IO_MEM_UNASSIGNED
;
1303 pd
= p
->phys_offset
;
1305 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1306 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1310 /* Add a watchpoint. */
1311 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1312 int flags
, CPUWatchpoint
**watchpoint
)
1314 target_ulong len_mask
= ~(len
- 1);
1317 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1318 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1319 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1320 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1323 wp
= qemu_malloc(sizeof(*wp
));
1328 wp
->len_mask
= len_mask
;
1331 /* keep all GDB-injected watchpoints in front */
1333 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1335 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1337 tlb_flush_page(env
, addr
);
1344 /* Remove a specific watchpoint. */
1345 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1348 target_ulong len_mask
= ~(len
- 1);
1351 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1352 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1353 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1354 cpu_watchpoint_remove_by_ref(env
, wp
);
1361 /* Remove a specific watchpoint by reference. */
1362 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1364 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1366 tlb_flush_page(env
, watchpoint
->vaddr
);
1368 qemu_free(watchpoint
);
1371 /* Remove all matching watchpoints. */
1372 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1374 CPUWatchpoint
*wp
, *next
;
1376 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1377 if (wp
->flags
& mask
)
1378 cpu_watchpoint_remove_by_ref(env
, wp
);
1382 /* Add a breakpoint. */
1383 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1384 CPUBreakpoint
**breakpoint
)
1386 #if defined(TARGET_HAS_ICE)
1389 bp
= qemu_malloc(sizeof(*bp
));
1396 /* keep all GDB-injected breakpoints in front */
1398 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1400 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1402 breakpoint_invalidate(env
, pc
);
1412 /* Remove a specific breakpoint. */
1413 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1415 #if defined(TARGET_HAS_ICE)
1418 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1419 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1420 cpu_breakpoint_remove_by_ref(env
, bp
);
1430 /* Remove a specific breakpoint by reference. */
1431 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1433 #if defined(TARGET_HAS_ICE)
1434 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1436 breakpoint_invalidate(env
, breakpoint
->pc
);
1438 qemu_free(breakpoint
);
1442 /* Remove all matching breakpoints. */
1443 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1445 #if defined(TARGET_HAS_ICE)
1446 CPUBreakpoint
*bp
, *next
;
1448 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1449 if (bp
->flags
& mask
)
1450 cpu_breakpoint_remove_by_ref(env
, bp
);
1455 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1456 CPU loop after each instruction */
1457 void cpu_single_step(CPUState
*env
, int enabled
)
1459 #if defined(TARGET_HAS_ICE)
1460 if (env
->singlestep_enabled
!= enabled
) {
1461 env
->singlestep_enabled
= enabled
;
1462 /* must flush all the translated code to avoid inconsistencies */
1463 /* XXX: only flush what is necessary */
1469 /* enable or disable low levels log */
1470 void cpu_set_log(int log_flags
)
1472 loglevel
= log_flags
;
1473 if (loglevel
&& !logfile
) {
1474 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1476 perror(logfilename
);
1479 #if !defined(CONFIG_SOFTMMU)
1480 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1482 static char logfile_buf
[4096];
1483 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1486 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1490 if (!loglevel
&& logfile
) {
1496 void cpu_set_log_filename(const char *filename
)
1498 logfilename
= strdup(filename
);
1503 cpu_set_log(loglevel
);
1506 /* mask must never be zero, except for A20 change call */
1507 void cpu_interrupt(CPUState
*env
, int mask
)
1509 #if !defined(USE_NPTL)
1510 TranslationBlock
*tb
;
1511 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1515 old_mask
= env
->interrupt_request
;
1516 /* FIXME: This is probably not threadsafe. A different thread could
1517 be in the middle of a read-modify-write operation. */
1518 env
->interrupt_request
|= mask
;
1519 #if defined(USE_NPTL)
1520 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1521 problem and hope the cpu will stop of its own accord. For userspace
1522 emulation this often isn't actually as bad as it sounds. Often
1523 signals are used primarily to interrupt blocking syscalls. */
1526 env
->icount_decr
.u16
.high
= 0xffff;
1527 #ifndef CONFIG_USER_ONLY
1528 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1529 an async event happened and we need to process it. */
1531 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1532 cpu_abort(env
, "Raised interrupt while not in I/O function");
1536 tb
= env
->current_tb
;
1537 /* if the cpu is currently executing code, we must unlink it and
1538 all the potentially executing TB */
1539 if (tb
&& !testandset(&interrupt_lock
)) {
1540 env
->current_tb
= NULL
;
1541 tb_reset_jump_recursive(tb
);
1542 resetlock(&interrupt_lock
);
1548 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1550 env
->interrupt_request
&= ~mask
;
1553 const CPULogItem cpu_log_items
[] = {
1554 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1555 "show generated host assembly code for each compiled TB" },
1556 { CPU_LOG_TB_IN_ASM
, "in_asm",
1557 "show target assembly code for each compiled TB" },
1558 { CPU_LOG_TB_OP
, "op",
1559 "show micro ops for each compiled TB" },
1560 { CPU_LOG_TB_OP_OPT
, "op_opt",
1563 "before eflags optimization and "
1565 "after liveness analysis" },
1566 { CPU_LOG_INT
, "int",
1567 "show interrupts/exceptions in short format" },
1568 { CPU_LOG_EXEC
, "exec",
1569 "show trace before each executed TB (lots of logs)" },
1570 { CPU_LOG_TB_CPU
, "cpu",
1571 "show CPU state before block translation" },
1573 { CPU_LOG_PCALL
, "pcall",
1574 "show protected mode far calls/returns/exceptions" },
1577 { CPU_LOG_IOPORT
, "ioport",
1578 "show all i/o ports accesses" },
/* Compare the first 'n' characters of 's1' against the NUL-terminated
   string 's2'.  Returns 1 only when 's2' is exactly 'n' characters long
   and matches; 0 otherwise.  Used to match log-mask item names. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;   /* lengths differ: cannot be equal */
    return memcmp(s1, s2, n) == 0;
}
1590 /* takes a comma separated list of log masks. Return 0 if error. */
1591 int cpu_str_to_log_mask(const char *str
)
1593 const CPULogItem
*item
;
1600 p1
= strchr(p
, ',');
1603 if(cmp1(p
,p1
-p
,"all")) {
1604 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1608 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1609 if (cmp1(p
, p1
- p
, item
->name
))
1623 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1630 fprintf(stderr
, "qemu: fatal: ");
1631 vfprintf(stderr
, fmt
, ap
);
1632 fprintf(stderr
, "\n");
1634 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1636 cpu_dump_state(env
, stderr
, fprintf
, 0);
1639 fprintf(logfile
, "qemu: fatal: ");
1640 vfprintf(logfile
, fmt
, ap2
);
1641 fprintf(logfile
, "\n");
1643 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1645 cpu_dump_state(env
, logfile
, fprintf
, 0);
1655 CPUState
*cpu_copy(CPUState
*env
)
1657 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1658 /* preserve chaining and index */
1659 CPUState
*next_cpu
= new_env
->next_cpu
;
1660 int cpu_index
= new_env
->cpu_index
;
1661 memcpy(new_env
, env
, sizeof(CPUState
));
1662 new_env
->next_cpu
= next_cpu
;
1663 new_env
->cpu_index
= cpu_index
;
1667 #if !defined(CONFIG_USER_ONLY)
1669 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1673 /* Discard jump cache entries for any tb which might potentially
1674 overlap the flushed page. */
1675 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1676 memset (&env
->tb_jmp_cache
[i
], 0,
1677 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1679 i
= tb_jmp_cache_hash_page(addr
);
1680 memset (&env
->tb_jmp_cache
[i
], 0,
1681 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1684 /* NOTE: if flush_global is true, also flush global entries (not
1686 void tlb_flush(CPUState
*env
, int flush_global
)
1690 #if defined(DEBUG_TLB)
1691 printf("tlb_flush:\n");
1693 /* must reset current TB so that interrupts cannot modify the
1694 links while we are modifying them */
1695 env
->current_tb
= NULL
;
1697 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1698 env
->tlb_table
[0][i
].addr_read
= -1;
1699 env
->tlb_table
[0][i
].addr_write
= -1;
1700 env
->tlb_table
[0][i
].addr_code
= -1;
1701 env
->tlb_table
[1][i
].addr_read
= -1;
1702 env
->tlb_table
[1][i
].addr_write
= -1;
1703 env
->tlb_table
[1][i
].addr_code
= -1;
1704 #if (NB_MMU_MODES >= 3)
1705 env
->tlb_table
[2][i
].addr_read
= -1;
1706 env
->tlb_table
[2][i
].addr_write
= -1;
1707 env
->tlb_table
[2][i
].addr_code
= -1;
1708 #if (NB_MMU_MODES == 4)
1709 env
->tlb_table
[3][i
].addr_read
= -1;
1710 env
->tlb_table
[3][i
].addr_write
= -1;
1711 env
->tlb_table
[3][i
].addr_code
= -1;
1716 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1719 if (env
->kqemu_enabled
) {
1720 kqemu_flush(env
, flush_global
);
1726 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1728 if (addr
== (tlb_entry
->addr_read
&
1729 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1730 addr
== (tlb_entry
->addr_write
&
1731 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1732 addr
== (tlb_entry
->addr_code
&
1733 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1734 tlb_entry
->addr_read
= -1;
1735 tlb_entry
->addr_write
= -1;
1736 tlb_entry
->addr_code
= -1;
1740 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1744 #if defined(DEBUG_TLB)
1745 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1747 /* must reset current TB so that interrupts cannot modify the
1748 links while we are modifying them */
1749 env
->current_tb
= NULL
;
1751 addr
&= TARGET_PAGE_MASK
;
1752 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1753 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1754 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1755 #if (NB_MMU_MODES >= 3)
1756 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1757 #if (NB_MMU_MODES == 4)
1758 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1762 tlb_flush_jmp_cache(env
, addr
);
1765 if (env
->kqemu_enabled
) {
1766 kqemu_flush_page(env
, addr
);
1771 /* update the TLBs so that writes to code in the virtual page 'addr'
1773 static void tlb_protect_code(ram_addr_t ram_addr
)
1775 cpu_physical_memory_reset_dirty(ram_addr
,
1776 ram_addr
+ TARGET_PAGE_SIZE
,
1780 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1781 tested for self modifying code */
1782 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1785 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1788 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1789 unsigned long start
, unsigned long length
)
1792 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1793 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1794 if ((addr
- start
) < length
) {
1795 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1800 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1804 unsigned long length
, start1
;
1808 start
&= TARGET_PAGE_MASK
;
1809 end
= TARGET_PAGE_ALIGN(end
);
1811 length
= end
- start
;
1814 len
= length
>> TARGET_PAGE_BITS
;
1816 /* XXX: should not depend on cpu context */
1818 if (env
->kqemu_enabled
) {
1821 for(i
= 0; i
< len
; i
++) {
1822 kqemu_set_notdirty(env
, addr
);
1823 addr
+= TARGET_PAGE_SIZE
;
1827 mask
= ~dirty_flags
;
1828 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1829 for(i
= 0; i
< len
; i
++)
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
1834 start1
= start
+ (unsigned long)phys_ram_base
;
1835 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1836 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1837 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1838 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1839 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1840 #if (NB_MMU_MODES >= 3)
1841 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1842 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1843 #if (NB_MMU_MODES == 4)
1844 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1845 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1851 int cpu_physical_memory_set_dirty_tracking(int enable
)
1853 in_migration
= enable
;
1857 int cpu_physical_memory_get_dirty_tracking(void)
1859 return in_migration
;
1862 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1865 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1868 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1870 ram_addr_t ram_addr
;
1872 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1873 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1874 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1875 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1876 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1881 /* update the TLB according to the current state of the dirty bits */
1882 void cpu_tlb_update_dirty(CPUState
*env
)
1885 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1886 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1887 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1888 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1889 #if (NB_MMU_MODES >= 3)
1890 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1891 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1892 #if (NB_MMU_MODES == 4)
1893 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1894 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1899 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1901 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1902 tlb_entry
->addr_write
= vaddr
;
1905 /* update the TLB corresponding to virtual page vaddr
1906 so that it is no longer dirty */
1907 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1911 vaddr
&= TARGET_PAGE_MASK
;
1912 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1913 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1914 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1915 #if (NB_MMU_MODES >= 3)
1916 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1917 #if (NB_MMU_MODES == 4)
1918 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1923 /* add a new TLB entry. At most one entry for a given virtual address
1924 is permitted. Return 0 if OK or 2 if the page could not be mapped
1925 (can only happen in non SOFTMMU mode for I/O pages or pages
1926 conflicting with the host address space). */
1927 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1928 target_phys_addr_t paddr
, int prot
,
1929 int mmu_idx
, int is_softmmu
)
1934 target_ulong address
;
1935 target_ulong code_address
;
1936 target_phys_addr_t addend
;
1940 target_phys_addr_t iotlb
;
1942 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1944 pd
= IO_MEM_UNASSIGNED
;
1946 pd
= p
->phys_offset
;
1948 #if defined(DEBUG_TLB)
1949 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1950 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1955 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1956 /* IO memory case (romd handled later) */
1957 address
|= TLB_MMIO
;
1959 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1960 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1962 iotlb
= pd
& TARGET_PAGE_MASK
;
1963 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1964 iotlb
|= IO_MEM_NOTDIRTY
;
1966 iotlb
|= IO_MEM_ROM
;
1968 /* IO handlers are currently passed a physical address.
1969 It would be nice to pass an offset from the base address
1970 of that region. This would avoid having to special case RAM,
1971 and avoid full address decoding in every device.
1972 We can't use the high bits of pd for this because
1973 IO_MEM_ROMD uses these as a ram address. */
1974 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
1976 /* FIXME: What if this isn't page aligned? */
1977 iotlb
+= p
->region_offset
;
1983 code_address
= address
;
1984 /* Make accesses to pages with watchpoints go via the
1985 watchpoint trap routines. */
1986 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1987 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
1988 iotlb
= io_mem_watch
+ paddr
;
1989 /* TODO: The memory case can be optimized by not trapping
1990 reads of pages with a write breakpoint. */
1991 address
|= TLB_MMIO
;
1995 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1996 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
1997 te
= &env
->tlb_table
[mmu_idx
][index
];
1998 te
->addend
= addend
- vaddr
;
1999 if (prot
& PAGE_READ
) {
2000 te
->addr_read
= address
;
2005 if (prot
& PAGE_EXEC
) {
2006 te
->addr_code
= code_address
;
2010 if (prot
& PAGE_WRITE
) {
2011 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2012 (pd
& IO_MEM_ROMD
)) {
2013 /* Write access calls the I/O callback. */
2014 te
->addr_write
= address
| TLB_MMIO
;
2015 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2016 !cpu_physical_memory_is_dirty(pd
)) {
2017 te
->addr_write
= address
| TLB_NOTDIRTY
;
2019 te
->addr_write
= address
;
2022 te
->addr_write
= -1;
2029 void tlb_flush(CPUState
*env
, int flush_global
)
2033 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2037 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2038 target_phys_addr_t paddr
, int prot
,
2039 int mmu_idx
, int is_softmmu
)
2044 /* dump memory mappings */
2045 void page_dump(FILE *f
)
2047 unsigned long start
, end
;
2048 int i
, j
, prot
, prot1
;
2051 fprintf(f
, "%-8s %-8s %-8s %s\n",
2052 "start", "end", "size", "prot");
2056 for(i
= 0; i
<= L1_SIZE
; i
++) {
2061 for(j
= 0;j
< L2_SIZE
; j
++) {
2066 if (prot1
!= prot
) {
2067 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2069 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2070 start
, end
, end
- start
,
2071 prot
& PAGE_READ
? 'r' : '-',
2072 prot
& PAGE_WRITE
? 'w' : '-',
2073 prot
& PAGE_EXEC
? 'x' : '-');
2087 int page_get_flags(target_ulong address
)
2091 p
= page_find(address
>> TARGET_PAGE_BITS
);
2097 /* modify the flags of a page and invalidate the code if
2098 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2099 depending on PAGE_WRITE */
2100 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2105 /* mmap_lock should already be held. */
2106 start
= start
& TARGET_PAGE_MASK
;
2107 end
= TARGET_PAGE_ALIGN(end
);
2108 if (flags
& PAGE_WRITE
)
2109 flags
|= PAGE_WRITE_ORG
;
2110 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2111 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2112 /* We may be called for host regions that are outside guest
2116 /* if the write protection is set, then we invalidate the code
2118 if (!(p
->flags
& PAGE_WRITE
) &&
2119 (flags
& PAGE_WRITE
) &&
2121 tb_invalidate_phys_page(addr
, 0, NULL
);
2127 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2133 if (start
+ len
< start
)
2134 /* we've wrapped around */
2137 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2138 start
= start
& TARGET_PAGE_MASK
;
2140 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2141 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2144 if( !(p
->flags
& PAGE_VALID
) )
2147 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2149 if (flags
& PAGE_WRITE
) {
2150 if (!(p
->flags
& PAGE_WRITE_ORG
))
2152 /* unprotect the page if it was put read-only because it
2153 contains translated code */
2154 if (!(p
->flags
& PAGE_WRITE
)) {
2155 if (!page_unprotect(addr
, 0, NULL
))
2164 /* called from signal handler: invalidate the code and unprotect the
2165 page. Return TRUE if the fault was successfully handled. */
2166 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2168 unsigned int page_index
, prot
, pindex
;
2170 target_ulong host_start
, host_end
, addr
;
2172 /* Technically this isn't safe inside a signal handler. However we
2173 know this only ever happens in a synchronous SEGV handler, so in
2174 practice it seems to be ok. */
2177 host_start
= address
& qemu_host_page_mask
;
2178 page_index
= host_start
>> TARGET_PAGE_BITS
;
2179 p1
= page_find(page_index
);
2184 host_end
= host_start
+ qemu_host_page_size
;
2187 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2191 /* if the page was really writable, then we change its
2192 protection back to writable */
2193 if (prot
& PAGE_WRITE_ORG
) {
2194 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2195 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2196 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2197 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2198 p1
[pindex
].flags
|= PAGE_WRITE
;
2199 /* and since the content will be modified, we must invalidate
2200 the corresponding translated code. */
2201 tb_invalidate_phys_page(address
, pc
, puc
);
2202 #ifdef DEBUG_TB_CHECK
2203 tb_invalidate_check(address
);
2213 static inline void tlb_set_dirty(CPUState
*env
,
2214 unsigned long addr
, target_ulong vaddr
)
2217 #endif /* defined(CONFIG_USER_ONLY) */
2219 #if !defined(CONFIG_USER_ONLY)
2221 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2222 ram_addr_t memory
, ram_addr_t region_offset
);
2223 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2224 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2225 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2228 if (addr > start_addr) \
2231 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2232 if (start_addr2 > 0) \
2236 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2237 end_addr2 = TARGET_PAGE_SIZE - 1; \
2239 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2240 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2245 /* register physical memory. 'size' must be a multiple of the target
2246 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2247 io memory page. The address used when calling the IO function is
2248 the offset from the start of the region, plus region_offset. Both
2249 start_addr and region_offset are rounded down to a page boundary
2250 before calculating this offset. This should not be a problem unless
2251 the low bits of start_addr and region_offset differ. */
2252 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2254 ram_addr_t phys_offset
,
2255 ram_addr_t region_offset
)
2257 target_phys_addr_t addr
, end_addr
;
2260 ram_addr_t orig_size
= size
;
2264 /* XXX: should not depend on cpu context */
2266 if (env
->kqemu_enabled
) {
2267 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2271 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2273 region_offset
&= TARGET_PAGE_MASK
;
2274 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2275 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2276 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2277 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2278 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2279 ram_addr_t orig_memory
= p
->phys_offset
;
2280 target_phys_addr_t start_addr2
, end_addr2
;
2281 int need_subpage
= 0;
2283 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2285 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2286 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2287 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2288 &p
->phys_offset
, orig_memory
,
2291 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2294 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2296 p
->region_offset
= 0;
2298 p
->phys_offset
= phys_offset
;
2299 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2300 (phys_offset
& IO_MEM_ROMD
))
2301 phys_offset
+= TARGET_PAGE_SIZE
;
2304 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2305 p
->phys_offset
= phys_offset
;
2306 p
->region_offset
= region_offset
;
2307 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2308 (phys_offset
& IO_MEM_ROMD
)) {
2309 phys_offset
+= TARGET_PAGE_SIZE
;
2311 target_phys_addr_t start_addr2
, end_addr2
;
2312 int need_subpage
= 0;
2314 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2315 end_addr2
, need_subpage
);
2317 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2318 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2319 &p
->phys_offset
, IO_MEM_UNASSIGNED
,
2321 subpage_register(subpage
, start_addr2
, end_addr2
,
2322 phys_offset
, region_offset
);
2323 p
->region_offset
= 0;
2327 region_offset
+= TARGET_PAGE_SIZE
;
2330 /* since each CPU stores ram addresses in its TLB cache, we must
2331 reset the modified entries */
2333 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2338 /* XXX: temporary until new memory mapping API */
2339 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2343 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2345 return IO_MEM_UNASSIGNED
;
2346 return p
->phys_offset
;
2349 /* XXX: better than nothing */
2350 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2353 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2354 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
")\n",
2355 (uint64_t)size
, (uint64_t)phys_ram_size
);
2358 addr
= phys_ram_alloc_offset
;
2359 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2363 void qemu_ram_free(ram_addr_t addr
)
2367 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2369 #ifdef DEBUG_UNASSIGNED
2370 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2372 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2373 do_unassigned_access(addr
, 0, 0, 0, 1);
2378 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
2380 #ifdef DEBUG_UNASSIGNED
2381 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2383 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2384 do_unassigned_access(addr
, 0, 0, 0, 2);
2389 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
2391 #ifdef DEBUG_UNASSIGNED
2392 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2394 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2395 do_unassigned_access(addr
, 0, 0, 0, 4);
2400 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2402 #ifdef DEBUG_UNASSIGNED
2403 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2405 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2406 do_unassigned_access(addr
, 1, 0, 0, 1);
2410 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2412 #ifdef DEBUG_UNASSIGNED
2413 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2415 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2416 do_unassigned_access(addr
, 1, 0, 0, 2);
2420 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2422 #ifdef DEBUG_UNASSIGNED
2423 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2425 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2426 do_unassigned_access(addr
, 1, 0, 0, 4);
2430 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2431 unassigned_mem_readb
,
2432 unassigned_mem_readw
,
2433 unassigned_mem_readl
,
2436 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2437 unassigned_mem_writeb
,
2438 unassigned_mem_writew
,
2439 unassigned_mem_writel
,
2442 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2446 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2447 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2448 #if !defined(CONFIG_USER_ONLY)
2449 tb_invalidate_phys_page_fast(ram_addr
, 1);
2450 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2453 stb_p(phys_ram_base
+ ram_addr
, val
);
2455 if (cpu_single_env
->kqemu_enabled
&&
2456 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2457 kqemu_modify_page(cpu_single_env
, ram_addr
);
2459 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2460 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2461 /* we remove the notdirty callback only if the code has been
2463 if (dirty_flags
== 0xff)
2464 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2467 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2471 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2472 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2473 #if !defined(CONFIG_USER_ONLY)
2474 tb_invalidate_phys_page_fast(ram_addr
, 2);
2475 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2478 stw_p(phys_ram_base
+ ram_addr
, val
);
2480 if (cpu_single_env
->kqemu_enabled
&&
2481 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2482 kqemu_modify_page(cpu_single_env
, ram_addr
);
2484 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2485 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2486 /* we remove the notdirty callback only if the code has been
2488 if (dirty_flags
== 0xff)
2489 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2492 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2496 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2497 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2498 #if !defined(CONFIG_USER_ONLY)
2499 tb_invalidate_phys_page_fast(ram_addr
, 4);
2500 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2503 stl_p(phys_ram_base
+ ram_addr
, val
);
2505 if (cpu_single_env
->kqemu_enabled
&&
2506 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2507 kqemu_modify_page(cpu_single_env
, ram_addr
);
2509 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2510 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2511 /* we remove the notdirty callback only if the code has been
2513 if (dirty_flags
== 0xff)
2514 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2517 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2518 NULL
, /* never used */
2519 NULL
, /* never used */
2520 NULL
, /* never used */
2523 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2524 notdirty_mem_writeb
,
2525 notdirty_mem_writew
,
2526 notdirty_mem_writel
,
2529 /* Generate a debug exception if a watchpoint has been hit. */
2530 static void check_watchpoint(int offset
, int len_mask
, int flags
)
2532 CPUState
*env
= cpu_single_env
;
2533 target_ulong pc
, cs_base
;
2534 TranslationBlock
*tb
;
2539 if (env
->watchpoint_hit
) {
2540 /* We re-entered the check after replacing the TB. Now raise
2541 * the debug interrupt so that is will trigger after the
2542 * current instruction. */
2543 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2546 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2547 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2548 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
2549 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
2550 wp
->flags
|= BP_WATCHPOINT_HIT
;
2551 if (!env
->watchpoint_hit
) {
2552 env
->watchpoint_hit
= wp
;
2553 tb
= tb_find_pc(env
->mem_io_pc
);
2555 cpu_abort(env
, "check_watchpoint: could not find TB for "
2556 "pc=%p", (void *)env
->mem_io_pc
);
2558 cpu_restore_state(tb
, env
, env
->mem_io_pc
, NULL
);
2559 tb_phys_invalidate(tb
, -1);
2560 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2561 env
->exception_index
= EXCP_DEBUG
;
2563 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2564 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
2566 cpu_resume_from_signal(env
, NULL
);
2569 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2574 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2575 so these check for a hit then pass through to the normal out-of-line
2577 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2579 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
2580 return ldub_phys(addr
);
2583 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2585 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
2586 return lduw_phys(addr
);
2589 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2591 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
2592 return ldl_phys(addr
);
2595 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2598 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
2599 stb_phys(addr
, val
);
2602 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2605 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
2606 stw_phys(addr
, val
);
2609 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2612 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
2613 stl_phys(addr
, val
);
2616 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2622 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2628 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2634 idx
= SUBPAGE_IDX(addr
);
2635 #if defined(DEBUG_SUBPAGE)
2636 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2637 mmio
, len
, addr
, idx
);
2639 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
],
2640 addr
+ mmio
->region_offset
[idx
][0][len
]);
2645 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2646 uint32_t value
, unsigned int len
)
2650 idx
= SUBPAGE_IDX(addr
);
2651 #if defined(DEBUG_SUBPAGE)
2652 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2653 mmio
, len
, addr
, idx
, value
);
2655 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
],
2656 addr
+ mmio
->region_offset
[idx
][1][len
],
2660 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2662 #if defined(DEBUG_SUBPAGE)
2663 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2666 return subpage_readlen(opaque
, addr
, 0);
2669 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2672 #if defined(DEBUG_SUBPAGE)
2673 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2675 subpage_writelen(opaque
, addr
, value
, 0);
2678 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2680 #if defined(DEBUG_SUBPAGE)
2681 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2684 return subpage_readlen(opaque
, addr
, 1);
2687 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2690 #if defined(DEBUG_SUBPAGE)
2691 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2693 subpage_writelen(opaque
, addr
, value
, 1);
2696 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2698 #if defined(DEBUG_SUBPAGE)
2699 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2702 return subpage_readlen(opaque
, addr
, 2);
2705 static void subpage_writel (void *opaque
,
2706 target_phys_addr_t addr
, uint32_t value
)
2708 #if defined(DEBUG_SUBPAGE)
2709 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2711 subpage_writelen(opaque
, addr
, value
, 2);
2714 static CPUReadMemoryFunc
*subpage_read
[] = {
2720 static CPUWriteMemoryFunc
*subpage_write
[] = {
2726 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2727 ram_addr_t memory
, ram_addr_t region_offset
)
2732 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2734 idx
= SUBPAGE_IDX(start
);
2735 eidx
= SUBPAGE_IDX(end
);
2736 #if defined(DEBUG_SUBPAGE)
2737 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2738 mmio
, start
, end
, idx
, eidx
, memory
);
2740 memory
>>= IO_MEM_SHIFT
;
2741 for (; idx
<= eidx
; idx
++) {
2742 for (i
= 0; i
< 4; i
++) {
2743 if (io_mem_read
[memory
][i
]) {
2744 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2745 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2746 mmio
->region_offset
[idx
][0][i
] = region_offset
;
2748 if (io_mem_write
[memory
][i
]) {
2749 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2750 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2751 mmio
->region_offset
[idx
][1][i
] = region_offset
;
2759 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2760 ram_addr_t orig_memory
, ram_addr_t region_offset
)
2765 mmio
= qemu_mallocz(sizeof(subpage_t
));
2768 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2769 #if defined(DEBUG_SUBPAGE)
2770 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2771 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2773 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2774 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
,
2781 static void io_mem_init(void)
2783 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2784 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2785 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2788 io_mem_watch
= cpu_register_io_memory(0, watch_mem_read
,
2789 watch_mem_write
, NULL
);
2790 /* alloc dirty bits array */
2791 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2792 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2795 /* mem_read and mem_write are arrays of functions containing the
2796 function to access byte (index 0), word (index 1) and dword (index
2797 2). Functions can be omitted with a NULL function pointer. The
2798 registered functions may be modified dynamically later.
2799 If io_index is non zero, the corresponding io zone is
2800 modified. If it is zero, a new io zone is allocated. The return
2801 value can be used with cpu_register_physical_memory(). (-1) is
2802 returned if error. */
2803 int cpu_register_io_memory(int io_index
,
2804 CPUReadMemoryFunc
**mem_read
,
2805 CPUWriteMemoryFunc
**mem_write
,
2808 int i
, subwidth
= 0;
2810 if (io_index
<= 0) {
2811 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2813 io_index
= io_mem_nb
++;
2815 if (io_index
>= IO_MEM_NB_ENTRIES
)
2819 for(i
= 0;i
< 3; i
++) {
2820 if (!mem_read
[i
] || !mem_write
[i
])
2821 subwidth
= IO_MEM_SUBWIDTH
;
2822 io_mem_read
[io_index
][i
] = mem_read
[i
];
2823 io_mem_write
[io_index
][i
] = mem_write
[i
];
2825 io_mem_opaque
[io_index
] = opaque
;
2826 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2829 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2831 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2834 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2836 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2839 #endif /* !defined(CONFIG_USER_ONLY) */
2841 /* physical memory access (slow version, mainly for debug) */
2842 #if defined(CONFIG_USER_ONLY)
2843 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2844 int len
, int is_write
)
2851 page
= addr
& TARGET_PAGE_MASK
;
2852 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2855 flags
= page_get_flags(page
);
2856 if (!(flags
& PAGE_VALID
))
2859 if (!(flags
& PAGE_WRITE
))
2861 /* XXX: this code should not depend on lock_user */
2862 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2863 /* FIXME - should this return an error rather than just fail? */
2866 unlock_user(p
, addr
, l
);
2868 if (!(flags
& PAGE_READ
))
2870 /* XXX: this code should not depend on lock_user */
2871 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2872 /* FIXME - should this return an error rather than just fail? */
2875 unlock_user(p
, addr
, 0);
2884 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2885 int len
, int is_write
)
2890 target_phys_addr_t page
;
2895 page
= addr
& TARGET_PAGE_MASK
;
2896 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2899 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2901 pd
= IO_MEM_UNASSIGNED
;
2903 pd
= p
->phys_offset
;
2907 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2908 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2910 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
2911 /* XXX: could force cpu_single_env to NULL to avoid
2913 if (l
>= 4 && ((addr
& 3) == 0)) {
2914 /* 32 bit write access */
2916 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2918 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2919 /* 16 bit write access */
2921 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2924 /* 8 bit write access */
2926 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2930 unsigned long addr1
;
2931 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2933 ptr
= phys_ram_base
+ addr1
;
2934 memcpy(ptr
, buf
, l
);
2935 if (!cpu_physical_memory_is_dirty(addr1
)) {
2936 /* invalidate code */
2937 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2939 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2940 (0xff & ~CODE_DIRTY_FLAG
);
2944 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2945 !(pd
& IO_MEM_ROMD
)) {
2947 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2949 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
2950 if (l
>= 4 && ((addr
& 3) == 0)) {
2951 /* 32 bit read access */
2952 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2955 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2956 /* 16 bit read access */
2957 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2961 /* 8 bit read access */
2962 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2968 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2969 (addr
& ~TARGET_PAGE_MASK
);
2970 memcpy(buf
, ptr
, l
);
2979 /* used for ROM loading : can write in RAM and ROM */
2980 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2981 const uint8_t *buf
, int len
)
2985 target_phys_addr_t page
;
2990 page
= addr
& TARGET_PAGE_MASK
;
2991 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2994 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2996 pd
= IO_MEM_UNASSIGNED
;
2998 pd
= p
->phys_offset
;
3001 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
3002 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
3003 !(pd
& IO_MEM_ROMD
)) {
3006 unsigned long addr1
;
3007 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3009 ptr
= phys_ram_base
+ addr1
;
3010 memcpy(ptr
, buf
, l
);
3019 /* warning: addr must be aligned */
3020 uint32_t ldl_phys(target_phys_addr_t addr
)
3028 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3030 pd
= IO_MEM_UNASSIGNED
;
3032 pd
= p
->phys_offset
;
3035 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3036 !(pd
& IO_MEM_ROMD
)) {
3038 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3040 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3041 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3044 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3045 (addr
& ~TARGET_PAGE_MASK
);
3051 /* warning: addr must be aligned */
3052 uint64_t ldq_phys(target_phys_addr_t addr
)
3060 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3062 pd
= IO_MEM_UNASSIGNED
;
3064 pd
= p
->phys_offset
;
3067 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3068 !(pd
& IO_MEM_ROMD
)) {
3070 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3072 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3073 #ifdef TARGET_WORDS_BIGENDIAN
3074 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
3075 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
3077 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3078 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
3082 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3083 (addr
& ~TARGET_PAGE_MASK
);
3090 uint32_t ldub_phys(target_phys_addr_t addr
)
3093 cpu_physical_memory_read(addr
, &val
, 1);
3098 uint32_t lduw_phys(target_phys_addr_t addr
)
3101 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
3102 return tswap16(val
);
3105 /* warning: addr must be aligned. The ram page is not masked as dirty
3106 and the code inside is not invalidated. It is useful if the dirty
3107 bits are used to track modified PTEs */
3108 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
3115 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3117 pd
= IO_MEM_UNASSIGNED
;
3119 pd
= p
->phys_offset
;
3122 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3123 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3125 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3126 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3128 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3129 ptr
= phys_ram_base
+ addr1
;
3132 if (unlikely(in_migration
)) {
3133 if (!cpu_physical_memory_is_dirty(addr1
)) {
3134 /* invalidate code */
3135 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3137 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3138 (0xff & ~CODE_DIRTY_FLAG
);
3144 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
3151 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3153 pd
= IO_MEM_UNASSIGNED
;
3155 pd
= p
->phys_offset
;
3158 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3159 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3161 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3162 #ifdef TARGET_WORDS_BIGENDIAN
3163 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
3164 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
3166 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3167 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
3170 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3171 (addr
& ~TARGET_PAGE_MASK
);
3176 /* warning: addr must be aligned */
3177 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
3184 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3186 pd
= IO_MEM_UNASSIGNED
;
3188 pd
= p
->phys_offset
;
3191 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3192 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3194 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3195 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3197 unsigned long addr1
;
3198 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3200 ptr
= phys_ram_base
+ addr1
;
3202 if (!cpu_physical_memory_is_dirty(addr1
)) {
3203 /* invalidate code */
3204 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3206 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3207 (0xff & ~CODE_DIRTY_FLAG
);
3213 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3216 cpu_physical_memory_write(addr
, &v
, 1);
3220 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3222 uint16_t v
= tswap16(val
);
3223 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3227 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3230 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
3235 /* virtual memory access for debug */
3236 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3237 uint8_t *buf
, int len
, int is_write
)
3240 target_phys_addr_t phys_addr
;
3244 page
= addr
& TARGET_PAGE_MASK
;
3245 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3246 /* if no physical page mapped, return an error */
3247 if (phys_addr
== -1)
3249 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3252 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
3261 /* in deterministic execution mode, instructions doing device I/Os
3262 must be at the end of the TB */
3263 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
3265 TranslationBlock
*tb
;
3267 target_ulong pc
, cs_base
;
3270 tb
= tb_find_pc((unsigned long)retaddr
);
3272 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
3275 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
3276 cpu_restore_state(tb
, env
, (unsigned long)retaddr
, NULL
);
3277 /* Calculate how many instructions had been executed before the fault
3279 n
= n
- env
->icount_decr
.u16
.low
;
3280 /* Generate a new TB ending on the I/O insn. */
3282 /* On MIPS and SH, delay slot instructions can only be restarted if
3283 they were already the first instruction in the TB. If this is not
3284 the first instruction in a TB then re-execute the preceding
3286 #if defined(TARGET_MIPS)
3287 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
3288 env
->active_tc
.PC
-= 4;
3289 env
->icount_decr
.u16
.low
++;
3290 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
3292 #elif defined(TARGET_SH4)
3293 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
3296 env
->icount_decr
.u16
.low
++;
3297 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
3300 /* This should never happen. */
3301 if (n
> CF_COUNT_MASK
)
3302 cpu_abort(env
, "TB too big during recompile");
3304 cflags
= n
| CF_LAST_IO
;
3306 cs_base
= tb
->cs_base
;
3308 tb_phys_invalidate(tb
, -1);
3309 /* FIXME: In theory this could raise an exception. In practice
3310 we have already translated the block once so it's probably ok. */
3311 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
3312 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3313 the first in the TB) then we end up generating a whole new TB and
3314 repeating the fault, which is horribly inefficient.
3315 Better would be to execute just this insn uncached, or generate a
3317 cpu_resume_from_signal(env
, NULL
);
3320 void dump_exec_info(FILE *f
,
3321 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
3323 int i
, target_code_size
, max_target_code_size
;
3324 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
3325 TranslationBlock
*tb
;
3327 target_code_size
= 0;
3328 max_target_code_size
= 0;
3330 direct_jmp_count
= 0;
3331 direct_jmp2_count
= 0;
3332 for(i
= 0; i
< nb_tbs
; i
++) {
3334 target_code_size
+= tb
->size
;
3335 if (tb
->size
> max_target_code_size
)
3336 max_target_code_size
= tb
->size
;
3337 if (tb
->page_addr
[1] != -1)
3339 if (tb
->tb_next_offset
[0] != 0xffff) {
3341 if (tb
->tb_next_offset
[1] != 0xffff) {
3342 direct_jmp2_count
++;
3346 /* XXX: avoid using doubles ? */
3347 cpu_fprintf(f
, "Translation buffer state:\n");
3348 cpu_fprintf(f
, "gen code size %ld/%ld\n",
3349 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
3350 cpu_fprintf(f
, "TB count %d/%d\n",
3351 nb_tbs
, code_gen_max_blocks
);
3352 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
3353 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3354 max_target_code_size
);
3355 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3356 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3357 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3358 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3360 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3361 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3363 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3365 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3366 cpu_fprintf(f
, "\nStatistics:\n");
3367 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3368 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3369 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3370 tcg_dump_info(f
, cpu_fprintf
);
3373 #if !defined(CONFIG_USER_ONLY)
3375 #define MMUSUFFIX _cmmu
3376 #define GETPC() NULL
3377 #define env cpu_single_env
3378 #define SOFTMMU_CODE_ACCESS
3381 #include "softmmu_template.h"
3384 #include "softmmu_template.h"
3387 #include "softmmu_template.h"
3390 #include "softmmu_template.h"