2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 static TranslationBlock
*tbs
;
88 int code_gen_max_blocks
;
89 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
102 #define code_gen_section \
103 __attribute__((aligned (32)))
106 uint8_t code_gen_prologue
[1024] code_gen_section
;
107 static uint8_t *code_gen_buffer
;
108 static unsigned long code_gen_buffer_size
;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size
;
111 uint8_t *code_gen_ptr
;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size
;
116 uint8_t *phys_ram_base
;
117 uint8_t *phys_ram_dirty
;
118 static int in_migration
;
119 static ram_addr_t phys_ram_alloc_offset
= 0;
123 /* current CPU in the current thread. It is only valid inside
125 CPUState
*cpu_single_env
;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
146 typedef struct PhysPageDesc
{
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset
;
149 ram_addr_t region_offset
;
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
166 unsigned long qemu_real_host_page_size
;
167 unsigned long qemu_host_page_bits
;
168 unsigned long qemu_host_page_size
;
169 unsigned long qemu_host_page_mask
;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc
*l1_map
[L1_SIZE
];
173 static PhysPageDesc
**l1_phys_map
;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
180 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
181 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
182 static int io_mem_nb
;
183 static int io_mem_watch
;
187 static const char *logfilename
= "/tmp/qemu.log";
190 static int log_append
= 0;
193 static int tlb_flush_count
;
194 static int tb_flush_count
;
195 static int tb_phys_invalidate_count
;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t
{
199 target_phys_addr_t base
;
200 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
201 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
202 void *opaque
[TARGET_PAGE_SIZE
][2][4];
203 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
207 static void map_exec(void *addr
, long size
)
210 VirtualProtect(addr
, size
,
211 PAGE_EXECUTE_READWRITE
, &old_protect
);
/* Make the host pages covering [addr, addr + size) executable.
   mprotect() requires page-aligned arguments, so the range is widened
   outward to host page boundaries first.  The mprotect() return value
   is not checked, matching the original behavior. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long lo = (unsigned long)addr & ~(page_size - 1);
    unsigned long hi = ((unsigned long)addr + size + page_size - 1)
                       & ~(page_size - 1);

    mprotect((void *)lo, hi - lo,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
238 SYSTEM_INFO system_info
;
240 GetSystemInfo(&system_info
);
241 qemu_real_host_page_size
= system_info
.dwPageSize
;
244 qemu_real_host_page_size
= getpagesize();
246 if (qemu_host_page_size
== 0)
247 qemu_host_page_size
= qemu_real_host_page_size
;
248 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
249 qemu_host_page_size
= TARGET_PAGE_SIZE
;
250 qemu_host_page_bits
= 0;
251 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
252 qemu_host_page_bits
++;
253 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
254 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
255 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr
, endaddr
;
264 last_brk
= (unsigned long)sbrk(0);
265 f
= fopen("/proc/self/maps", "r");
268 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
270 startaddr
= MIN(startaddr
,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
272 endaddr
= MIN(endaddr
,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
274 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
275 TARGET_PAGE_ALIGN(endaddr
),
286 static inline PageDesc
**page_l1_map(target_ulong index
)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
294 return &l1_map
[index
>> L2_BITS
];
297 static inline PageDesc
*page_find_alloc(target_ulong index
)
300 lp
= page_l1_map(index
);
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
309 /* Don't use qemu_malloc because it may recurse. */
310 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
311 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
314 unsigned long addr
= h2g(p
);
315 page_set_flags(addr
& TARGET_PAGE_MASK
,
316 TARGET_PAGE_ALIGN(addr
+ len
),
320 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
324 return p
+ (index
& (L2_SIZE
- 1));
327 static inline PageDesc
*page_find(target_ulong index
)
330 lp
= page_l1_map(index
);
337 return p
+ (index
& (L2_SIZE
- 1));
340 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
345 p
= (void **)l1_phys_map
;
346 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
348 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
351 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
354 /* allocate if not found */
357 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
358 memset(p
, 0, sizeof(void *) * L1_SIZE
);
362 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
366 /* allocate if not found */
369 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
371 for (i
= 0; i
< L2_SIZE
; i
++)
372 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
374 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
377 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
379 return phys_page_find_alloc(index
, 0);
382 #if !defined(CONFIG_USER_ONLY)
383 static void tlb_protect_code(ram_addr_t ram_addr
);
384 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
386 #define mmap_lock() do { } while(0)
387 #define mmap_unlock() do { } while(0)
390 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
392 #if defined(CONFIG_USER_ONLY)
393 /* Currently it is not recommanded to allocate big chunks of data in
394 user mode. It will change when a dedicated libc will be used */
395 #define USE_STATIC_CODE_GEN_BUFFER
398 #ifdef USE_STATIC_CODE_GEN_BUFFER
399 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
402 static void code_gen_alloc(unsigned long tb_size
)
404 #ifdef USE_STATIC_CODE_GEN_BUFFER
405 code_gen_buffer
= static_code_gen_buffer
;
406 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
407 map_exec(code_gen_buffer
, code_gen_buffer_size
);
409 code_gen_buffer_size
= tb_size
;
410 if (code_gen_buffer_size
== 0) {
411 #if defined(CONFIG_USER_ONLY)
412 /* in user mode, phys_ram_size is not meaningful */
413 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
415 /* XXX: needs ajustments */
416 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
419 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
420 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
421 /* The code gen buffer location may have constraints depending on
422 the host cpu and OS */
423 #if defined(__linux__)
428 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
429 #if defined(__x86_64__)
431 /* Cannot map more than that */
432 if (code_gen_buffer_size
> (800 * 1024 * 1024))
433 code_gen_buffer_size
= (800 * 1024 * 1024);
434 #elif defined(__sparc_v9__)
435 // Map the buffer below 2G, so we can use direct calls and branches
437 start
= (void *) 0x60000000UL
;
438 if (code_gen_buffer_size
> (512 * 1024 * 1024))
439 code_gen_buffer_size
= (512 * 1024 * 1024);
440 #elif defined(__arm__)
441 /* Map the buffer below 32M, so we can use direct calls and branches */
443 start
= (void *) 0x01000000UL
;
444 if (code_gen_buffer_size
> 16 * 1024 * 1024)
445 code_gen_buffer_size
= 16 * 1024 * 1024;
447 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
448 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
450 if (code_gen_buffer
== MAP_FAILED
) {
451 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
455 #elif defined(__FreeBSD__)
459 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
460 #if defined(__x86_64__)
461 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
462 * 0x40000000 is free */
464 addr
= (void *)0x40000000;
465 /* Cannot map more than that */
466 if (code_gen_buffer_size
> (800 * 1024 * 1024))
467 code_gen_buffer_size
= (800 * 1024 * 1024);
469 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
470 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
472 if (code_gen_buffer
== MAP_FAILED
) {
473 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
478 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
479 map_exec(code_gen_buffer
, code_gen_buffer_size
);
481 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
482 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
483 code_gen_buffer_max_size
= code_gen_buffer_size
-
484 code_gen_max_block_size();
485 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
486 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
489 /* Must be called before using the QEMU cpus. 'tb_size' is the size
490 (in bytes) allocated to the translation buffer. Zero means default
492 void cpu_exec_init_all(unsigned long tb_size
)
495 code_gen_alloc(tb_size
);
496 code_gen_ptr
= code_gen_buffer
;
498 #if !defined(CONFIG_USER_ONLY)
503 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
505 #define CPU_COMMON_SAVE_VERSION 1
507 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
509 CPUState
*env
= opaque
;
511 qemu_put_be32s(f
, &env
->halted
);
512 qemu_put_be32s(f
, &env
->interrupt_request
);
515 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
517 CPUState
*env
= opaque
;
519 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
522 qemu_get_be32s(f
, &env
->halted
);
523 qemu_get_be32s(f
, &env
->interrupt_request
);
530 void cpu_exec_init(CPUState
*env
)
535 env
->next_cpu
= NULL
;
538 while (*penv
!= NULL
) {
539 penv
= (CPUState
**)&(*penv
)->next_cpu
;
542 env
->cpu_index
= cpu_index
;
543 TAILQ_INIT(&env
->breakpoints
);
544 TAILQ_INIT(&env
->watchpoints
);
546 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
547 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
548 cpu_common_save
, cpu_common_load
, env
);
549 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
550 cpu_save
, cpu_load
, env
);
554 static inline void invalidate_page_bitmap(PageDesc
*p
)
556 if (p
->code_bitmap
) {
557 qemu_free(p
->code_bitmap
);
558 p
->code_bitmap
= NULL
;
560 p
->code_write_count
= 0;
563 /* set to NULL all the 'first_tb' fields in all PageDescs */
564 static void page_flush_tb(void)
569 for(i
= 0; i
< L1_SIZE
; i
++) {
572 for(j
= 0; j
< L2_SIZE
; j
++) {
574 invalidate_page_bitmap(p
);
581 /* flush all the translation blocks */
582 /* XXX: tb_flush is currently not thread safe */
583 void tb_flush(CPUState
*env1
)
586 #if defined(DEBUG_FLUSH)
587 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
588 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
590 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
592 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
593 cpu_abort(env1
, "Internal error: code buffer overflow\n");
597 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
598 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
601 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
604 code_gen_ptr
= code_gen_buffer
;
605 /* XXX: flush processor icache at this point if cache flush is
610 #ifdef DEBUG_TB_CHECK
612 static void tb_invalidate_check(target_ulong address
)
614 TranslationBlock
*tb
;
616 address
&= TARGET_PAGE_MASK
;
617 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
618 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
619 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
620 address
>= tb
->pc
+ tb
->size
)) {
621 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
622 address
, (long)tb
->pc
, tb
->size
);
628 /* verify that all the pages have correct rights for code */
629 static void tb_page_check(void)
631 TranslationBlock
*tb
;
632 int i
, flags1
, flags2
;
634 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
635 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
636 flags1
= page_get_flags(tb
->pc
);
637 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
638 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
639 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
640 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
646 static void tb_jmp_check(TranslationBlock
*tb
)
648 TranslationBlock
*tb1
;
651 /* suppress any remaining jumps to this TB */
655 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
658 tb1
= tb1
->jmp_next
[n1
];
660 /* check end of list */
662 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
668 /* invalidate one TB */
669 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
672 TranslationBlock
*tb1
;
676 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
679 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
683 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
685 TranslationBlock
*tb1
;
691 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
693 *ptb
= tb1
->page_next
[n1
];
696 ptb
= &tb1
->page_next
[n1
];
700 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
702 TranslationBlock
*tb1
, **ptb
;
705 ptb
= &tb
->jmp_next
[n
];
708 /* find tb(n) in circular list */
712 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
713 if (n1
== n
&& tb1
== tb
)
716 ptb
= &tb1
->jmp_first
;
718 ptb
= &tb1
->jmp_next
[n1
];
721 /* now we can suppress tb(n) from the list */
722 *ptb
= tb
->jmp_next
[n
];
724 tb
->jmp_next
[n
] = NULL
;
728 /* reset the jump entry 'n' of a TB so that it is not chained to
730 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
732 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
735 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
740 target_phys_addr_t phys_pc
;
741 TranslationBlock
*tb1
, *tb2
;
743 /* remove the TB from the hash list */
744 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
745 h
= tb_phys_hash_func(phys_pc
);
746 tb_remove(&tb_phys_hash
[h
], tb
,
747 offsetof(TranslationBlock
, phys_hash_next
));
749 /* remove the TB from the page list */
750 if (tb
->page_addr
[0] != page_addr
) {
751 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
752 tb_page_remove(&p
->first_tb
, tb
);
753 invalidate_page_bitmap(p
);
755 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
756 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
757 tb_page_remove(&p
->first_tb
, tb
);
758 invalidate_page_bitmap(p
);
761 tb_invalidated_flag
= 1;
763 /* remove the TB from the hash list */
764 h
= tb_jmp_cache_hash_func(tb
->pc
);
765 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
766 if (env
->tb_jmp_cache
[h
] == tb
)
767 env
->tb_jmp_cache
[h
] = NULL
;
770 /* suppress this TB from the two jump lists */
771 tb_jmp_remove(tb
, 0);
772 tb_jmp_remove(tb
, 1);
774 /* suppress any remaining jumps to this TB */
780 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
781 tb2
= tb1
->jmp_next
[n1
];
782 tb_reset_jump(tb1
, n1
);
783 tb1
->jmp_next
[n1
] = NULL
;
786 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
788 tb_phys_invalidate_count
++;
791 static inline void set_bits(uint8_t *tab
, int start
, int len
)
797 mask
= 0xff << (start
& 7);
798 if ((start
& ~7) == (end
& ~7)) {
800 mask
&= ~(0xff << (end
& 7));
805 start
= (start
+ 8) & ~7;
807 while (start
< end1
) {
812 mask
= ~(0xff << (end
& 7));
818 static void build_page_bitmap(PageDesc
*p
)
820 int n
, tb_start
, tb_end
;
821 TranslationBlock
*tb
;
823 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
828 tb
= (TranslationBlock
*)((long)tb
& ~3);
829 /* NOTE: this is subtle as a TB may span two physical pages */
831 /* NOTE: tb_end may be after the end of the page, but
832 it is not a problem */
833 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
834 tb_end
= tb_start
+ tb
->size
;
835 if (tb_end
> TARGET_PAGE_SIZE
)
836 tb_end
= TARGET_PAGE_SIZE
;
839 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
841 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
842 tb
= tb
->page_next
[n
];
846 TranslationBlock
*tb_gen_code(CPUState
*env
,
847 target_ulong pc
, target_ulong cs_base
,
848 int flags
, int cflags
)
850 TranslationBlock
*tb
;
852 target_ulong phys_pc
, phys_page2
, virt_page2
;
855 phys_pc
= get_phys_addr_code(env
, pc
);
858 /* flush must be done */
860 /* cannot fail at this point */
862 /* Don't forget to invalidate previous TB info. */
863 tb_invalidated_flag
= 1;
865 tc_ptr
= code_gen_ptr
;
867 tb
->cs_base
= cs_base
;
870 cpu_gen_code(env
, tb
, &code_gen_size
);
871 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
873 /* check next page if needed */
874 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
876 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
877 phys_page2
= get_phys_addr_code(env
, virt_page2
);
879 tb_link_phys(tb
, phys_pc
, phys_page2
);
883 /* invalidate all TBs which intersect with the target physical page
884 starting in range [start;end[. NOTE: start and end must refer to
885 the same physical page. 'is_cpu_write_access' should be true if called
886 from a real cpu write access: the virtual CPU will exit the current
887 TB if code is modified inside this TB. */
888 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
889 int is_cpu_write_access
)
891 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
892 CPUState
*env
= cpu_single_env
;
893 target_ulong tb_start
, tb_end
;
896 #ifdef TARGET_HAS_PRECISE_SMC
897 int current_tb_not_found
= is_cpu_write_access
;
898 TranslationBlock
*current_tb
= NULL
;
899 int current_tb_modified
= 0;
900 target_ulong current_pc
= 0;
901 target_ulong current_cs_base
= 0;
902 int current_flags
= 0;
903 #endif /* TARGET_HAS_PRECISE_SMC */
905 p
= page_find(start
>> TARGET_PAGE_BITS
);
908 if (!p
->code_bitmap
&&
909 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
910 is_cpu_write_access
) {
911 /* build code bitmap */
912 build_page_bitmap(p
);
915 /* we remove all the TBs in the range [start, end[ */
916 /* XXX: see if in some cases it could be faster to invalidate all the code */
920 tb
= (TranslationBlock
*)((long)tb
& ~3);
921 tb_next
= tb
->page_next
[n
];
922 /* NOTE: this is subtle as a TB may span two physical pages */
924 /* NOTE: tb_end may be after the end of the page, but
925 it is not a problem */
926 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
927 tb_end
= tb_start
+ tb
->size
;
929 tb_start
= tb
->page_addr
[1];
930 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
932 if (!(tb_end
<= start
|| tb_start
>= end
)) {
933 #ifdef TARGET_HAS_PRECISE_SMC
934 if (current_tb_not_found
) {
935 current_tb_not_found
= 0;
937 if (env
->mem_io_pc
) {
938 /* now we have a real cpu fault */
939 current_tb
= tb_find_pc(env
->mem_io_pc
);
942 if (current_tb
== tb
&&
943 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
944 /* If we are modifying the current TB, we must stop
945 its execution. We could be more precise by checking
946 that the modification is after the current PC, but it
947 would require a specialized function to partially
948 restore the CPU state */
950 current_tb_modified
= 1;
951 cpu_restore_state(current_tb
, env
,
952 env
->mem_io_pc
, NULL
);
953 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
956 #endif /* TARGET_HAS_PRECISE_SMC */
957 /* we need to do that to handle the case where a signal
958 occurs while doing tb_phys_invalidate() */
961 saved_tb
= env
->current_tb
;
962 env
->current_tb
= NULL
;
964 tb_phys_invalidate(tb
, -1);
966 env
->current_tb
= saved_tb
;
967 if (env
->interrupt_request
&& env
->current_tb
)
968 cpu_interrupt(env
, env
->interrupt_request
);
973 #if !defined(CONFIG_USER_ONLY)
974 /* if no code remaining, no need to continue to use slow writes */
976 invalidate_page_bitmap(p
);
977 if (is_cpu_write_access
) {
978 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
982 #ifdef TARGET_HAS_PRECISE_SMC
983 if (current_tb_modified
) {
984 /* we generate a block containing just the instruction
985 modifying the memory. It will ensure that it cannot modify
987 env
->current_tb
= NULL
;
988 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
989 cpu_resume_from_signal(env
, NULL
);
994 /* len must be <= 8 and start must be a multiple of len */
995 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1001 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1002 cpu_single_env
->mem_io_vaddr
, len
,
1003 cpu_single_env
->eip
,
1004 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1007 p
= page_find(start
>> TARGET_PAGE_BITS
);
1010 if (p
->code_bitmap
) {
1011 offset
= start
& ~TARGET_PAGE_MASK
;
1012 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1013 if (b
& ((1 << len
) - 1))
1017 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1021 #if !defined(CONFIG_SOFTMMU)
1022 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1023 unsigned long pc
, void *puc
)
1025 TranslationBlock
*tb
;
1028 #ifdef TARGET_HAS_PRECISE_SMC
1029 TranslationBlock
*current_tb
= NULL
;
1030 CPUState
*env
= cpu_single_env
;
1031 int current_tb_modified
= 0;
1032 target_ulong current_pc
= 0;
1033 target_ulong current_cs_base
= 0;
1034 int current_flags
= 0;
1037 addr
&= TARGET_PAGE_MASK
;
1038 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1042 #ifdef TARGET_HAS_PRECISE_SMC
1043 if (tb
&& pc
!= 0) {
1044 current_tb
= tb_find_pc(pc
);
1047 while (tb
!= NULL
) {
1049 tb
= (TranslationBlock
*)((long)tb
& ~3);
1050 #ifdef TARGET_HAS_PRECISE_SMC
1051 if (current_tb
== tb
&&
1052 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1053 /* If we are modifying the current TB, we must stop
1054 its execution. We could be more precise by checking
1055 that the modification is after the current PC, but it
1056 would require a specialized function to partially
1057 restore the CPU state */
1059 current_tb_modified
= 1;
1060 cpu_restore_state(current_tb
, env
, pc
, puc
);
1061 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1064 #endif /* TARGET_HAS_PRECISE_SMC */
1065 tb_phys_invalidate(tb
, addr
);
1066 tb
= tb
->page_next
[n
];
1069 #ifdef TARGET_HAS_PRECISE_SMC
1070 if (current_tb_modified
) {
1071 /* we generate a block containing just the instruction
1072 modifying the memory. It will ensure that it cannot modify
1074 env
->current_tb
= NULL
;
1075 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1076 cpu_resume_from_signal(env
, puc
);
1082 /* add the tb in the target page and protect it if necessary */
1083 static inline void tb_alloc_page(TranslationBlock
*tb
,
1084 unsigned int n
, target_ulong page_addr
)
1087 TranslationBlock
*last_first_tb
;
1089 tb
->page_addr
[n
] = page_addr
;
1090 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1091 tb
->page_next
[n
] = p
->first_tb
;
1092 last_first_tb
= p
->first_tb
;
1093 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1094 invalidate_page_bitmap(p
);
1096 #if defined(TARGET_HAS_SMC) || 1
1098 #if defined(CONFIG_USER_ONLY)
1099 if (p
->flags
& PAGE_WRITE
) {
1104 /* force the host page as non writable (writes will have a
1105 page fault + mprotect overhead) */
1106 page_addr
&= qemu_host_page_mask
;
1108 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1109 addr
+= TARGET_PAGE_SIZE
) {
1111 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1115 p2
->flags
&= ~PAGE_WRITE
;
1116 page_get_flags(addr
);
1118 mprotect(g2h(page_addr
), qemu_host_page_size
,
1119 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1120 #ifdef DEBUG_TB_INVALIDATE
1121 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1126 /* if some code is already present, then the pages are already
1127 protected. So we handle the case where only the first TB is
1128 allocated in a physical page */
1129 if (!last_first_tb
) {
1130 tlb_protect_code(page_addr
);
1134 #endif /* TARGET_HAS_SMC */
1137 /* Allocate a new translation block. Flush the translation buffer if
1138 too many translation blocks or too much generated code. */
1139 TranslationBlock
*tb_alloc(target_ulong pc
)
1141 TranslationBlock
*tb
;
1143 if (nb_tbs
>= code_gen_max_blocks
||
1144 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1146 tb
= &tbs
[nb_tbs
++];
1152 void tb_free(TranslationBlock
*tb
)
1154 /* In practice this is mostly used for single use temporary TB
1155 Ignore the hard cases and just back up if this TB happens to
1156 be the last one generated. */
1157 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1158 code_gen_ptr
= tb
->tc_ptr
;
1163 /* add a new TB and link it to the physical page tables. phys_page2 is
1164 (-1) to indicate that only one page contains the TB. */
1165 void tb_link_phys(TranslationBlock
*tb
,
1166 target_ulong phys_pc
, target_ulong phys_page2
)
1169 TranslationBlock
**ptb
;
1171 /* Grab the mmap lock to stop another thread invalidating this TB
1172 before we are done. */
1174 /* add in the physical hash table */
1175 h
= tb_phys_hash_func(phys_pc
);
1176 ptb
= &tb_phys_hash
[h
];
1177 tb
->phys_hash_next
= *ptb
;
1180 /* add in the page list */
1181 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1182 if (phys_page2
!= -1)
1183 tb_alloc_page(tb
, 1, phys_page2
);
1185 tb
->page_addr
[1] = -1;
1187 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1188 tb
->jmp_next
[0] = NULL
;
1189 tb
->jmp_next
[1] = NULL
;
1191 /* init original jump addresses */
1192 if (tb
->tb_next_offset
[0] != 0xffff)
1193 tb_reset_jump(tb
, 0);
1194 if (tb
->tb_next_offset
[1] != 0xffff)
1195 tb_reset_jump(tb
, 1);
1197 #ifdef DEBUG_TB_CHECK
1203 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1204 tb[1].tc_ptr. Return NULL if not found */
1205 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1207 int m_min
, m_max
, m
;
1209 TranslationBlock
*tb
;
1213 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1214 tc_ptr
>= (unsigned long)code_gen_ptr
)
1216 /* binary search (cf Knuth) */
1219 while (m_min
<= m_max
) {
1220 m
= (m_min
+ m_max
) >> 1;
1222 v
= (unsigned long)tb
->tc_ptr
;
1225 else if (tc_ptr
< v
) {
1234 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1236 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1238 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1241 tb1
= tb
->jmp_next
[n
];
1243 /* find head of list */
1246 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1249 tb1
= tb1
->jmp_next
[n1
];
1251 /* we are now sure now that tb jumps to tb1 */
1254 /* remove tb from the jmp_first list */
1255 ptb
= &tb_next
->jmp_first
;
1259 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1260 if (n1
== n
&& tb1
== tb
)
1262 ptb
= &tb1
->jmp_next
[n1
];
1264 *ptb
= tb
->jmp_next
[n
];
1265 tb
->jmp_next
[n
] = NULL
;
1267 /* suppress the jump to next tb in generated code */
1268 tb_reset_jump(tb
, n
);
1270 /* suppress jumps in the tb on which we could have jumped */
1271 tb_reset_jump_recursive(tb_next
);
1275 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1277 tb_reset_jump_recursive2(tb
, 0);
1278 tb_reset_jump_recursive2(tb
, 1);
1281 #if defined(TARGET_HAS_ICE)
1282 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1284 target_phys_addr_t addr
;
1286 ram_addr_t ram_addr
;
1289 addr
= cpu_get_phys_page_debug(env
, pc
);
1290 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1292 pd
= IO_MEM_UNASSIGNED
;
1294 pd
= p
->phys_offset
;
1296 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1297 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1301 /* Add a watchpoint. */
1302 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1303 int flags
, CPUWatchpoint
**watchpoint
)
1305 target_ulong len_mask
= ~(len
- 1);
1308 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1309 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1310 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1311 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1314 wp
= qemu_malloc(sizeof(*wp
));
1317 wp
->len_mask
= len_mask
;
1320 /* keep all GDB-injected watchpoints in front */
1322 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1324 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1326 tlb_flush_page(env
, addr
);
1333 /* Remove a specific watchpoint. */
1334 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1337 target_ulong len_mask
= ~(len
- 1);
1340 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1341 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1342 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1343 cpu_watchpoint_remove_by_ref(env
, wp
);
1350 /* Remove a specific watchpoint by reference. */
1351 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1353 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1355 tlb_flush_page(env
, watchpoint
->vaddr
);
1357 qemu_free(watchpoint
);
1360 /* Remove all matching watchpoints. */
1361 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1363 CPUWatchpoint
*wp
, *next
;
1365 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1366 if (wp
->flags
& mask
)
1367 cpu_watchpoint_remove_by_ref(env
, wp
);
1371 /* Add a breakpoint. */
1372 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1373 CPUBreakpoint
**breakpoint
)
1375 #if defined(TARGET_HAS_ICE)
1378 bp
= qemu_malloc(sizeof(*bp
));
1383 /* keep all GDB-injected breakpoints in front */
1385 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1387 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1389 breakpoint_invalidate(env
, pc
);
1399 /* Remove a specific breakpoint. */
1400 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1402 #if defined(TARGET_HAS_ICE)
1405 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1406 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1407 cpu_breakpoint_remove_by_ref(env
, bp
);
1417 /* Remove a specific breakpoint by reference. */
1418 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1420 #if defined(TARGET_HAS_ICE)
1421 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1423 breakpoint_invalidate(env
, breakpoint
->pc
);
1425 qemu_free(breakpoint
);
1429 /* Remove all matching breakpoints. */
1430 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1432 #if defined(TARGET_HAS_ICE)
1433 CPUBreakpoint
*bp
, *next
;
1435 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1436 if (bp
->flags
& mask
)
1437 cpu_breakpoint_remove_by_ref(env
, bp
);
1442 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1443 CPU loop after each instruction */
1444 void cpu_single_step(CPUState
*env
, int enabled
)
1446 #if defined(TARGET_HAS_ICE)
1447 if (env
->singlestep_enabled
!= enabled
) {
1448 env
->singlestep_enabled
= enabled
;
1449 /* must flush all the translated code to avoid inconsistancies */
1450 /* XXX: only flush what is necessary */
1456 /* enable or disable low levels log */
1457 void cpu_set_log(int log_flags
)
1459 loglevel
= log_flags
;
1460 if (loglevel
&& !logfile
) {
1461 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1463 perror(logfilename
);
1466 #if !defined(CONFIG_SOFTMMU)
1467 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1469 static char logfile_buf
[4096];
1470 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1473 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1477 if (!loglevel
&& logfile
) {
1483 void cpu_set_log_filename(const char *filename
)
1485 logfilename
= strdup(filename
);
1490 cpu_set_log(loglevel
);
1493 /* mask must never be zero, except for A20 change call */
1494 void cpu_interrupt(CPUState
*env
, int mask
)
1496 #if !defined(USE_NPTL)
1497 TranslationBlock
*tb
;
1498 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1502 old_mask
= env
->interrupt_request
;
1503 /* FIXME: This is probably not threadsafe. A different thread could
1504 be in the middle of a read-modify-write operation. */
1505 env
->interrupt_request
|= mask
;
1506 #if defined(USE_NPTL)
1507 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1508 problem and hope the cpu will stop of its own accord. For userspace
1509 emulation this often isn't actually as bad as it sounds. Often
1510 signals are used primarily to interrupt blocking syscalls. */
1513 env
->icount_decr
.u16
.high
= 0xffff;
1514 #ifndef CONFIG_USER_ONLY
1515 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1516 an async event happened and we need to process it. */
1518 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1519 cpu_abort(env
, "Raised interrupt while not in I/O function");
1523 tb
= env
->current_tb
;
1524 /* if the cpu is currently executing code, we must unlink it and
1525 all the potentially executing TB */
1526 if (tb
&& !testandset(&interrupt_lock
)) {
1527 env
->current_tb
= NULL
;
1528 tb_reset_jump_recursive(tb
);
1529 resetlock(&interrupt_lock
);
1535 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1537 env
->interrupt_request
&= ~mask
;
1540 const CPULogItem cpu_log_items
[] = {
1541 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1542 "show generated host assembly code for each compiled TB" },
1543 { CPU_LOG_TB_IN_ASM
, "in_asm",
1544 "show target assembly code for each compiled TB" },
1545 { CPU_LOG_TB_OP
, "op",
1546 "show micro ops for each compiled TB" },
1547 { CPU_LOG_TB_OP_OPT
, "op_opt",
1550 "before eflags optimization and "
1552 "after liveness analysis" },
1553 { CPU_LOG_INT
, "int",
1554 "show interrupts/exceptions in short format" },
1555 { CPU_LOG_EXEC
, "exec",
1556 "show trace before each executed TB (lots of logs)" },
1557 { CPU_LOG_TB_CPU
, "cpu",
1558 "show CPU state before block translation" },
1560 { CPU_LOG_PCALL
, "pcall",
1561 "show protected mode far calls/returns/exceptions" },
1562 { CPU_LOG_RESET
, "cpu_reset",
1563 "show CPU state before CPU resets" },
1566 { CPU_LOG_IOPORT
, "ioport",
1567 "show all i/o ports accesses" },
/* Compare the first n bytes of s1 against the NUL-terminated string s2.
   Returns 1 when s2 is exactly n characters long and the bytes match,
   0 otherwise.  Used to match tokens of a comma-separated list. */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* cast avoids the implicit signed/unsigned comparison between the
       int token length and size_t from strlen() */
    if (strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1579 /* takes a comma separated list of log masks. Return 0 if error. */
1580 int cpu_str_to_log_mask(const char *str
)
1582 const CPULogItem
*item
;
1589 p1
= strchr(p
, ',');
1592 if(cmp1(p
,p1
-p
,"all")) {
1593 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1597 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1598 if (cmp1(p
, p1
- p
, item
->name
))
1612 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1619 fprintf(stderr
, "qemu: fatal: ");
1620 vfprintf(stderr
, fmt
, ap
);
1621 fprintf(stderr
, "\n");
1623 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1625 cpu_dump_state(env
, stderr
, fprintf
, 0);
1627 if (qemu_log_enabled()) {
1628 qemu_log("qemu: fatal: ");
1629 qemu_log_vprintf(fmt
, ap2
);
1632 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1634 log_cpu_state(env
, 0);
1644 CPUState
*cpu_copy(CPUState
*env
)
1646 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1647 CPUState
*next_cpu
= new_env
->next_cpu
;
1648 int cpu_index
= new_env
->cpu_index
;
1649 #if defined(TARGET_HAS_ICE)
1654 memcpy(new_env
, env
, sizeof(CPUState
));
1656 /* Preserve chaining and index. */
1657 new_env
->next_cpu
= next_cpu
;
1658 new_env
->cpu_index
= cpu_index
;
1660 /* Clone all break/watchpoints.
1661 Note: Once we support ptrace with hw-debug register access, make sure
1662 BP_CPU break/watchpoints are handled correctly on clone. */
1663 TAILQ_INIT(&env
->breakpoints
);
1664 TAILQ_INIT(&env
->watchpoints
);
1665 #if defined(TARGET_HAS_ICE)
1666 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1667 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1669 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1670 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1678 #if !defined(CONFIG_USER_ONLY)
1680 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1684 /* Discard jump cache entries for any tb which might potentially
1685 overlap the flushed page. */
1686 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1687 memset (&env
->tb_jmp_cache
[i
], 0,
1688 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1690 i
= tb_jmp_cache_hash_page(addr
);
1691 memset (&env
->tb_jmp_cache
[i
], 0,
1692 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1695 /* NOTE: if flush_global is true, also flush global entries (not
1697 void tlb_flush(CPUState
*env
, int flush_global
)
1701 #if defined(DEBUG_TLB)
1702 printf("tlb_flush:\n");
1704 /* must reset current TB so that interrupts cannot modify the
1705 links while we are modifying them */
1706 env
->current_tb
= NULL
;
1708 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1709 env
->tlb_table
[0][i
].addr_read
= -1;
1710 env
->tlb_table
[0][i
].addr_write
= -1;
1711 env
->tlb_table
[0][i
].addr_code
= -1;
1712 env
->tlb_table
[1][i
].addr_read
= -1;
1713 env
->tlb_table
[1][i
].addr_write
= -1;
1714 env
->tlb_table
[1][i
].addr_code
= -1;
1715 #if (NB_MMU_MODES >= 3)
1716 env
->tlb_table
[2][i
].addr_read
= -1;
1717 env
->tlb_table
[2][i
].addr_write
= -1;
1718 env
->tlb_table
[2][i
].addr_code
= -1;
1719 #if (NB_MMU_MODES == 4)
1720 env
->tlb_table
[3][i
].addr_read
= -1;
1721 env
->tlb_table
[3][i
].addr_write
= -1;
1722 env
->tlb_table
[3][i
].addr_code
= -1;
1727 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1730 if (env
->kqemu_enabled
) {
1731 kqemu_flush(env
, flush_global
);
1737 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1739 if (addr
== (tlb_entry
->addr_read
&
1740 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1741 addr
== (tlb_entry
->addr_write
&
1742 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1743 addr
== (tlb_entry
->addr_code
&
1744 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1745 tlb_entry
->addr_read
= -1;
1746 tlb_entry
->addr_write
= -1;
1747 tlb_entry
->addr_code
= -1;
1751 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1755 #if defined(DEBUG_TLB)
1756 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1758 /* must reset current TB so that interrupts cannot modify the
1759 links while we are modifying them */
1760 env
->current_tb
= NULL
;
1762 addr
&= TARGET_PAGE_MASK
;
1763 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1764 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1765 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1766 #if (NB_MMU_MODES >= 3)
1767 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1768 #if (NB_MMU_MODES == 4)
1769 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1773 tlb_flush_jmp_cache(env
, addr
);
1776 if (env
->kqemu_enabled
) {
1777 kqemu_flush_page(env
, addr
);
1782 /* update the TLBs so that writes to code in the virtual page 'addr'
1784 static void tlb_protect_code(ram_addr_t ram_addr
)
1786 cpu_physical_memory_reset_dirty(ram_addr
,
1787 ram_addr
+ TARGET_PAGE_SIZE
,
1791 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1792 tested for self modifying code */
1793 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1796 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1799 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1800 unsigned long start
, unsigned long length
)
1803 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1804 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1805 if ((addr
- start
) < length
) {
1806 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1811 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1815 unsigned long length
, start1
;
1819 start
&= TARGET_PAGE_MASK
;
1820 end
= TARGET_PAGE_ALIGN(end
);
1822 length
= end
- start
;
1825 len
= length
>> TARGET_PAGE_BITS
;
1827 /* XXX: should not depend on cpu context */
1829 if (env
->kqemu_enabled
) {
1832 for(i
= 0; i
< len
; i
++) {
1833 kqemu_set_notdirty(env
, addr
);
1834 addr
+= TARGET_PAGE_SIZE
;
1838 mask
= ~dirty_flags
;
1839 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1840 for(i
= 0; i
< len
; i
++)
1843 /* we modify the TLB cache so that the dirty bit will be set again
1844 when accessing the range */
1845 start1
= start
+ (unsigned long)phys_ram_base
;
1846 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1847 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1848 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1849 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1850 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1851 #if (NB_MMU_MODES >= 3)
1852 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1853 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1854 #if (NB_MMU_MODES == 4)
1855 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1856 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1862 int cpu_physical_memory_set_dirty_tracking(int enable
)
1864 in_migration
= enable
;
1868 int cpu_physical_memory_get_dirty_tracking(void)
1870 return in_migration
;
1873 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1876 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1879 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1881 ram_addr_t ram_addr
;
1883 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1884 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1885 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1886 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1887 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1892 /* update the TLB according to the current state of the dirty bits */
1893 void cpu_tlb_update_dirty(CPUState
*env
)
1896 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1897 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1898 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1899 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1900 #if (NB_MMU_MODES >= 3)
1901 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1902 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1903 #if (NB_MMU_MODES == 4)
1904 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1905 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1910 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1912 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1913 tlb_entry
->addr_write
= vaddr
;
1916 /* update the TLB corresponding to virtual page vaddr
1917 so that it is no longer dirty */
1918 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1922 vaddr
&= TARGET_PAGE_MASK
;
1923 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1924 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1925 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1926 #if (NB_MMU_MODES >= 3)
1927 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1928 #if (NB_MMU_MODES == 4)
1929 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1934 /* add a new TLB entry. At most one entry for a given virtual address
1935 is permitted. Return 0 if OK or 2 if the page could not be mapped
1936 (can only happen in non SOFTMMU mode for I/O pages or pages
1937 conflicting with the host address space). */
1938 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1939 target_phys_addr_t paddr
, int prot
,
1940 int mmu_idx
, int is_softmmu
)
1945 target_ulong address
;
1946 target_ulong code_address
;
1947 target_phys_addr_t addend
;
1951 target_phys_addr_t iotlb
;
1953 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1955 pd
= IO_MEM_UNASSIGNED
;
1957 pd
= p
->phys_offset
;
1959 #if defined(DEBUG_TLB)
1960 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1961 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1966 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1967 /* IO memory case (romd handled later) */
1968 address
|= TLB_MMIO
;
1970 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1971 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1973 iotlb
= pd
& TARGET_PAGE_MASK
;
1974 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1975 iotlb
|= IO_MEM_NOTDIRTY
;
1977 iotlb
|= IO_MEM_ROM
;
1979 /* IO handlers are currently passed a physical address.
1980 It would be nice to pass an offset from the base address
1981 of that region. This would avoid having to special case RAM,
1982 and avoid full address decoding in every device.
1983 We can't use the high bits of pd for this because
1984 IO_MEM_ROMD uses these as a ram address. */
1985 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
1987 iotlb
+= p
->region_offset
;
1993 code_address
= address
;
1994 /* Make accesses to pages with watchpoints go via the
1995 watchpoint trap routines. */
1996 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1997 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
1998 iotlb
= io_mem_watch
+ paddr
;
1999 /* TODO: The memory case can be optimized by not trapping
2000 reads of pages with a write breakpoint. */
2001 address
|= TLB_MMIO
;
2005 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2006 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2007 te
= &env
->tlb_table
[mmu_idx
][index
];
2008 te
->addend
= addend
- vaddr
;
2009 if (prot
& PAGE_READ
) {
2010 te
->addr_read
= address
;
2015 if (prot
& PAGE_EXEC
) {
2016 te
->addr_code
= code_address
;
2020 if (prot
& PAGE_WRITE
) {
2021 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2022 (pd
& IO_MEM_ROMD
)) {
2023 /* Write access calls the I/O callback. */
2024 te
->addr_write
= address
| TLB_MMIO
;
2025 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2026 !cpu_physical_memory_is_dirty(pd
)) {
2027 te
->addr_write
= address
| TLB_NOTDIRTY
;
2029 te
->addr_write
= address
;
2032 te
->addr_write
= -1;
2039 void tlb_flush(CPUState
*env
, int flush_global
)
2043 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2047 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2048 target_phys_addr_t paddr
, int prot
,
2049 int mmu_idx
, int is_softmmu
)
2054 /* dump memory mappings */
2055 void page_dump(FILE *f
)
2057 unsigned long start
, end
;
2058 int i
, j
, prot
, prot1
;
2061 fprintf(f
, "%-8s %-8s %-8s %s\n",
2062 "start", "end", "size", "prot");
2066 for(i
= 0; i
<= L1_SIZE
; i
++) {
2071 for(j
= 0;j
< L2_SIZE
; j
++) {
2076 if (prot1
!= prot
) {
2077 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2079 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2080 start
, end
, end
- start
,
2081 prot
& PAGE_READ
? 'r' : '-',
2082 prot
& PAGE_WRITE
? 'w' : '-',
2083 prot
& PAGE_EXEC
? 'x' : '-');
2097 int page_get_flags(target_ulong address
)
2101 p
= page_find(address
>> TARGET_PAGE_BITS
);
2107 /* modify the flags of a page and invalidate the code if
2108 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2109 depending on PAGE_WRITE */
2110 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2115 /* mmap_lock should already be held. */
2116 start
= start
& TARGET_PAGE_MASK
;
2117 end
= TARGET_PAGE_ALIGN(end
);
2118 if (flags
& PAGE_WRITE
)
2119 flags
|= PAGE_WRITE_ORG
;
2120 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2121 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2122 /* We may be called for host regions that are outside guest
2126 /* if the write protection is set, then we invalidate the code
2128 if (!(p
->flags
& PAGE_WRITE
) &&
2129 (flags
& PAGE_WRITE
) &&
2131 tb_invalidate_phys_page(addr
, 0, NULL
);
2137 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2143 if (start
+ len
< start
)
2144 /* we've wrapped around */
2147 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we lose bits in the next step */
2148 start
= start
& TARGET_PAGE_MASK
;
2150 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2151 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2154 if( !(p
->flags
& PAGE_VALID
) )
2157 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2159 if (flags
& PAGE_WRITE
) {
2160 if (!(p
->flags
& PAGE_WRITE_ORG
))
2162 /* unprotect the page if it was put read-only because it
2163 contains translated code */
2164 if (!(p
->flags
& PAGE_WRITE
)) {
2165 if (!page_unprotect(addr
, 0, NULL
))
2174 /* called from signal handler: invalidate the code and unprotect the
2175 page. Return TRUE if the fault was successfully handled. */
2176 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2178 unsigned int page_index
, prot
, pindex
;
2180 target_ulong host_start
, host_end
, addr
;
2182 /* Technically this isn't safe inside a signal handler. However we
2183 know this only ever happens in a synchronous SEGV handler, so in
2184 practice it seems to be ok. */
2187 host_start
= address
& qemu_host_page_mask
;
2188 page_index
= host_start
>> TARGET_PAGE_BITS
;
2189 p1
= page_find(page_index
);
2194 host_end
= host_start
+ qemu_host_page_size
;
2197 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2201 /* if the page was really writable, then we change its
2202 protection back to writable */
2203 if (prot
& PAGE_WRITE_ORG
) {
2204 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2205 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2206 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2207 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2208 p1
[pindex
].flags
|= PAGE_WRITE
;
2209 /* and since the content will be modified, we must invalidate
2210 the corresponding translated code. */
2211 tb_invalidate_phys_page(address
, pc
, puc
);
2212 #ifdef DEBUG_TB_CHECK
2213 tb_invalidate_check(address
);
2223 static inline void tlb_set_dirty(CPUState
*env
,
2224 unsigned long addr
, target_ulong vaddr
)
2227 #endif /* defined(CONFIG_USER_ONLY) */
2229 #if !defined(CONFIG_USER_ONLY)
2231 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2232 ram_addr_t memory
, ram_addr_t region_offset
);
2233 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2234 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2235 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2238 if (addr > start_addr) \
2241 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2242 if (start_addr2 > 0) \
2246 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2247 end_addr2 = TARGET_PAGE_SIZE - 1; \
2249 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2250 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2255 /* register physical memory. 'size' must be a multiple of the target
2256 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2257 io memory page. The address used when calling the IO function is
2258 the offset from the start of the region, plus region_offset. Both
2259 start_addr and region_offset are rounded down to a page boundary
2260 before calculating this offset. This should not be a problem unless
2261 the low bits of start_addr and region_offset differ. */
2262 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2264 ram_addr_t phys_offset
,
2265 ram_addr_t region_offset
)
2267 target_phys_addr_t addr
, end_addr
;
2270 ram_addr_t orig_size
= size
;
2274 /* XXX: should not depend on cpu context */
2276 if (env
->kqemu_enabled
) {
2277 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2281 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2283 region_offset
&= TARGET_PAGE_MASK
;
2284 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2285 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2286 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2287 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2288 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2289 ram_addr_t orig_memory
= p
->phys_offset
;
2290 target_phys_addr_t start_addr2
, end_addr2
;
2291 int need_subpage
= 0;
2293 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2295 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2296 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2297 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2298 &p
->phys_offset
, orig_memory
,
2301 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2304 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2306 p
->region_offset
= 0;
2308 p
->phys_offset
= phys_offset
;
2309 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2310 (phys_offset
& IO_MEM_ROMD
))
2311 phys_offset
+= TARGET_PAGE_SIZE
;
2314 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2315 p
->phys_offset
= phys_offset
;
2316 p
->region_offset
= region_offset
;
2317 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2318 (phys_offset
& IO_MEM_ROMD
)) {
2319 phys_offset
+= TARGET_PAGE_SIZE
;
2321 target_phys_addr_t start_addr2
, end_addr2
;
2322 int need_subpage
= 0;
2324 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2325 end_addr2
, need_subpage
);
2327 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2328 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2329 &p
->phys_offset
, IO_MEM_UNASSIGNED
,
2331 subpage_register(subpage
, start_addr2
, end_addr2
,
2332 phys_offset
, region_offset
);
2333 p
->region_offset
= 0;
2337 region_offset
+= TARGET_PAGE_SIZE
;
2340 /* since each CPU stores ram addresses in its TLB cache, we must
2341 reset the modified entries */
2343 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2348 /* XXX: temporary until new memory mapping API */
2349 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2353 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2355 return IO_MEM_UNASSIGNED
;
2356 return p
->phys_offset
;
2359 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2362 kvm_coalesce_mmio_region(addr
, size
);
2365 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2368 kvm_uncoalesce_mmio_region(addr
, size
);
2371 /* XXX: better than nothing */
2372 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2375 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2376 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
")\n",
2377 (uint64_t)size
, (uint64_t)phys_ram_size
);
2380 addr
= phys_ram_alloc_offset
;
2381 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2385 void qemu_ram_free(ram_addr_t addr
)
2389 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2391 #ifdef DEBUG_UNASSIGNED
2392 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2394 #if defined(TARGET_SPARC)
2395 do_unassigned_access(addr
, 0, 0, 0, 1);
2400 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
2402 #ifdef DEBUG_UNASSIGNED
2403 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2405 #if defined(TARGET_SPARC)
2406 do_unassigned_access(addr
, 0, 0, 0, 2);
2411 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
2413 #ifdef DEBUG_UNASSIGNED
2414 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2416 #if defined(TARGET_SPARC)
2417 do_unassigned_access(addr
, 0, 0, 0, 4);
2422 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2424 #ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2427 #if defined(TARGET_SPARC)
2428 do_unassigned_access(addr
, 1, 0, 0, 1);
2432 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2434 #ifdef DEBUG_UNASSIGNED
2435 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2437 #if defined(TARGET_SPARC)
2438 do_unassigned_access(addr
, 1, 0, 0, 2);
2442 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2444 #ifdef DEBUG_UNASSIGNED
2445 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2447 #if defined(TARGET_SPARC)
2448 do_unassigned_access(addr
, 1, 0, 0, 4);
2452 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2453 unassigned_mem_readb
,
2454 unassigned_mem_readw
,
2455 unassigned_mem_readl
,
2458 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2459 unassigned_mem_writeb
,
2460 unassigned_mem_writew
,
2461 unassigned_mem_writel
,
2464 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2468 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2469 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2470 #if !defined(CONFIG_USER_ONLY)
2471 tb_invalidate_phys_page_fast(ram_addr
, 1);
2472 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2475 stb_p(phys_ram_base
+ ram_addr
, val
);
2477 if (cpu_single_env
->kqemu_enabled
&&
2478 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2479 kqemu_modify_page(cpu_single_env
, ram_addr
);
2481 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2482 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2483 /* we remove the notdirty callback only if the code has been
2485 if (dirty_flags
== 0xff)
2486 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2489 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2493 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2494 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2495 #if !defined(CONFIG_USER_ONLY)
2496 tb_invalidate_phys_page_fast(ram_addr
, 2);
2497 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2500 stw_p(phys_ram_base
+ ram_addr
, val
);
2502 if (cpu_single_env
->kqemu_enabled
&&
2503 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2504 kqemu_modify_page(cpu_single_env
, ram_addr
);
2506 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2507 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2508 /* we remove the notdirty callback only if the code has been
2510 if (dirty_flags
== 0xff)
2511 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2514 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2518 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2519 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2520 #if !defined(CONFIG_USER_ONLY)
2521 tb_invalidate_phys_page_fast(ram_addr
, 4);
2522 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2525 stl_p(phys_ram_base
+ ram_addr
, val
);
2527 if (cpu_single_env
->kqemu_enabled
&&
2528 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2529 kqemu_modify_page(cpu_single_env
, ram_addr
);
2531 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2532 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2533 /* we remove the notdirty callback only if the code has been
2535 if (dirty_flags
== 0xff)
2536 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2539 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2540 NULL
, /* never used */
2541 NULL
, /* never used */
2542 NULL
, /* never used */
2545 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2546 notdirty_mem_writeb
,
2547 notdirty_mem_writew
,
2548 notdirty_mem_writel
,
2551 /* Generate a debug exception if a watchpoint has been hit. */
2552 static void check_watchpoint(int offset
, int len_mask
, int flags
)
2554 CPUState
*env
= cpu_single_env
;
2555 target_ulong pc
, cs_base
;
2556 TranslationBlock
*tb
;
2561 if (env
->watchpoint_hit
) {
2562 /* We re-entered the check after replacing the TB. Now raise
2563 * the debug interrupt so that is will trigger after the
2564 * current instruction. */
2565 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2568 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2569 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2570 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
2571 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
2572 wp
->flags
|= BP_WATCHPOINT_HIT
;
2573 if (!env
->watchpoint_hit
) {
2574 env
->watchpoint_hit
= wp
;
2575 tb
= tb_find_pc(env
->mem_io_pc
);
2577 cpu_abort(env
, "check_watchpoint: could not find TB for "
2578 "pc=%p", (void *)env
->mem_io_pc
);
2580 cpu_restore_state(tb
, env
, env
->mem_io_pc
, NULL
);
2581 tb_phys_invalidate(tb
, -1);
2582 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2583 env
->exception_index
= EXCP_DEBUG
;
2585 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2586 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
2588 cpu_resume_from_signal(env
, NULL
);
2591 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2596 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2597 so these check for a hit then pass through to the normal out-of-line
2599 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2601 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
2602 return ldub_phys(addr
);
2605 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2607 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
2608 return lduw_phys(addr
);
2611 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2613 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
2614 return ldl_phys(addr
);
2617 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2620 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
2621 stb_phys(addr
, val
);
2624 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2627 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
2628 stw_phys(addr
, val
);
2631 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2634 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
2635 stl_phys(addr
, val
);
2638 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2644 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2650 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2656 idx
= SUBPAGE_IDX(addr
);
2657 #if defined(DEBUG_SUBPAGE)
2658 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2659 mmio
, len
, addr
, idx
);
2661 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
],
2662 addr
+ mmio
->region_offset
[idx
][0][len
]);
2667 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2668 uint32_t value
, unsigned int len
)
2672 idx
= SUBPAGE_IDX(addr
);
2673 #if defined(DEBUG_SUBPAGE)
2674 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2675 mmio
, len
, addr
, idx
, value
);
2677 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
],
2678 addr
+ mmio
->region_offset
[idx
][1][len
],
2682 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2684 #if defined(DEBUG_SUBPAGE)
2685 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2688 return subpage_readlen(opaque
, addr
, 0);
2691 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2694 #if defined(DEBUG_SUBPAGE)
2695 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2697 subpage_writelen(opaque
, addr
, value
, 0);
2700 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2702 #if defined(DEBUG_SUBPAGE)
2703 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2706 return subpage_readlen(opaque
, addr
, 1);
2709 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2712 #if defined(DEBUG_SUBPAGE)
2713 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2715 subpage_writelen(opaque
, addr
, value
, 1);
2718 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2720 #if defined(DEBUG_SUBPAGE)
2721 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2724 return subpage_readlen(opaque
, addr
, 2);
2727 static void subpage_writel (void *opaque
,
2728 target_phys_addr_t addr
, uint32_t value
)
2730 #if defined(DEBUG_SUBPAGE)
2731 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2733 subpage_writelen(opaque
, addr
, value
, 2);
2736 static CPUReadMemoryFunc
*subpage_read
[] = {
2742 static CPUWriteMemoryFunc
*subpage_write
[] = {
2748 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2749 ram_addr_t memory
, ram_addr_t region_offset
)
2754 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2756 idx
= SUBPAGE_IDX(start
);
2757 eidx
= SUBPAGE_IDX(end
);
2758 #if defined(DEBUG_SUBPAGE)
2759 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2760 mmio
, start
, end
, idx
, eidx
, memory
);
2762 memory
>>= IO_MEM_SHIFT
;
2763 for (; idx
<= eidx
; idx
++) {
2764 for (i
= 0; i
< 4; i
++) {
2765 if (io_mem_read
[memory
][i
]) {
2766 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2767 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2768 mmio
->region_offset
[idx
][0][i
] = region_offset
;
2770 if (io_mem_write
[memory
][i
]) {
2771 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2772 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2773 mmio
->region_offset
[idx
][1][i
] = region_offset
;
2781 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2782 ram_addr_t orig_memory
, ram_addr_t region_offset
)
2787 mmio
= qemu_mallocz(sizeof(subpage_t
));
2790 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2791 #if defined(DEBUG_SUBPAGE)
2792 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2793 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2795 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2796 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
,
2802 static void io_mem_init(void)
2804 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2805 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2806 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2809 io_mem_watch
= cpu_register_io_memory(0, watch_mem_read
,
2810 watch_mem_write
, NULL
);
2811 /* alloc dirty bits array */
2812 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2813 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2816 /* mem_read and mem_write are arrays of functions containing the
2817 function to access byte (index 0), word (index 1) and dword (index
2818 2). Functions can be omitted with a NULL function pointer. The
2819 registered functions may be modified dynamically later.
2820 If io_index is non zero, the corresponding io zone is
2821 modified. If it is zero, a new io zone is allocated. The return
2822 value can be used with cpu_register_physical_memory(). (-1) is
2823 returned if error. */
2824 int cpu_register_io_memory(int io_index
,
2825 CPUReadMemoryFunc
**mem_read
,
2826 CPUWriteMemoryFunc
**mem_write
,
2829 int i
, subwidth
= 0;
2831 if (io_index
<= 0) {
2832 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2834 io_index
= io_mem_nb
++;
2836 if (io_index
>= IO_MEM_NB_ENTRIES
)
2840 for(i
= 0;i
< 3; i
++) {
2841 if (!mem_read
[i
] || !mem_write
[i
])
2842 subwidth
= IO_MEM_SUBWIDTH
;
2843 io_mem_read
[io_index
][i
] = mem_read
[i
];
2844 io_mem_write
[io_index
][i
] = mem_write
[i
];
2846 io_mem_opaque
[io_index
] = opaque
;
2847 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2850 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2852 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2855 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2857 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2860 #endif /* !defined(CONFIG_USER_ONLY) */
2862 /* physical memory access (slow version, mainly for debug) */
2863 #if defined(CONFIG_USER_ONLY)
2864 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2865 int len
, int is_write
)
2872 page
= addr
& TARGET_PAGE_MASK
;
2873 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2876 flags
= page_get_flags(page
);
2877 if (!(flags
& PAGE_VALID
))
2880 if (!(flags
& PAGE_WRITE
))
2882 /* XXX: this code should not depend on lock_user */
2883 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2884 /* FIXME - should this return an error rather than just fail? */
2887 unlock_user(p
, addr
, l
);
2889 if (!(flags
& PAGE_READ
))
2891 /* XXX: this code should not depend on lock_user */
2892 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2893 /* FIXME - should this return an error rather than just fail? */
2896 unlock_user(p
, addr
, 0);
2905 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2906 int len
, int is_write
)
2911 target_phys_addr_t page
;
2916 page
= addr
& TARGET_PAGE_MASK
;
2917 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2920 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2922 pd
= IO_MEM_UNASSIGNED
;
2924 pd
= p
->phys_offset
;
2928 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2929 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2931 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
2932 /* XXX: could force cpu_single_env to NULL to avoid
2934 if (l
>= 4 && ((addr
& 3) == 0)) {
2935 /* 32 bit write access */
2937 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2939 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2940 /* 16 bit write access */
2942 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2945 /* 8 bit write access */
2947 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2951 unsigned long addr1
;
2952 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2954 ptr
= phys_ram_base
+ addr1
;
2955 memcpy(ptr
, buf
, l
);
2956 if (!cpu_physical_memory_is_dirty(addr1
)) {
2957 /* invalidate code */
2958 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2960 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2961 (0xff & ~CODE_DIRTY_FLAG
);
2965 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2966 !(pd
& IO_MEM_ROMD
)) {
2968 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2970 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
2971 if (l
>= 4 && ((addr
& 3) == 0)) {
2972 /* 32 bit read access */
2973 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2976 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2977 /* 16 bit read access */
2978 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2982 /* 8 bit read access */
2983 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2989 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2990 (addr
& ~TARGET_PAGE_MASK
);
2991 memcpy(buf
, ptr
, l
);
3000 /* used for ROM loading : can write in RAM and ROM */
3001 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3002 const uint8_t *buf
, int len
)
3006 target_phys_addr_t page
;
3011 page
= addr
& TARGET_PAGE_MASK
;
3012 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3015 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3017 pd
= IO_MEM_UNASSIGNED
;
3019 pd
= p
->phys_offset
;
3022 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
3023 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
3024 !(pd
& IO_MEM_ROMD
)) {
3027 unsigned long addr1
;
3028 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3030 ptr
= phys_ram_base
+ addr1
;
3031 memcpy(ptr
, buf
, l
);
3041 target_phys_addr_t addr
;
3042 target_phys_addr_t len
;
3045 static BounceBuffer bounce
;
3047 typedef struct MapClient
{
3049 void (*callback
)(void *opaque
);
3050 LIST_ENTRY(MapClient
) link
;
3053 static LIST_HEAD(map_client_list
, MapClient
) map_client_list
3054 = LIST_HEAD_INITIALIZER(map_client_list
);
3056 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3058 MapClient
*client
= qemu_malloc(sizeof(*client
));
3060 client
->opaque
= opaque
;
3061 client
->callback
= callback
;
3062 LIST_INSERT_HEAD(&map_client_list
, client
, link
);
3066 void cpu_unregister_map_client(void *_client
)
3068 MapClient
*client
= (MapClient
*)_client
;
3070 LIST_REMOVE(client
, link
);
3073 static void cpu_notify_map_clients(void)
3077 while (!LIST_EMPTY(&map_client_list
)) {
3078 client
= LIST_FIRST(&map_client_list
);
3079 client
->callback(client
->opaque
);
3080 LIST_REMOVE(client
, link
);
3084 /* Map a physical memory region into a host virtual address.
3085 * May map a subset of the requested range, given by and returned in *plen.
3086 * May return NULL if resources needed to perform the mapping are exhausted.
3087 * Use only for reads OR writes - not for read-modify-write operations.
3088 * Use cpu_register_map_client() to know when retrying the map operation is
3089 * likely to succeed.
3091 void *cpu_physical_memory_map(target_phys_addr_t addr
,
3092 target_phys_addr_t
*plen
,
3095 target_phys_addr_t len
= *plen
;
3096 target_phys_addr_t done
= 0;
3098 uint8_t *ret
= NULL
;
3100 target_phys_addr_t page
;
3103 unsigned long addr1
;
3106 page
= addr
& TARGET_PAGE_MASK
;
3107 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3110 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3112 pd
= IO_MEM_UNASSIGNED
;
3114 pd
= p
->phys_offset
;
3117 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3118 if (done
|| bounce
.buffer
) {
3121 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3125 cpu_physical_memory_rw(addr
, bounce
.buffer
, l
, 0);
3127 ptr
= bounce
.buffer
;
3129 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3130 ptr
= phys_ram_base
+ addr1
;
3134 } else if (ret
+ done
!= ptr
) {
3146 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3147 * Will also mark the memory as dirty if is_write == 1. access_len gives
3148 * the amount of memory that was actually read or written by the caller.
3150 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
3151 int is_write
, target_phys_addr_t access_len
)
3153 if (buffer
!= bounce
.buffer
) {
3155 unsigned long addr1
= (uint8_t *)buffer
- phys_ram_base
;
3156 while (access_len
) {
3158 l
= TARGET_PAGE_SIZE
;
3161 if (!cpu_physical_memory_is_dirty(addr1
)) {
3162 /* invalidate code */
3163 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3165 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3166 (0xff & ~CODE_DIRTY_FLAG
);
3175 cpu_physical_memory_write(bounce
.addr
, bounce
.buffer
, access_len
);
3177 qemu_free(bounce
.buffer
);
3178 bounce
.buffer
= NULL
;
3179 cpu_notify_map_clients();
3182 /* warning: addr must be aligned */
3183 uint32_t ldl_phys(target_phys_addr_t addr
)
3191 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3193 pd
= IO_MEM_UNASSIGNED
;
3195 pd
= p
->phys_offset
;
3198 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3199 !(pd
& IO_MEM_ROMD
)) {
3201 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3203 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3204 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3207 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3208 (addr
& ~TARGET_PAGE_MASK
);
3214 /* warning: addr must be aligned */
3215 uint64_t ldq_phys(target_phys_addr_t addr
)
3223 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3225 pd
= IO_MEM_UNASSIGNED
;
3227 pd
= p
->phys_offset
;
3230 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3231 !(pd
& IO_MEM_ROMD
)) {
3233 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3235 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3236 #ifdef TARGET_WORDS_BIGENDIAN
3237 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
3238 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
3240 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3241 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
3245 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3246 (addr
& ~TARGET_PAGE_MASK
);
3253 uint32_t ldub_phys(target_phys_addr_t addr
)
3256 cpu_physical_memory_read(addr
, &val
, 1);
3261 uint32_t lduw_phys(target_phys_addr_t addr
)
3264 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
3265 return tswap16(val
);
3268 /* warning: addr must be aligned. The ram page is not masked as dirty
3269 and the code inside is not invalidated. It is useful if the dirty
3270 bits are used to track modified PTEs */
3271 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
3278 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3280 pd
= IO_MEM_UNASSIGNED
;
3282 pd
= p
->phys_offset
;
3285 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3286 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3288 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3289 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3291 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3292 ptr
= phys_ram_base
+ addr1
;
3295 if (unlikely(in_migration
)) {
3296 if (!cpu_physical_memory_is_dirty(addr1
)) {
3297 /* invalidate code */
3298 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3300 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3301 (0xff & ~CODE_DIRTY_FLAG
);
3307 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
3314 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3316 pd
= IO_MEM_UNASSIGNED
;
3318 pd
= p
->phys_offset
;
3321 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3322 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3324 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3325 #ifdef TARGET_WORDS_BIGENDIAN
3326 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
3327 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
3329 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3330 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
3333 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3334 (addr
& ~TARGET_PAGE_MASK
);
3339 /* warning: addr must be aligned */
3340 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
3347 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3349 pd
= IO_MEM_UNASSIGNED
;
3351 pd
= p
->phys_offset
;
3354 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3355 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3357 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3358 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3360 unsigned long addr1
;
3361 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3363 ptr
= phys_ram_base
+ addr1
;
3365 if (!cpu_physical_memory_is_dirty(addr1
)) {
3366 /* invalidate code */
3367 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3369 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3370 (0xff & ~CODE_DIRTY_FLAG
);
3376 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3379 cpu_physical_memory_write(addr
, &v
, 1);
3383 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3385 uint16_t v
= tswap16(val
);
3386 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3390 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3393 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
3398 /* virtual memory access for debug */
3399 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3400 uint8_t *buf
, int len
, int is_write
)
3403 target_phys_addr_t phys_addr
;
3407 page
= addr
& TARGET_PAGE_MASK
;
3408 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3409 /* if no physical page mapped, return an error */
3410 if (phys_addr
== -1)
3412 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3415 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
3424 /* in deterministic execution mode, instructions doing device I/Os
3425 must be at the end of the TB */
3426 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
3428 TranslationBlock
*tb
;
3430 target_ulong pc
, cs_base
;
3433 tb
= tb_find_pc((unsigned long)retaddr
);
3435 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
3438 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
3439 cpu_restore_state(tb
, env
, (unsigned long)retaddr
, NULL
);
3440 /* Calculate how many instructions had been executed before the fault
3442 n
= n
- env
->icount_decr
.u16
.low
;
3443 /* Generate a new TB ending on the I/O insn. */
3445 /* On MIPS and SH, delay slot instructions can only be restarted if
3446 they were already the first instruction in the TB. If this is not
3447 the first instruction in a TB then re-execute the preceding
3449 #if defined(TARGET_MIPS)
3450 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
3451 env
->active_tc
.PC
-= 4;
3452 env
->icount_decr
.u16
.low
++;
3453 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
3455 #elif defined(TARGET_SH4)
3456 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
3459 env
->icount_decr
.u16
.low
++;
3460 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
3463 /* This should never happen. */
3464 if (n
> CF_COUNT_MASK
)
3465 cpu_abort(env
, "TB too big during recompile");
3467 cflags
= n
| CF_LAST_IO
;
3469 cs_base
= tb
->cs_base
;
3471 tb_phys_invalidate(tb
, -1);
3472 /* FIXME: In theory this could raise an exception. In practice
3473 we have already translated the block once so it's probably ok. */
3474 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
3475 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3476 the first in the TB) then we end up generating a whole new TB and
3477 repeating the fault, which is horribly inefficient.
3478 Better would be to execute just this insn uncached, or generate a
3480 cpu_resume_from_signal(env
, NULL
);
3483 void dump_exec_info(FILE *f
,
3484 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
3486 int i
, target_code_size
, max_target_code_size
;
3487 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
3488 TranslationBlock
*tb
;
3490 target_code_size
= 0;
3491 max_target_code_size
= 0;
3493 direct_jmp_count
= 0;
3494 direct_jmp2_count
= 0;
3495 for(i
= 0; i
< nb_tbs
; i
++) {
3497 target_code_size
+= tb
->size
;
3498 if (tb
->size
> max_target_code_size
)
3499 max_target_code_size
= tb
->size
;
3500 if (tb
->page_addr
[1] != -1)
3502 if (tb
->tb_next_offset
[0] != 0xffff) {
3504 if (tb
->tb_next_offset
[1] != 0xffff) {
3505 direct_jmp2_count
++;
3509 /* XXX: avoid using doubles ? */
3510 cpu_fprintf(f
, "Translation buffer state:\n");
3511 cpu_fprintf(f
, "gen code size %ld/%ld\n",
3512 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
3513 cpu_fprintf(f
, "TB count %d/%d\n",
3514 nb_tbs
, code_gen_max_blocks
);
3515 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
3516 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3517 max_target_code_size
);
3518 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3519 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3520 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3521 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3523 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3524 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3526 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3528 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3529 cpu_fprintf(f
, "\nStatistics:\n");
3530 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3531 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3532 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3533 tcg_dump_info(f
, cpu_fprintf
);
3536 #if !defined(CONFIG_USER_ONLY)
3538 #define MMUSUFFIX _cmmu
3539 #define GETPC() NULL
3540 #define env cpu_single_env
3541 #define SOFTMMU_CODE_ACCESS
3544 #include "softmmu_template.h"
3547 #include "softmmu_template.h"
3550 #include "softmmu_template.h"
3553 #include "softmmu_template.h"