/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break. */
ram_addr_t last_ram_offset;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64-bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
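/* Note: page descriptors are kept in a two-level table.  A target page
   index is split into an L1 index (the top L1_BITS) and an L2 index (the
   low L2_BITS), so a lookup is essentially
       l1_map[index >> L2_BITS][index & (L2_SIZE - 1)]
   as page_find()/page_find_alloc() below do. */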
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc **l1_phys_map;

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
static const char *logfilename = "qemu.log";
static const char *logfilename = "/tmp/qemu.log";

static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#endif

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
        startaddr = MIN(startaddr,
                        (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
        endaddr = MIN(endaddr,
                      (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
        page_set_flags(startaddr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(endaddr),
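/* Return a pointer to the L1 slot covering the given target page index;
   the caller then indexes the L2 array of PageDesc with the low L2_BITS. */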
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    lp = page_l1_map(index);

    /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
    size_t len = sizeof(PageDesc) * L2_SIZE;
    /* Don't use qemu_malloc because it may recurse.  */
    p = mmap(NULL, len, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    unsigned long addr = h2g(p);
    page_set_flags(addr & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(addr + len),
#else
    p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
#endif
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
    lp = page_l1_map(index);

    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    /* allocate if not found */
    p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
    memset(p, 0, sizeof(void *) * L1_SIZE);
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    /* allocate if not found */
    pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++) {
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
        pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
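/* Note: in system emulation the mmap lock is a no-op; write protection of
   pages that contain translated code goes through the softmmu TLB (see
   tlb_protect_code() above) rather than through host mprotect(). */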
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
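/* code_gen_alloc() below picks the size of the translated-code buffer and
   makes it executable: a static buffer in user mode, otherwise an anonymous
   mmap() whose placement is constrained on some hosts (x86_64, sparc64, arm)
   so that direct branches can reach the generated code. */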
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    if (env->cpu_index == cpu)
void cpu_exec_init(CPUState *env)
{
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    for(i = 0; i < L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
{
    TranslationBlock *tb1;

    *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
    ptb = (TranslationBlock **)((char *)tb1 + next_offset);
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    *ptb = tb1->page_next[n1];
    ptb = &tb1->page_next[n1];
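/* Note: the TB lists above encode extra state in the low bits of the
   pointer: page_next[] and jmp_first/jmp_next[] store (tb | n), where n
   selects which of the two pages or jump slots of 'tb' the link refers
   to, and (tb | 2) in jmp_first marks the end of the circular list. */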
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
    /* find tb(n) in circular list */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    if (n1 == n && tb1 == tb)
        ptb = &tb1->jmp_first;
    ptb = &tb1->jmp_next[n1];
    /* now we can suppress tb(n) from the list */
    *ptb = tb->jmp_next[n];

    tb->jmp_next[n] = NULL;
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb2 = tb1->jmp_next[n1];
    tb_reset_jump(tb1, n1);
    tb1->jmp_next[n1] = NULL;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
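/* Build the bitmap of the bytes of this target page that are covered by
   translated code; tb_invalidate_phys_page_fast() consults it so that
   writes which only touch data bytes do not force a TB invalidation. */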
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = (TranslationBlock *)((long)tb & ~3);
    /* NOTE: this is subtle as a TB may span two physical pages */
    /* NOTE: tb_end may be after the end of the page, but
       it is not a problem */
    tb_start = tb->pc & ~TARGET_PAGE_MASK;
    tb_end = tb_start + tb->size;
    if (tb_end > TARGET_PAGE_SIZE)
        tb_end = TARGET_PAGE_SIZE;
    tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
    set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
    tb = tb->page_next[n];
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
    /* flush must be done */
    /* cannot fail at this point */
    /* Don't forget to invalidate previous TB info. */
    tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = (TranslationBlock *)((long)tb & ~3);
    tb_next = tb->page_next[n];
    /* NOTE: this is subtle as a TB may span two physical pages */
    /* NOTE: tb_end may be after the end of the page, but
       it is not a problem */
    tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    tb_end = tb_start + tb->size;
    tb_start = tb->page_addr[1];
    tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
    if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb_not_found) {
            current_tb_not_found = 0;
            if (env->mem_io_pc) {
                /* now we have a real cpu fault */
                current_tb = tb_find_pc(env->mem_io_pc);
            }
        }
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env,
                              env->mem_io_pc, NULL);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        /* we need to do that to handle the case where a signal
           occurs while doing tb_phys_invalidate() */
        saved_tb = env->current_tb;
        env->current_tb = NULL;
        tb_phys_invalidate(tb, -1);
        env->current_tb = saved_tb;
        if (env->interrupt_request && env->current_tb)
            cpu_interrupt(env, env->interrupt_request);
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    invalidate_page_bitmap(p);
    if (is_cpu_write_access) {
        tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
             cpu_single_env->mem_io_vaddr, len,
             cpu_single_env->eip,
             cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
    } else {
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
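/* Self-modifying code is handled by write-protecting any page that holds
   translated code: tb_alloc_page() below removes PAGE_WRITE (user mode) or
   arms the TLB (system mode), and the resulting write fault ends up in
   tb_invalidate_phys_page()/page_unprotect(), which invalidates the TBs. */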
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
    /* find head of list */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb1 = tb1->jmp_next[n1];
    /* we are now sure that tb jumps to tb1 */

    /* remove tb from the jmp_first list */
    ptb = &tb_next->jmp_first;
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    if (n1 == n && tb1 == tb)
        break;
    ptb = &tb1->jmp_next[n1];
    *ptb = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;

    /* suppress the jump to next tb in generated code */
    tb_reset_jump(tb, n);

    /* suppress jumps in the tb on which we could have jumped */
    tb_reset_jump_recursive(tb_next);
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    }
    wp = qemu_malloc(sizeof(*wp));
    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
{
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        kvm_update_guest_debug(env, 0);
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    env->current_tb = NULL;
    tb_reset_jump_recursive(tb);
    spin_unlock(&interrupt_lock);
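/* Request an interrupt: set the corresponding interrupt_request bits and
   unlink the currently executing TB chain so the CPU loop notices the
   request at the next TB boundary. */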
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     */
    if (!qemu_cpu_self(env)) {
    env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        && (mask & ~old_mask) != 0) {
        cpu_abort(env, "Raised interrupt while not in I/O function");
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
    }
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
    }
}

static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
                                         CPUPhysMemoryClient *client)
{
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        for (l2 = 0; l2 < L2_SIZE; ++l2) {
            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
                continue;
            }
            client->set_memory(client, pd[l2].region_offset,
                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    void **phys_map = (void **)l1_phys_map;
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        phys_page_for_each_in_l1_map(phys_map[l1], client);
    }
#else
    phys_page_for_each_in_l1_map(l1_phys_map, client);
#endif
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;

    p1 = strchr(p, ',');
    if(cmp1(p,p1-p,"all")) {
        for(item = cpu_log_items; item->mask != 0; item++) {
    } else {
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
#endif

#if !defined(CONFIG_USER_ONLY)
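/* tb_jmp_cache is a direct-mapped cache of recently executed TBs indexed
   by a hash of the guest PC; flushing a page must drop any entry whose TB
   might overlap that page, including TBs starting on the previous page. */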
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
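/* Code pages are tracked through the dirty bitmap: clearing CODE_DIRTY_FLAG
   below forces writes to such pages through the notdirty slow path, which
   calls tb_invalidate_phys_page_fast() before marking the page dirty again. */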
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
{
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
                                    + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p->region_offset;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    }
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
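/* Note: addr_read/addr_write/addr_code hold the page-aligned guest virtual
   address plus flag bits (TLB_MMIO, TLB_NOTDIRTY, ...); adding 'addend' to a
   guest virtual address yields the corresponding host pointer for RAM. */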
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
int walk_memory_regions(void *priv,
                        int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    int i, j, prot, prot1;

    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                rc = (*fn)(priv, start, end, prot);
                /* callback can stop iteration by returning != 0 */
static int dump_region(void *priv, unsigned long start,
                       unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !(p->flags & PAGE_VALID) )
            return -1;
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);

    host_end = host_start + qemu_host_page_size;

    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {

    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
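/* A subpage_t is used when a single target page maps several different
   memory regions: the per-byte tables above dispatch each access within
   the page to the I/O handlers of the region that covers that offset. */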
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr)                                          \
        start_addr2 = start_addr & ~TARGET_PAGE_MASK;                   \
        if (start_addr2 > 0)                                            \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else                                                            \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
        if (end_addr2 < TARGET_PAGE_SIZE - 1)                           \
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
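/* Illustrative sketch (not part of the original file): one way board code
   could use the registration API above.  The my_board_ram_init name and the
   16 MB size are invented for illustration; real boards usually go through
   the cpu_register_physical_memory() wrapper mentioned later in this file. */
#if 0 /* example only */
static void my_board_ram_init(void)
{
    /* Allocate a RAM block and map it at guest physical address 0;
       a region_offset of 0 is the common case. */
    ram_addr_t ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory_offset(0x00000000, 16 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);
}
#endif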
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    return area;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
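/* Illustrative sketch (not part of the original file): how a device model
   that owns a RAM block might keep a host pointer to it, per the comment
   above.  The my_vram* names and the 8 MB size are invented for illustration. */
#if 0 /* example only */
static ram_addr_t my_vram_offset;
static uint8_t *my_vram_ptr;

static void my_vram_init(void)
{
    my_vram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    /* Safe here because the device owns the block and stays inside it;
       guest-driven DMA should use cpu_physical_memory_map/_rw instead. */
    my_vram_ptr = qemu_get_ram_ptr(my_vram_offset);
}
#endif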
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
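/* Illustrative sketch (not part of the original file): check_watchpoint()
   only fires for addresses that were previously registered, which is assumed
   here to be done with the cpu_watchpoint_insert() helper defined earlier in
   this file.  The address, length and flags below are invented. */
#if 0 /* example only */
static void my_debug_watch_example(CPUState *env)
{
    CPUWatchpoint *wp;

    /* Trap 4-byte-wide guest writes to virtual address 0x1000. */
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif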
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
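/* Illustrative sketch (not part of the original file): wiring a toy MMIO
   region into the tables above, as the comment before
   cpu_register_io_memory_fixed() describes.  The my_dev* names, the register
   value and the base address parameter are invented for illustration. */
#if 0 /* example only */
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678; /* value of the (only) 32-bit register */
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* react to the guest write here */
}

/* byte and word handlers omitted: the region is then flagged with
   IO_MEM_SUBWIDTH by cpu_register_io_memory_fixed() */
static CPUReadMemoryFunc * const my_dev_read[3] = {
    NULL, NULL, my_dev_readl,
};
static CPUWriteMemoryFunc * const my_dev_write[3] = {
    NULL, NULL, my_dev_writel,
};

static void my_dev_map(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(my_dev_read, my_dev_write, NULL);
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}
#endif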
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    unsigned long addr1;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
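/* Illustrative sketch (not part of the original file): the intended
   map/modify/unmap pattern for DMA-style accesses described above.  The
   my_dma_write name is invented for illustration. */
#if 0 /* example only */
static void my_dma_write(target_phys_addr_t addr, const uint8_t *data,
                         target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* resources exhausted; retry later, e.g. after a
           cpu_register_map_client() callback */
        return;
    }
    /* plen may have been reduced to a subset of the requested range */
    memcpy(host, data, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif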
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
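/* Illustrative sketch (not part of the original file): how a debugger front
   end (gdbstub, monitor) might read guest virtual memory through the helper
   above.  The my_dump_guest_bytes name is invented for illustration. */
#if 0 /* example only */
static void my_dump_guest_bytes(CPUState *env, target_ulong addr)
{
    uint8_t buf[16];

    if (cpu_memory_rw_debug(env, addr, buf, sizeof(buf), 0) == 0) {
        /* buf now holds 16 bytes of guest memory at addr */
    }
}
#endif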
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif