2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
24 #include <sys/types.h>
37 #include "qemu-common.h"
42 #if defined(CONFIG_USER_ONLY)
46 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 static TranslationBlock
*tbs
;
84 int code_gen_max_blocks
;
85 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
87 /* any access to the tbs or the page table must use this lock */
88 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
90 #if defined(__arm__) || defined(__sparc_v9__)
91 /* The prologue must be reachable with a direct jump. ARM and Sparc64
92 have limited branch ranges (possibly also PPC) so place it in a
93 section close to code segment. */
94 #define code_gen_section \
95 __attribute__((__section__(".gen_code"))) \
96 __attribute__((aligned (32)))
98 #define code_gen_section \
99 __attribute__((aligned (32)))
102 uint8_t code_gen_prologue
[1024] code_gen_section
;
103 static uint8_t *code_gen_buffer
;
104 static unsigned long code_gen_buffer_size
;
105 /* threshold to flush the translated code buffer */
106 static unsigned long code_gen_buffer_max_size
;
107 uint8_t *code_gen_ptr
;
109 #if !defined(CONFIG_USER_ONLY)
111 uint8_t *phys_ram_dirty
;
112 static int in_migration
;
114 typedef struct RAMBlock
{
118 struct RAMBlock
*next
;
121 static RAMBlock
*ram_blocks
;
122 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
123 then we can no longer assume contiguous ram offsets, and external uses
124 of this variable will break. */
125 ram_addr_t last_ram_offset
;
129 /* current CPU in the current thread. It is only valid inside
131 CPUState
*cpu_single_env
;
132 /* 0 = Do not count executed instructions.
133 1 = Precise instruction counting.
134 2 = Adaptive rate instruction counting. */
136 /* Current instruction counter. While executing translated code this may
137 include some instructions that have not yet been executed. */
140 typedef struct PageDesc
{
141 /* list of TBs intersecting this ram page */
142 TranslationBlock
*first_tb
;
143 /* in order to optimize self modifying code, we count the number
144 of lookups we do to a given page to use a bitmap */
145 unsigned int code_write_count
;
146 uint8_t *code_bitmap
;
147 #if defined(CONFIG_USER_ONLY)
152 typedef struct PhysPageDesc
{
153 /* offset in host memory of the page + io_index in the low bits */
154 ram_addr_t phys_offset
;
155 ram_addr_t region_offset
;
159 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160 /* XXX: this is a temporary hack for alpha target.
161 * In the future, this is to be replaced by a multi-level table
162 * to actually be able to handle the complete 64 bits address space.
164 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
166 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169 #define L1_SIZE (1 << L1_BITS)
170 #define L2_SIZE (1 << L2_BITS)
172 unsigned long qemu_real_host_page_size
;
173 unsigned long qemu_host_page_bits
;
174 unsigned long qemu_host_page_size
;
175 unsigned long qemu_host_page_mask
;
177 /* XXX: for system emulation, it could just be an array */
178 static PageDesc
*l1_map
[L1_SIZE
];
179 static PhysPageDesc
**l1_phys_map
;
181 #if !defined(CONFIG_USER_ONLY)
182 static void io_mem_init(void);
184 /* io memory support */
185 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
186 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
187 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
188 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
189 static int io_mem_watch
;
193 static const char *logfilename
= "/tmp/qemu.log";
196 static int log_append
= 0;
199 static int tlb_flush_count
;
200 static int tb_flush_count
;
201 static int tb_phys_invalidate_count
;
203 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204 typedef struct subpage_t
{
205 target_phys_addr_t base
;
206 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
207 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
208 void *opaque
[TARGET_PAGE_SIZE
][2][4];
209 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
213 static void map_exec(void *addr
, long size
)
216 VirtualProtect(addr
, size
,
217 PAGE_EXECUTE_READWRITE
, &old_protect
);
221 static void map_exec(void *addr
, long size
)
223 unsigned long start
, end
, page_size
;
225 page_size
= getpagesize();
226 start
= (unsigned long)addr
;
227 start
&= ~(page_size
- 1);
229 end
= (unsigned long)addr
+ size
;
230 end
+= page_size
- 1;
231 end
&= ~(page_size
- 1);
233 mprotect((void *)start
, end
- start
,
234 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
238 static void page_init(void)
240 /* NOTE: we can always suppose that qemu_host_page_size >=
244 SYSTEM_INFO system_info
;
246 GetSystemInfo(&system_info
);
247 qemu_real_host_page_size
= system_info
.dwPageSize
;
250 qemu_real_host_page_size
= getpagesize();
252 if (qemu_host_page_size
== 0)
253 qemu_host_page_size
= qemu_real_host_page_size
;
254 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
255 qemu_host_page_size
= TARGET_PAGE_SIZE
;
256 qemu_host_page_bits
= 0;
257 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
258 qemu_host_page_bits
++;
259 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
260 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
261 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
263 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
265 long long startaddr
, endaddr
;
270 last_brk
= (unsigned long)sbrk(0);
271 f
= fopen("/proc/self/maps", "r");
274 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
276 startaddr
= MIN(startaddr
,
277 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
278 endaddr
= MIN(endaddr
,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
280 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
281 TARGET_PAGE_ALIGN(endaddr
),
292 static inline PageDesc
**page_l1_map(target_ulong index
)
294 #if TARGET_LONG_BITS > 32
295 /* Host memory outside guest VM. For 32-bit targets we have already
296 excluded high addresses. */
297 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
300 return &l1_map
[index
>> L2_BITS
];
303 static inline PageDesc
*page_find_alloc(target_ulong index
)
306 lp
= page_l1_map(index
);
312 /* allocate if not found */
313 #if defined(CONFIG_USER_ONLY)
314 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
315 /* Don't use qemu_malloc because it may recurse. */
316 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
317 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
320 unsigned long addr
= h2g(p
);
321 page_set_flags(addr
& TARGET_PAGE_MASK
,
322 TARGET_PAGE_ALIGN(addr
+ len
),
326 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
330 return p
+ (index
& (L2_SIZE
- 1));
333 static inline PageDesc
*page_find(target_ulong index
)
336 lp
= page_l1_map(index
);
343 return p
+ (index
& (L2_SIZE
- 1));
346 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
351 p
= (void **)l1_phys_map
;
352 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
354 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
357 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
360 /* allocate if not found */
363 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
364 memset(p
, 0, sizeof(void *) * L1_SIZE
);
368 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
372 /* allocate if not found */
375 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
377 for (i
= 0; i
< L2_SIZE
; i
++) {
378 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
379 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
382 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
385 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
387 return phys_page_find_alloc(index
, 0);
390 #if !defined(CONFIG_USER_ONLY)
391 static void tlb_protect_code(ram_addr_t ram_addr
);
392 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
394 #define mmap_lock() do { } while(0)
395 #define mmap_unlock() do { } while(0)
398 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
400 #if defined(CONFIG_USER_ONLY)
401 /* Currently it is not recommended to allocate big chunks of data in
402 user mode. It will change when a dedicated libc will be used */
403 #define USE_STATIC_CODE_GEN_BUFFER
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
410 static void code_gen_alloc(unsigned long tb_size
)
412 #ifdef USE_STATIC_CODE_GEN_BUFFER
413 code_gen_buffer
= static_code_gen_buffer
;
414 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
415 map_exec(code_gen_buffer
, code_gen_buffer_size
);
417 code_gen_buffer_size
= tb_size
;
418 if (code_gen_buffer_size
== 0) {
419 #if defined(CONFIG_USER_ONLY)
420 /* in user mode, phys_ram_size is not meaningful */
421 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
423 /* XXX: needs adjustments */
424 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
427 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
428 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
429 /* The code gen buffer location may have constraints depending on
430 the host cpu and OS */
431 #if defined(__linux__)
436 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
437 #if defined(__x86_64__)
439 /* Cannot map more than that */
440 if (code_gen_buffer_size
> (800 * 1024 * 1024))
441 code_gen_buffer_size
= (800 * 1024 * 1024);
442 #elif defined(__sparc_v9__)
443 // Map the buffer below 2G, so we can use direct calls and branches
445 start
= (void *) 0x60000000UL
;
446 if (code_gen_buffer_size
> (512 * 1024 * 1024))
447 code_gen_buffer_size
= (512 * 1024 * 1024);
448 #elif defined(__arm__)
449 /* Map the buffer below 32M, so we can use direct calls and branches */
451 start
= (void *) 0x01000000UL
;
452 if (code_gen_buffer_size
> 16 * 1024 * 1024)
453 code_gen_buffer_size
= 16 * 1024 * 1024;
455 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
456 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
458 if (code_gen_buffer
== MAP_FAILED
) {
459 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
463 #elif defined(__FreeBSD__) || defined(__DragonFly__)
467 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
468 #if defined(__x86_64__)
469 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
470 * 0x40000000 is free */
472 addr
= (void *)0x40000000;
473 /* Cannot map more than that */
474 if (code_gen_buffer_size
> (800 * 1024 * 1024))
475 code_gen_buffer_size
= (800 * 1024 * 1024);
477 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
478 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
480 if (code_gen_buffer
== MAP_FAILED
) {
481 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
486 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
487 map_exec(code_gen_buffer
, code_gen_buffer_size
);
489 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
490 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
491 code_gen_buffer_max_size
= code_gen_buffer_size
-
492 code_gen_max_block_size();
493 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
494 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
497 /* Must be called before using the QEMU cpus. 'tb_size' is the size
498 (in bytes) allocated to the translation buffer. Zero means default
500 void cpu_exec_init_all(unsigned long tb_size
)
503 code_gen_alloc(tb_size
);
504 code_gen_ptr
= code_gen_buffer
;
506 #if !defined(CONFIG_USER_ONLY)
511 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
513 #define CPU_COMMON_SAVE_VERSION 1
515 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
517 CPUState
*env
= opaque
;
519 cpu_synchronize_state(env
, 0);
521 qemu_put_be32s(f
, &env
->halted
);
522 qemu_put_be32s(f
, &env
->interrupt_request
);
525 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
527 CPUState
*env
= opaque
;
529 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
532 qemu_get_be32s(f
, &env
->halted
);
533 qemu_get_be32s(f
, &env
->interrupt_request
);
534 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
535 version_id is increased. */
536 env
->interrupt_request
&= ~0x01;
538 cpu_synchronize_state(env
, 1);
544 CPUState
*qemu_get_cpu(int cpu
)
546 CPUState
*env
= first_cpu
;
549 if (env
->cpu_index
== cpu
)
557 void cpu_exec_init(CPUState
*env
)
562 #if defined(CONFIG_USER_ONLY)
565 env
->next_cpu
= NULL
;
568 while (*penv
!= NULL
) {
569 penv
= &(*penv
)->next_cpu
;
572 env
->cpu_index
= cpu_index
;
574 TAILQ_INIT(&env
->breakpoints
);
575 TAILQ_INIT(&env
->watchpoints
);
577 #if defined(CONFIG_USER_ONLY)
580 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
581 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
582 cpu_common_save
, cpu_common_load
, env
);
583 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
584 cpu_save
, cpu_load
, env
);
588 static inline void invalidate_page_bitmap(PageDesc
*p
)
590 if (p
->code_bitmap
) {
591 qemu_free(p
->code_bitmap
);
592 p
->code_bitmap
= NULL
;
594 p
->code_write_count
= 0;
597 /* set to NULL all the 'first_tb' fields in all PageDescs */
598 static void page_flush_tb(void)
603 for(i
= 0; i
< L1_SIZE
; i
++) {
606 for(j
= 0; j
< L2_SIZE
; j
++) {
608 invalidate_page_bitmap(p
);
615 /* flush all the translation blocks */
616 /* XXX: tb_flush is currently not thread safe */
617 void tb_flush(CPUState
*env1
)
620 #if defined(DEBUG_FLUSH)
621 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
622 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
624 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
626 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
627 cpu_abort(env1
, "Internal error: code buffer overflow\n");
631 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
632 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
635 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
638 code_gen_ptr
= code_gen_buffer
;
639 /* XXX: flush processor icache at this point if cache flush is
644 #ifdef DEBUG_TB_CHECK
646 static void tb_invalidate_check(target_ulong address
)
648 TranslationBlock
*tb
;
650 address
&= TARGET_PAGE_MASK
;
651 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
652 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
653 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
654 address
>= tb
->pc
+ tb
->size
)) {
655 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
656 address
, (long)tb
->pc
, tb
->size
);
662 /* verify that all the pages have correct rights for code */
663 static void tb_page_check(void)
665 TranslationBlock
*tb
;
666 int i
, flags1
, flags2
;
668 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
669 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
670 flags1
= page_get_flags(tb
->pc
);
671 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
672 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
673 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
674 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
680 static void tb_jmp_check(TranslationBlock
*tb
)
682 TranslationBlock
*tb1
;
685 /* suppress any remaining jumps to this TB */
689 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
692 tb1
= tb1
->jmp_next
[n1
];
694 /* check end of list */
696 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
702 /* invalidate one TB */
703 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
706 TranslationBlock
*tb1
;
710 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
713 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
717 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
719 TranslationBlock
*tb1
;
725 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
727 *ptb
= tb1
->page_next
[n1
];
730 ptb
= &tb1
->page_next
[n1
];
734 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
736 TranslationBlock
*tb1
, **ptb
;
739 ptb
= &tb
->jmp_next
[n
];
742 /* find tb(n) in circular list */
746 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
747 if (n1
== n
&& tb1
== tb
)
750 ptb
= &tb1
->jmp_first
;
752 ptb
= &tb1
->jmp_next
[n1
];
755 /* now we can suppress tb(n) from the list */
756 *ptb
= tb
->jmp_next
[n
];
758 tb
->jmp_next
[n
] = NULL
;
762 /* reset the jump entry 'n' of a TB so that it is not chained to
764 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
766 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
769 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
774 target_phys_addr_t phys_pc
;
775 TranslationBlock
*tb1
, *tb2
;
777 /* remove the TB from the hash list */
778 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
779 h
= tb_phys_hash_func(phys_pc
);
780 tb_remove(&tb_phys_hash
[h
], tb
,
781 offsetof(TranslationBlock
, phys_hash_next
));
783 /* remove the TB from the page list */
784 if (tb
->page_addr
[0] != page_addr
) {
785 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
786 tb_page_remove(&p
->first_tb
, tb
);
787 invalidate_page_bitmap(p
);
789 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
790 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
791 tb_page_remove(&p
->first_tb
, tb
);
792 invalidate_page_bitmap(p
);
795 tb_invalidated_flag
= 1;
797 /* remove the TB from the hash list */
798 h
= tb_jmp_cache_hash_func(tb
->pc
);
799 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
800 if (env
->tb_jmp_cache
[h
] == tb
)
801 env
->tb_jmp_cache
[h
] = NULL
;
804 /* suppress this TB from the two jump lists */
805 tb_jmp_remove(tb
, 0);
806 tb_jmp_remove(tb
, 1);
808 /* suppress any remaining jumps to this TB */
814 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
815 tb2
= tb1
->jmp_next
[n1
];
816 tb_reset_jump(tb1
, n1
);
817 tb1
->jmp_next
[n1
] = NULL
;
820 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
822 tb_phys_invalidate_count
++;
825 static inline void set_bits(uint8_t *tab
, int start
, int len
)
831 mask
= 0xff << (start
& 7);
832 if ((start
& ~7) == (end
& ~7)) {
834 mask
&= ~(0xff << (end
& 7));
839 start
= (start
+ 8) & ~7;
841 while (start
< end1
) {
846 mask
= ~(0xff << (end
& 7));
852 static void build_page_bitmap(PageDesc
*p
)
854 int n
, tb_start
, tb_end
;
855 TranslationBlock
*tb
;
857 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
862 tb
= (TranslationBlock
*)((long)tb
& ~3);
863 /* NOTE: this is subtle as a TB may span two physical pages */
865 /* NOTE: tb_end may be after the end of the page, but
866 it is not a problem */
867 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
868 tb_end
= tb_start
+ tb
->size
;
869 if (tb_end
> TARGET_PAGE_SIZE
)
870 tb_end
= TARGET_PAGE_SIZE
;
873 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
875 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
876 tb
= tb
->page_next
[n
];
880 TranslationBlock
*tb_gen_code(CPUState
*env
,
881 target_ulong pc
, target_ulong cs_base
,
882 int flags
, int cflags
)
884 TranslationBlock
*tb
;
886 target_ulong phys_pc
, phys_page2
, virt_page2
;
889 phys_pc
= get_phys_addr_code(env
, pc
);
892 /* flush must be done */
894 /* cannot fail at this point */
896 /* Don't forget to invalidate previous TB info. */
897 tb_invalidated_flag
= 1;
899 tc_ptr
= code_gen_ptr
;
901 tb
->cs_base
= cs_base
;
904 cpu_gen_code(env
, tb
, &code_gen_size
);
905 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
907 /* check next page if needed */
908 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
910 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
911 phys_page2
= get_phys_addr_code(env
, virt_page2
);
913 tb_link_phys(tb
, phys_pc
, phys_page2
);
917 /* invalidate all TBs which intersect with the target physical page
918 starting in range [start;end[. NOTE: start and end must refer to
919 the same physical page. 'is_cpu_write_access' should be true if called
920 from a real cpu write access: the virtual CPU will exit the current
921 TB if code is modified inside this TB. */
922 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
923 int is_cpu_write_access
)
925 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
926 CPUState
*env
= cpu_single_env
;
927 target_ulong tb_start
, tb_end
;
930 #ifdef TARGET_HAS_PRECISE_SMC
931 int current_tb_not_found
= is_cpu_write_access
;
932 TranslationBlock
*current_tb
= NULL
;
933 int current_tb_modified
= 0;
934 target_ulong current_pc
= 0;
935 target_ulong current_cs_base
= 0;
936 int current_flags
= 0;
937 #endif /* TARGET_HAS_PRECISE_SMC */
939 p
= page_find(start
>> TARGET_PAGE_BITS
);
942 if (!p
->code_bitmap
&&
943 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
944 is_cpu_write_access
) {
945 /* build code bitmap */
946 build_page_bitmap(p
);
949 /* we remove all the TBs in the range [start, end[ */
950 /* XXX: see if in some cases it could be faster to invalidate all the code */
954 tb
= (TranslationBlock
*)((long)tb
& ~3);
955 tb_next
= tb
->page_next
[n
];
956 /* NOTE: this is subtle as a TB may span two physical pages */
958 /* NOTE: tb_end may be after the end of the page, but
959 it is not a problem */
960 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
961 tb_end
= tb_start
+ tb
->size
;
963 tb_start
= tb
->page_addr
[1];
964 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
966 if (!(tb_end
<= start
|| tb_start
>= end
)) {
967 #ifdef TARGET_HAS_PRECISE_SMC
968 if (current_tb_not_found
) {
969 current_tb_not_found
= 0;
971 if (env
->mem_io_pc
) {
972 /* now we have a real cpu fault */
973 current_tb
= tb_find_pc(env
->mem_io_pc
);
976 if (current_tb
== tb
&&
977 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
978 /* If we are modifying the current TB, we must stop
979 its execution. We could be more precise by checking
980 that the modification is after the current PC, but it
981 would require a specialized function to partially
982 restore the CPU state */
984 current_tb_modified
= 1;
985 cpu_restore_state(current_tb
, env
,
986 env
->mem_io_pc
, NULL
);
987 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
990 #endif /* TARGET_HAS_PRECISE_SMC */
991 /* we need to do that to handle the case where a signal
992 occurs while doing tb_phys_invalidate() */
995 saved_tb
= env
->current_tb
;
996 env
->current_tb
= NULL
;
998 tb_phys_invalidate(tb
, -1);
1000 env
->current_tb
= saved_tb
;
1001 if (env
->interrupt_request
&& env
->current_tb
)
1002 cpu_interrupt(env
, env
->interrupt_request
);
1007 #if !defined(CONFIG_USER_ONLY)
1008 /* if no code remaining, no need to continue to use slow writes */
1010 invalidate_page_bitmap(p
);
1011 if (is_cpu_write_access
) {
1012 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1016 #ifdef TARGET_HAS_PRECISE_SMC
1017 if (current_tb_modified
) {
1018 /* we generate a block containing just the instruction
1019 modifying the memory. It will ensure that it cannot modify
1021 env
->current_tb
= NULL
;
1022 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1023 cpu_resume_from_signal(env
, NULL
);
1028 /* len must be <= 8 and start must be a multiple of len */
1029 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1035 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1036 cpu_single_env
->mem_io_vaddr
, len
,
1037 cpu_single_env
->eip
,
1038 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1041 p
= page_find(start
>> TARGET_PAGE_BITS
);
1044 if (p
->code_bitmap
) {
1045 offset
= start
& ~TARGET_PAGE_MASK
;
1046 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1047 if (b
& ((1 << len
) - 1))
1051 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1055 #if !defined(CONFIG_SOFTMMU)
1056 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1057 unsigned long pc
, void *puc
)
1059 TranslationBlock
*tb
;
1062 #ifdef TARGET_HAS_PRECISE_SMC
1063 TranslationBlock
*current_tb
= NULL
;
1064 CPUState
*env
= cpu_single_env
;
1065 int current_tb_modified
= 0;
1066 target_ulong current_pc
= 0;
1067 target_ulong current_cs_base
= 0;
1068 int current_flags
= 0;
1071 addr
&= TARGET_PAGE_MASK
;
1072 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1076 #ifdef TARGET_HAS_PRECISE_SMC
1077 if (tb
&& pc
!= 0) {
1078 current_tb
= tb_find_pc(pc
);
1081 while (tb
!= NULL
) {
1083 tb
= (TranslationBlock
*)((long)tb
& ~3);
1084 #ifdef TARGET_HAS_PRECISE_SMC
1085 if (current_tb
== tb
&&
1086 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1087 /* If we are modifying the current TB, we must stop
1088 its execution. We could be more precise by checking
1089 that the modification is after the current PC, but it
1090 would require a specialized function to partially
1091 restore the CPU state */
1093 current_tb_modified
= 1;
1094 cpu_restore_state(current_tb
, env
, pc
, puc
);
1095 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1098 #endif /* TARGET_HAS_PRECISE_SMC */
1099 tb_phys_invalidate(tb
, addr
);
1100 tb
= tb
->page_next
[n
];
1103 #ifdef TARGET_HAS_PRECISE_SMC
1104 if (current_tb_modified
) {
1105 /* we generate a block containing just the instruction
1106 modifying the memory. It will ensure that it cannot modify
1108 env
->current_tb
= NULL
;
1109 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1110 cpu_resume_from_signal(env
, puc
);
1116 /* add the tb in the target page and protect it if necessary */
1117 static inline void tb_alloc_page(TranslationBlock
*tb
,
1118 unsigned int n
, target_ulong page_addr
)
1121 TranslationBlock
*last_first_tb
;
1123 tb
->page_addr
[n
] = page_addr
;
1124 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1125 tb
->page_next
[n
] = p
->first_tb
;
1126 last_first_tb
= p
->first_tb
;
1127 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1128 invalidate_page_bitmap(p
);
1130 #if defined(TARGET_HAS_SMC) || 1
1132 #if defined(CONFIG_USER_ONLY)
1133 if (p
->flags
& PAGE_WRITE
) {
1138 /* force the host page as non writable (writes will have a
1139 page fault + mprotect overhead) */
1140 page_addr
&= qemu_host_page_mask
;
1142 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1143 addr
+= TARGET_PAGE_SIZE
) {
1145 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1149 p2
->flags
&= ~PAGE_WRITE
;
1150 page_get_flags(addr
);
1152 mprotect(g2h(page_addr
), qemu_host_page_size
,
1153 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1154 #ifdef DEBUG_TB_INVALIDATE
1155 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1160 /* if some code is already present, then the pages are already
1161 protected. So we handle the case where only the first TB is
1162 allocated in a physical page */
1163 if (!last_first_tb
) {
1164 tlb_protect_code(page_addr
);
1168 #endif /* TARGET_HAS_SMC */
1171 /* Allocate a new translation block. Flush the translation buffer if
1172 too many translation blocks or too much generated code. */
1173 TranslationBlock
*tb_alloc(target_ulong pc
)
1175 TranslationBlock
*tb
;
1177 if (nb_tbs
>= code_gen_max_blocks
||
1178 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1180 tb
= &tbs
[nb_tbs
++];
1186 void tb_free(TranslationBlock
*tb
)
1188 /* In practice this is mostly used for single use temporary TB
1189 Ignore the hard cases and just back up if this TB happens to
1190 be the last one generated. */
1191 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1192 code_gen_ptr
= tb
->tc_ptr
;
1197 /* add a new TB and link it to the physical page tables. phys_page2 is
1198 (-1) to indicate that only one page contains the TB. */
1199 void tb_link_phys(TranslationBlock
*tb
,
1200 target_ulong phys_pc
, target_ulong phys_page2
)
1203 TranslationBlock
**ptb
;
1205 /* Grab the mmap lock to stop another thread invalidating this TB
1206 before we are done. */
1208 /* add in the physical hash table */
1209 h
= tb_phys_hash_func(phys_pc
);
1210 ptb
= &tb_phys_hash
[h
];
1211 tb
->phys_hash_next
= *ptb
;
1214 /* add in the page list */
1215 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1216 if (phys_page2
!= -1)
1217 tb_alloc_page(tb
, 1, phys_page2
);
1219 tb
->page_addr
[1] = -1;
1221 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1222 tb
->jmp_next
[0] = NULL
;
1223 tb
->jmp_next
[1] = NULL
;
1225 /* init original jump addresses */
1226 if (tb
->tb_next_offset
[0] != 0xffff)
1227 tb_reset_jump(tb
, 0);
1228 if (tb
->tb_next_offset
[1] != 0xffff)
1229 tb_reset_jump(tb
, 1);
1231 #ifdef DEBUG_TB_CHECK
1237 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1238 tb[1].tc_ptr. Return NULL if not found */
1239 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1241 int m_min
, m_max
, m
;
1243 TranslationBlock
*tb
;
1247 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1248 tc_ptr
>= (unsigned long)code_gen_ptr
)
1250 /* binary search (cf Knuth) */
1253 while (m_min
<= m_max
) {
1254 m
= (m_min
+ m_max
) >> 1;
1256 v
= (unsigned long)tb
->tc_ptr
;
1259 else if (tc_ptr
< v
) {
1268 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1270 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1272 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1275 tb1
= tb
->jmp_next
[n
];
1277 /* find head of list */
1280 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1283 tb1
= tb1
->jmp_next
[n1
];
1285 /* we are now sure now that tb jumps to tb1 */
1288 /* remove tb from the jmp_first list */
1289 ptb
= &tb_next
->jmp_first
;
1293 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1294 if (n1
== n
&& tb1
== tb
)
1296 ptb
= &tb1
->jmp_next
[n1
];
1298 *ptb
= tb
->jmp_next
[n
];
1299 tb
->jmp_next
[n
] = NULL
;
1301 /* suppress the jump to next tb in generated code */
1302 tb_reset_jump(tb
, n
);
1304 /* suppress jumps in the tb on which we could have jumped */
1305 tb_reset_jump_recursive(tb_next
);
1309 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1311 tb_reset_jump_recursive2(tb
, 0);
1312 tb_reset_jump_recursive2(tb
, 1);
1315 #if defined(TARGET_HAS_ICE)
1316 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1318 target_phys_addr_t addr
;
1320 ram_addr_t ram_addr
;
1323 addr
= cpu_get_phys_page_debug(env
, pc
);
1324 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1326 pd
= IO_MEM_UNASSIGNED
;
1328 pd
= p
->phys_offset
;
1330 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1331 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1335 /* Add a watchpoint. */
1336 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1337 int flags
, CPUWatchpoint
**watchpoint
)
1339 target_ulong len_mask
= ~(len
- 1);
1342 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1343 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1344 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1345 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1348 wp
= qemu_malloc(sizeof(*wp
));
1351 wp
->len_mask
= len_mask
;
1354 /* keep all GDB-injected watchpoints in front */
1356 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1358 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1360 tlb_flush_page(env
, addr
);
1367 /* Remove a specific watchpoint. */
1368 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1371 target_ulong len_mask
= ~(len
- 1);
1374 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1375 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1376 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1377 cpu_watchpoint_remove_by_ref(env
, wp
);
1384 /* Remove a specific watchpoint by reference. */
1385 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1387 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1389 tlb_flush_page(env
, watchpoint
->vaddr
);
1391 qemu_free(watchpoint
);
1394 /* Remove all matching watchpoints. */
1395 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1397 CPUWatchpoint
*wp
, *next
;
1399 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1400 if (wp
->flags
& mask
)
1401 cpu_watchpoint_remove_by_ref(env
, wp
);
1405 /* Add a breakpoint. */
1406 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1407 CPUBreakpoint
**breakpoint
)
1409 #if defined(TARGET_HAS_ICE)
1412 bp
= qemu_malloc(sizeof(*bp
));
1417 /* keep all GDB-injected breakpoints in front */
1419 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1421 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1423 breakpoint_invalidate(env
, pc
);
1433 /* Remove a specific breakpoint. */
1434 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1436 #if defined(TARGET_HAS_ICE)
1439 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1440 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1441 cpu_breakpoint_remove_by_ref(env
, bp
);
1451 /* Remove a specific breakpoint by reference. */
1452 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1454 #if defined(TARGET_HAS_ICE)
1455 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1457 breakpoint_invalidate(env
, breakpoint
->pc
);
1459 qemu_free(breakpoint
);
1463 /* Remove all matching breakpoints. */
1464 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1466 #if defined(TARGET_HAS_ICE)
1467 CPUBreakpoint
*bp
, *next
;
1469 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1470 if (bp
->flags
& mask
)
1471 cpu_breakpoint_remove_by_ref(env
, bp
);
1476 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1477 CPU loop after each instruction */
1478 void cpu_single_step(CPUState
*env
, int enabled
)
1480 #if defined(TARGET_HAS_ICE)
1481 if (env
->singlestep_enabled
!= enabled
) {
1482 env
->singlestep_enabled
= enabled
;
1484 kvm_update_guest_debug(env
, 0);
1486 /* must flush all the translated code to avoid inconsistencies */
1487 /* XXX: only flush what is necessary */
1494 /* enable or disable low levels log */
1495 void cpu_set_log(int log_flags
)
1497 loglevel
= log_flags
;
1498 if (loglevel
&& !logfile
) {
1499 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1501 perror(logfilename
);
1504 #if !defined(CONFIG_SOFTMMU)
1505 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1507 static char logfile_buf
[4096];
1508 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1511 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1515 if (!loglevel
&& logfile
) {
1521 void cpu_set_log_filename(const char *filename
)
1523 logfilename
= strdup(filename
);
1528 cpu_set_log(loglevel
);
1531 static void cpu_unlink_tb(CPUState
*env
)
1533 #if defined(USE_NPTL)
1534 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1535 problem and hope the cpu will stop of its own accord. For userspace
1536 emulation this often isn't actually as bad as it sounds. Often
1537 signals are used primarily to interrupt blocking syscalls. */
1539 TranslationBlock
*tb
;
1540 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1542 tb
= env
->current_tb
;
1543 /* if the cpu is currently executing code, we must unlink it and
1544 all the potentially executing TB */
1545 if (tb
&& !testandset(&interrupt_lock
)) {
1546 env
->current_tb
= NULL
;
1547 tb_reset_jump_recursive(tb
);
1548 resetlock(&interrupt_lock
);
1553 /* mask must never be zero, except for A20 change call */
1554 void cpu_interrupt(CPUState
*env
, int mask
)
1558 old_mask
= env
->interrupt_request
;
1559 env
->interrupt_request
|= mask
;
1561 #ifndef CONFIG_USER_ONLY
1563 * If called from iothread context, wake the target cpu in
1566 if (!qemu_cpu_self(env
)) {
1573 env
->icount_decr
.u16
.high
= 0xffff;
1574 #ifndef CONFIG_USER_ONLY
1576 && (mask
& ~old_mask
) != 0) {
1577 cpu_abort(env
, "Raised interrupt while not in I/O function");
1585 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1587 env
->interrupt_request
&= ~mask
;
1590 void cpu_exit(CPUState
*env
)
1592 env
->exit_request
= 1;
1596 const CPULogItem cpu_log_items
[] = {
1597 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1598 "show generated host assembly code for each compiled TB" },
1599 { CPU_LOG_TB_IN_ASM
, "in_asm",
1600 "show target assembly code for each compiled TB" },
1601 { CPU_LOG_TB_OP
, "op",
1602 "show micro ops for each compiled TB" },
1603 { CPU_LOG_TB_OP_OPT
, "op_opt",
1606 "before eflags optimization and "
1608 "after liveness analysis" },
1609 { CPU_LOG_INT
, "int",
1610 "show interrupts/exceptions in short format" },
1611 { CPU_LOG_EXEC
, "exec",
1612 "show trace before each executed TB (lots of logs)" },
1613 { CPU_LOG_TB_CPU
, "cpu",
1614 "show CPU state before block translation" },
1616 { CPU_LOG_PCALL
, "pcall",
1617 "show protected mode far calls/returns/exceptions" },
1618 { CPU_LOG_RESET
, "cpu_reset",
1619 "show CPU state before CPU resets" },
1622 { CPU_LOG_IOPORT
, "ioport",
1623 "show all i/o ports accesses" },
1628 static int cmp1(const char *s1
, int n
, const char *s2
)
1630 if (strlen(s2
) != n
)
1632 return memcmp(s1
, s2
, n
) == 0;
1635 /* takes a comma separated list of log masks. Return 0 if error. */
1636 int cpu_str_to_log_mask(const char *str
)
1638 const CPULogItem
*item
;
1645 p1
= strchr(p
, ',');
1648 if(cmp1(p
,p1
-p
,"all")) {
1649 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1653 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1654 if (cmp1(p
, p1
- p
, item
->name
))
1668 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1675 fprintf(stderr
, "qemu: fatal: ");
1676 vfprintf(stderr
, fmt
, ap
);
1677 fprintf(stderr
, "\n");
1679 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1681 cpu_dump_state(env
, stderr
, fprintf
, 0);
1683 if (qemu_log_enabled()) {
1684 qemu_log("qemu: fatal: ");
1685 qemu_log_vprintf(fmt
, ap2
);
1688 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1690 log_cpu_state(env
, 0);
1700 CPUState
*cpu_copy(CPUState
*env
)
1702 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1703 CPUState
*next_cpu
= new_env
->next_cpu
;
1704 int cpu_index
= new_env
->cpu_index
;
1705 #if defined(TARGET_HAS_ICE)
1710 memcpy(new_env
, env
, sizeof(CPUState
));
1712 /* Preserve chaining and index. */
1713 new_env
->next_cpu
= next_cpu
;
1714 new_env
->cpu_index
= cpu_index
;
1716 /* Clone all break/watchpoints.
1717 Note: Once we support ptrace with hw-debug register access, make sure
1718 BP_CPU break/watchpoints are handled correctly on clone. */
1719 TAILQ_INIT(&env
->breakpoints
);
1720 TAILQ_INIT(&env
->watchpoints
);
1721 #if defined(TARGET_HAS_ICE)
1722 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1723 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1725 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1726 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1734 #if !defined(CONFIG_USER_ONLY)
1736 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1740 /* Discard jump cache entries for any tb which might potentially
1741 overlap the flushed page. */
1742 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1743 memset (&env
->tb_jmp_cache
[i
], 0,
1744 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1746 i
= tb_jmp_cache_hash_page(addr
);
1747 memset (&env
->tb_jmp_cache
[i
], 0,
1748 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1751 /* NOTE: if flush_global is true, also flush global entries (not
1753 void tlb_flush(CPUState
*env
, int flush_global
)
1757 #if defined(DEBUG_TLB)
1758 printf("tlb_flush:\n");
1760 /* must reset current TB so that interrupts cannot modify the
1761 links while we are modifying them */
1762 env
->current_tb
= NULL
;
1764 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1766 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1767 env
->tlb_table
[mmu_idx
][i
].addr_read
= -1;
1768 env
->tlb_table
[mmu_idx
][i
].addr_write
= -1;
1769 env
->tlb_table
[mmu_idx
][i
].addr_code
= -1;
1773 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1776 if (env
->kqemu_enabled
) {
1777 kqemu_flush(env
, flush_global
);
1783 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1785 if (addr
== (tlb_entry
->addr_read
&
1786 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1787 addr
== (tlb_entry
->addr_write
&
1788 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1789 addr
== (tlb_entry
->addr_code
&
1790 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1791 tlb_entry
->addr_read
= -1;
1792 tlb_entry
->addr_write
= -1;
1793 tlb_entry
->addr_code
= -1;
1797 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1802 #if defined(DEBUG_TLB)
1803 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1805 /* must reset current TB so that interrupts cannot modify the
1806 links while we are modifying them */
1807 env
->current_tb
= NULL
;
1809 addr
&= TARGET_PAGE_MASK
;
1810 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1811 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1812 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1814 tlb_flush_jmp_cache(env
, addr
);
1817 if (env
->kqemu_enabled
) {
1818 kqemu_flush_page(env
, addr
);
1823 /* update the TLBs so that writes to code in the virtual page 'addr'
1825 static void tlb_protect_code(ram_addr_t ram_addr
)
1827 cpu_physical_memory_reset_dirty(ram_addr
,
1828 ram_addr
+ TARGET_PAGE_SIZE
,
1832 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1833 tested for self modifying code */
1834 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1837 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1840 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1841 unsigned long start
, unsigned long length
)
1844 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1845 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1846 if ((addr
- start
) < length
) {
1847 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1852 /* Note: start and end must be within the same ram block. */
1853 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1857 unsigned long length
, start1
;
1861 start
&= TARGET_PAGE_MASK
;
1862 end
= TARGET_PAGE_ALIGN(end
);
1864 length
= end
- start
;
1867 len
= length
>> TARGET_PAGE_BITS
;
1869 /* XXX: should not depend on cpu context */
1871 if (env
->kqemu_enabled
) {
1874 for(i
= 0; i
< len
; i
++) {
1875 kqemu_set_notdirty(env
, addr
);
1876 addr
+= TARGET_PAGE_SIZE
;
1880 mask
= ~dirty_flags
;
1881 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1882 for(i
= 0; i
< len
; i
++)
1885 /* we modify the TLB cache so that the dirty bit will be set again
1886 when accessing the range */
1887 start1
= (unsigned long)qemu_get_ram_ptr(start
);
1888 /* Chek that we don't span multiple blocks - this breaks the
1889 address comparisons below. */
1890 if ((unsigned long)qemu_get_ram_ptr(end
- 1) - start1
1891 != (end
- 1) - start
) {
1895 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1897 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1898 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1899 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
1905 int cpu_physical_memory_set_dirty_tracking(int enable
)
1907 in_migration
= enable
;
1908 if (kvm_enabled()) {
1909 return kvm_set_migration_log(enable
);
1914 int cpu_physical_memory_get_dirty_tracking(void)
1916 return in_migration
;
1919 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
1920 target_phys_addr_t end_addr
)
1925 ret
= kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1929 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1931 ram_addr_t ram_addr
;
1934 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1935 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
1936 + tlb_entry
->addend
);
1937 ram_addr
= qemu_ram_addr_from_host(p
);
1938 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1939 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1944 /* update the TLB according to the current state of the dirty bits */
1945 void cpu_tlb_update_dirty(CPUState
*env
)
1949 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1950 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1951 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
1955 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1957 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1958 tlb_entry
->addr_write
= vaddr
;
1961 /* update the TLB corresponding to virtual page vaddr
1962 so that it is no longer dirty */
1963 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1968 vaddr
&= TARGET_PAGE_MASK
;
1969 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1970 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1971 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
1974 /* add a new TLB entry. At most one entry for a given virtual address
1975 is permitted. Return 0 if OK or 2 if the page could not be mapped
1976 (can only happen in non SOFTMMU mode for I/O pages or pages
1977 conflicting with the host address space). */
1978 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1979 target_phys_addr_t paddr
, int prot
,
1980 int mmu_idx
, int is_softmmu
)
1985 target_ulong address
;
1986 target_ulong code_address
;
1987 target_phys_addr_t addend
;
1991 target_phys_addr_t iotlb
;
1993 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1995 pd
= IO_MEM_UNASSIGNED
;
1997 pd
= p
->phys_offset
;
1999 #if defined(DEBUG_TLB)
2000 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2001 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2006 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2007 /* IO memory case (romd handled later) */
2008 address
|= TLB_MMIO
;
2010 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2011 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2013 iotlb
= pd
& TARGET_PAGE_MASK
;
2014 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2015 iotlb
|= IO_MEM_NOTDIRTY
;
2017 iotlb
|= IO_MEM_ROM
;
2019 /* IO handlers are currently passed a physical address.
2020 It would be nice to pass an offset from the base address
2021 of that region. This would avoid having to special case RAM,
2022 and avoid full address decoding in every device.
2023 We can't use the high bits of pd for this because
2024 IO_MEM_ROMD uses these as a ram address. */
2025 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2027 iotlb
+= p
->region_offset
;
2033 code_address
= address
;
2034 /* Make accesses to pages with watchpoints go via the
2035 watchpoint trap routines. */
2036 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2037 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2038 iotlb
= io_mem_watch
+ paddr
;
2039 /* TODO: The memory case can be optimized by not trapping
2040 reads of pages with a write breakpoint. */
2041 address
|= TLB_MMIO
;
2045 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2046 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2047 te
= &env
->tlb_table
[mmu_idx
][index
];
2048 te
->addend
= addend
- vaddr
;
2049 if (prot
& PAGE_READ
) {
2050 te
->addr_read
= address
;
2055 if (prot
& PAGE_EXEC
) {
2056 te
->addr_code
= code_address
;
2060 if (prot
& PAGE_WRITE
) {
2061 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2062 (pd
& IO_MEM_ROMD
)) {
2063 /* Write access calls the I/O callback. */
2064 te
->addr_write
= address
| TLB_MMIO
;
2065 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2066 !cpu_physical_memory_is_dirty(pd
)) {
2067 te
->addr_write
= address
| TLB_NOTDIRTY
;
2069 te
->addr_write
= address
;
2072 te
->addr_write
= -1;
2079 void tlb_flush(CPUState
*env
, int flush_global
)
2083 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2087 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2088 target_phys_addr_t paddr
, int prot
,
2089 int mmu_idx
, int is_softmmu
)
2095 * Walks guest process memory "regions" one by one
2096 * and calls callback function 'fn' for each region.
2098 int walk_memory_regions(void *priv
,
2099 int (*fn
)(void *, unsigned long, unsigned long, unsigned long))
2101 unsigned long start
, end
;
2103 int i
, j
, prot
, prot1
;
2109 for (i
= 0; i
<= L1_SIZE
; i
++) {
2110 p
= (i
< L1_SIZE
) ? l1_map
[i
] : NULL
;
2111 for (j
= 0; j
< L2_SIZE
; j
++) {
2112 prot1
= (p
== NULL
) ? 0 : p
[j
].flags
;
2114 * "region" is one continuous chunk of memory
2115 * that has same protection flags set.
2117 if (prot1
!= prot
) {
2118 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2120 rc
= (*fn
)(priv
, start
, end
, prot
);
2121 /* callback can stop iteration by returning != 0 */
2138 static int dump_region(void *priv
, unsigned long start
,
2139 unsigned long end
, unsigned long prot
)
2141 FILE *f
= (FILE *)priv
;
2143 (void) fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2144 start
, end
, end
- start
,
2145 ((prot
& PAGE_READ
) ? 'r' : '-'),
2146 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2147 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2152 /* dump memory mappings */
2153 void page_dump(FILE *f
)
2155 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2156 "start", "end", "size", "prot");
2157 walk_memory_regions(f
, dump_region
);
2160 int page_get_flags(target_ulong address
)
2164 p
= page_find(address
>> TARGET_PAGE_BITS
);
2170 /* modify the flags of a page and invalidate the code if
2171 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2172 depending on PAGE_WRITE */
2173 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2178 /* mmap_lock should already be held. */
2179 start
= start
& TARGET_PAGE_MASK
;
2180 end
= TARGET_PAGE_ALIGN(end
);
2181 if (flags
& PAGE_WRITE
)
2182 flags
|= PAGE_WRITE_ORG
;
2183 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2184 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2185 /* We may be called for host regions that are outside guest
2189 /* if the write protection is set, then we invalidate the code
2191 if (!(p
->flags
& PAGE_WRITE
) &&
2192 (flags
& PAGE_WRITE
) &&
2194 tb_invalidate_phys_page(addr
, 0, NULL
);
2200 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2206 if (start
+ len
< start
)
2207 /* we've wrapped around */
2210 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2211 start
= start
& TARGET_PAGE_MASK
;
2213 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2214 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2217 if( !(p
->flags
& PAGE_VALID
) )
2220 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2222 if (flags
& PAGE_WRITE
) {
2223 if (!(p
->flags
& PAGE_WRITE_ORG
))
2225 /* unprotect the page if it was put read-only because it
2226 contains translated code */
2227 if (!(p
->flags
& PAGE_WRITE
)) {
2228 if (!page_unprotect(addr
, 0, NULL
))
2237 /* called from signal handler: invalidate the code and unprotect the
2238 page. Return TRUE if the fault was successfully handled. */
2239 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2241 unsigned int page_index
, prot
, pindex
;
2243 target_ulong host_start
, host_end
, addr
;
2245 /* Technically this isn't safe inside a signal handler. However we
2246 know this only ever happens in a synchronous SEGV handler, so in
2247 practice it seems to be ok. */
2250 host_start
= address
& qemu_host_page_mask
;
2251 page_index
= host_start
>> TARGET_PAGE_BITS
;
2252 p1
= page_find(page_index
);
2257 host_end
= host_start
+ qemu_host_page_size
;
2260 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2264 /* if the page was really writable, then we change its
2265 protection back to writable */
2266 if (prot
& PAGE_WRITE_ORG
) {
2267 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2268 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2269 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2270 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2271 p1
[pindex
].flags
|= PAGE_WRITE
;
2272 /* and since the content will be modified, we must invalidate
2273 the corresponding translated code. */
2274 tb_invalidate_phys_page(address
, pc
, puc
);
2275 #ifdef DEBUG_TB_CHECK
2276 tb_invalidate_check(address
);
2286 static inline void tlb_set_dirty(CPUState
*env
,
2287 unsigned long addr
, target_ulong vaddr
)
2290 #endif /* defined(CONFIG_USER_ONLY) */
2292 #if !defined(CONFIG_USER_ONLY)
2294 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2295 ram_addr_t memory
, ram_addr_t region_offset
);
2296 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2297 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2298 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2301 if (addr > start_addr) \
2304 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2305 if (start_addr2 > 0) \
2309 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2310 end_addr2 = TARGET_PAGE_SIZE - 1; \
2312 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2313 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef CONFIG_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
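
/* Illustrative sketch (not part of the original file): how a board model
   typically uses the registration call above.  The cpu_register_physical_memory()
   wrapper (region_offset == 0) is assumed from the surrounding QEMU headers;
   the addresses, sizes and parameter names are made up. */
#if 0
static void example_register_regions(ram_addr_t ram_off, int mmio_io_index)
{
    /* Plain RAM: phys_offset is a RAM offset, low bits equal IO_MEM_RAM (0). */
    cpu_register_physical_memory(0x00000000, 0x800000, ram_off | IO_MEM_RAM);
    /* MMIO: phys_offset carries the io index returned by cpu_register_io_memory(). */
    cpu_register_physical_memory(0x10000000, 0x1000, mmio_io_index);
}
#endif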
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
#ifdef CONFIG_KQEMU
/* XXX: better than nothing */
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
        abort();
    }
    addr = last_ram_offset;
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
    return addr;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_ram_alloc(size);
    }
#endif

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = qemu_vmalloc(size);
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
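
/* Illustrative sketch (not from the original source): allocating guest RAM and
   mapping it into the physical address space.  EXAMPLE_RAM_SIZE, the base
   address and cpu_register_physical_memory() (assumed from the surrounding
   QEMU headers) are illustrative assumptions. */
#if 0
#define EXAMPLE_RAM_SIZE (64 * 1024 * 1024)
static void example_alloc_ram(void)
{
    ram_addr_t off = qemu_ram_alloc(EXAMPLE_RAM_SIZE);
    cpu_register_physical_memory(0x00000000, EXAMPLE_RAM_SIZE, off | IO_MEM_RAM);
}
#endif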
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_phys_ram_base + addr;
    }
#endif

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
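
/* Illustrative sketch (not from the original source): per the comment above,
   qemu_get_ram_ptr() is meant for memory the device itself owns, e.g. video
   RAM.  'vram_offset' is a hypothetical ram_addr_t obtained from qemu_ram_alloc(). */
#if 0
static void example_fill_vram(ram_addr_t vram_offset)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);
    memset(vram, 0, TARGET_PAGE_SIZE);   /* stays within the allocated block */
}
#endif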
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return host - kqemu_phys_ram_base;
    }
#endif

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
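
/* Illustrative sketch (not from the original source): for an address inside a
   registered RAM block, the two helpers above are inverses of each other.
   'ram_offset' is a hypothetical offset returned by qemu_ram_alloc(). */
#if 0
static void example_round_trip(ram_addr_t ram_offset)
{
    void *host = qemu_get_ram_ptr(ram_offset);
    ram_addr_t back = qemu_ram_addr_from_host(host);
    (void)back;   /* back == ram_offset */
}
#endif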
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
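
/* Illustrative sketch (not from the original source): each RAM page has one
   byte in phys_ram_dirty; CODE_DIRTY_FLAG is one bit of it.  A page counts as
   fully dirty when all bits are set (0xff), which is what the writers above
   restore after invalidating translated code. */
#if 0
static int example_page_needs_code_invalidate(ram_addr_t ram_addr)
{
    return !(phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG);
}
#endif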
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
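
/* Illustrative sketch (not from the original source): watchpoints checked
   above are normally installed with the cpu_watchpoint_insert() helper
   assumed to be defined earlier in this file; the address, length and flag
   combination below are made up. */
#if 0
static void example_insert_watchpoint(CPUState *env)
{
    CPUWatchpoint *wp;
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif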
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc **mem_read,
                                        CPUWriteMemoryFunc **mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
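
/* Illustrative sketch (not from the original source): a device registers one
   callback per access size as described in the comment above, then maps the
   returned io index with cpu_register_physical_memory() (assumed from the
   surrounding QEMU headers).  All names and the address are hypothetical. */
#if 0
static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef;                   /* device register value */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* update device state here */
}

static CPUReadMemoryFunc *example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,      /* byte/word omitted -> IO_MEM_SUBWIDTH */
};
static CPUWriteMemoryFunc *example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_map_mmio(void *opaque)
{
    int io = cpu_register_io_memory(example_mmio_read, example_mmio_write, opaque);
    cpu_register_physical_memory(0x10000000, 0x1000, io);
}
#endif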
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        /* alloc dirty bits array */
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    }
#endif
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
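
/* Illustrative sketch (not from the original source): most callers go through
   the cpu_physical_memory_read()/write() wrappers around the function above.
   The copy below and its addresses are hypothetical. */
#if 0
static void example_copy_guest_word(target_phys_addr_t src, target_phys_addr_t dst)
{
    uint8_t tmp[4];
    cpu_physical_memory_read(src, tmp, 4);
    cpu_physical_memory_write(dst, tmp, 4);
}
#endif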
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    unsigned long addr1;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
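
/* Illustrative sketch (not from the original source): the usual DMA pattern
   with the map/unmap pair above.  The transfer parameters are hypothetical;
   a real caller would retry via cpu_register_map_client() when NULL is
   returned because the bounce buffer is busy. */
#if 0
static void example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);
    if (!buf)
        return;                      /* resources exhausted, retry later */
    memcpy(buf, data, plen);         /* plen may be smaller than len */
    cpu_physical_memory_unmap(buf, plen, 1, plen);
}
#endif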
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
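
/* Illustrative sketch (not from the original source): the ld*_phys/st*_phys
   helpers above access guest physical memory in target byte order, e.g. when
   walking guest page tables.  The address is hypothetical. */
#if 0
static void example_increment_guest_counter(target_phys_addr_t addr)
{
    uint32_t v = ldl_phys(addr);
    stl_phys(addr, v + 1);
}
#endif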
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
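
/* Illustrative sketch (not from the original source): the debug accessor above
   takes guest *virtual* addresses, which is what a debugger front end such as
   the gdb stub needs; the helper name below is hypothetical. */
#if 0
static uint32_t example_peek_guest_vaddr(CPUState *env, target_ulong vaddr)
{
    uint32_t val = 0;
    cpu_memory_rw_debug(env, vaddr, (uint8_t *)&val, sizeof(val), 0);
    return val;
}
#endif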
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"