/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
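/* Example of the two-level split: with L2_BITS = 10 and 4 KB target pages
   (TARGET_PAGE_BITS = 12) on a 32-bit guest, L1_BITS = 32 - 10 - 12 = 10,
   so each level indexes 1024 entries:
       page index = address >> TARGET_PAGE_BITS
       L1 slot    = page index >> L2_BITS
       L2 slot    = page index & (L2_SIZE - 1)
   e.g. address 0x12345678 -> page index 0x12345, L1 slot 0x48, L2 slot 0x345. */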
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
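/* A subpage splits a single target page between several memory regions.
   The handler tables are indexed by the byte offset within the page
   (SUBPAGE_IDX) and by the access size (0 = byte, 1 = word, 2 = long);
   opaque and region_offset carry an extra index distinguishing read (0)
   from write (1) accesses. */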
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
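/* The arithmetic above rounds [addr, addr + size) outward to whole host
   pages before changing protection.  For example, with 4096-byte pages,
   addr = 0x1234 and size = 0x100 give start = 0x1000 and end = 0x2000,
   i.e. exactly the one page containing the range. */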
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
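/* Unlike l1_map, the physical page table can grow a third level: when
   TARGET_PHYS_ADDR_SPACE_BITS exceeds 32, the extra L1_BITS of the index
   select an intermediate directory before the usual L1/L2 lookup above.
   phys_page_find() is the non-allocating variant of the lookup. */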
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
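/* Sizing note: code_gen_buffer_max_size keeps one maximum-sized block of
   headroom below the end of the buffer, so a translation that starts just
   under the threshold cannot overrun it, and code_gen_max_blocks sizes the
   TB array from the average block size.  Hitting either limit triggers a
   full tb_flush(). */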
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env, 0);

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);
    cpu_synchronize_state(env, 1);

    return 0;
}
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
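/* Worked example: set_bits(tab, 3, 7) marks bits 3..9.  The first byte
   gets mask 0xff << 3 = 0xf8 (bits 3-7), start is then rounded up to 8,
   no full 0xff byte is needed, and the tail byte gets
   ~(0xff << (10 & 7)), i.e. 0x03 (bits 8-9). */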
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
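/* The bitmap holds one bit per byte of the page (TARGET_PAGE_SIZE / 8
   bytes in total), set wherever translated code lies.  It lets
   tb_invalidate_phys_page_fast() prove that a small write misses all
   translated code without walking the TB list. */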
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
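/* A TB covers at most two guest pages, so checking the page of its last
   byte (pc + tb->size - 1) is enough: phys_page2 stays -1 in the common
   single-page case and tb_link_phys() then registers the block on one
   page list instead of two. */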
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
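/* For a power-of-2 length, ~(len - 1) is an alignment mask: len = 4 gives
   len_mask = ...fffc, so (addr & ~len_mask) is non-zero exactly when addr
   is not 4-byte aligned.  The stored len_mask also lets the watchpoint hit
   test compare whole aligned windows instead of byte ranges. */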
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i].addr_read = -1;
            env->tlb_table[mmu_idx][i].addr_write = -1;
            env->tlb_table[mmu_idx][i].addr_code = -1;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef CONFIG_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);

#ifdef CONFIG_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
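/* Setting TLB_NOTDIRTY does not evict the entry; it only makes the fast
   path miss on stores, so the write is routed through the slow path,
   which re-marks the page dirty and can then restore the direct RAM
   mapping. */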
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef CONFIG_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    if (kvm_enabled()) {
        return kvm_set_migration_log(enable);
    }
    return 0;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret = 0;

    if (kvm_enabled())
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
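/* te->addend holds (host page address - guest vaddr), so the fast path
   computes the host address as guest_addr + addend with a single add.
   Likewise iotlb is stored minus vaddr, letting the slow path recover the
   physical I/O address by adding the faulting virtual address back in. */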
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
int walk_memory_regions(void *priv,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    PageDesc *p = NULL;
    int i, j, prot, prot1;
    int rc = 0;

    start = end = -1;
    prot = 0;

    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    rc = (*fn)(priv, start, end, prot);
                    /* callback can stop iteration by returning != 0 */
                    if (rc != 0)
                        return (rc);
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (p == NULL)
                break;
        }
    }
    return (rc);
}
static int dump_region(void *priv, unsigned long start,
    unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
        "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
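/* Example: registering 0x500 bytes at physical address 0x1200 with 4 KB
   pages covers only part of the page at 0x1000, so the macro yields
   start_addr2 = 0x200, end_addr2 = 0x6ff and need_subpage = 1; the rest
   of that page keeps its previous handlers via a subpage. */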
2321 /* register physical memory. 'size' must be a multiple of the target
2322 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2323 io memory page. The address used when calling the IO function is
2324 the offset from the start of the region, plus region_offset. Both
2325 start_addr and region_offset are rounded down to a page boundary
2326 before calculating this offset. This should not be a problem unless
2327 the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef CONFIG_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
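/* Illustrative usage sketch, kept under "#if 0" so it is not compiled: a
   board model typically pairs qemu_ram_alloc() (below) with the common
   cpu_register_physical_memory() wrapper from cpu-all.h, which calls the
   function above with region_offset == 0.  The function name, base
   addresses and sizes here are invented for the example. */
#if 0
static void example_board_init(void)
{
    ram_addr_t ram_offset;

    /* back the first 64KB of the physical address space with RAM */
    ram_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory(0x00000000, 0x10000, ram_offset | IO_MEM_RAM);

    /* leave a hole explicitly unassigned: accesses will reach the
       unassigned_mem_* handlers registered in io_mem_init() */
    cpu_register_physical_memory(0x00010000, 0x10000, IO_MEM_UNASSIGNED);
}
#endif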
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
#ifdef CONFIG_KQEMU
/* XXX: better than nothing */
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
        abort();
    }
    addr = last_ram_offset;
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
    return addr;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_ram_alloc(size);
    }
#endif

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = qemu_vmalloc(size);
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_phys_ram_base + addr;
    }
#endif

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
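/* Illustrative sketch, kept under "#if 0" so it is not compiled: the
   intended use described in the comment above -- a device that owns a
   block of ram (e.g. video memory) takes a host pointer once and only
   dereferences it within the allocated length.  The device type and the
   size are invented for the example. */
#if 0
typedef struct ExampleVGAState {
    ram_addr_t vram_offset;
    uint8_t *vram_ptr;
} ExampleVGAState;

static void example_vga_init(ExampleVGAState *s)
{
    s->vram_offset = qemu_ram_alloc(0x100000);       /* 1MB of video ram */
    s->vram_ptr = qemu_get_ram_ptr(s->vram_offset);  /* host pointer */
    memset(s->vram_ptr, 0, 0x100000);                /* stays within the block */
}
#endif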
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return host - kqemu_phys_ram_base;
    }
#endif

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
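/* Illustrative sketch, kept under "#if 0" so it is not compiled: the
   machinery above services watchpoints inserted with
   cpu_watchpoint_insert() (defined earlier in this file).  The wrapper
   function and the choice of a 4-byte write watchpoint are invented for
   the example. */
#if 0
static void example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* trap writes to an aligned 4-byte region; BP_GDB marks the
       watchpoint as owned by the gdb stub */
    if (cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0)
        fprintf(stderr, "could not insert watchpoint\n");
}
#endif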
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc **mem_read,
                                        CPUWriteMemoryFunc **mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
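/* Illustrative sketch, kept under "#if 0" so it is not compiled: a device
   registers per-width MMIO handlers and then maps the returned index with
   cpu_register_physical_memory() (the cpu-all.h wrapper around the
   registration function earlier in this file).  The device names and the
   0xfe000000 base are invented.  NULL entries mark the region
   IO_MEM_SUBWIDTH, which routes it through the subpage machinery above. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    /* a real device would decode 'addr' (offset within the region) here */
    return 0;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL,               /* no byte access */
    NULL,               /* no word access */
    example_dev_readl,  /* dword access */
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL,
    NULL,
    example_dev_writel,
};

static void example_dev_map(void *opaque)
{
    int io = cpu_register_io_memory(example_dev_read, example_dev_write,
                                    opaque);
    cpu_register_physical_memory(0xfe000000, 0x1000, io);
}
#endif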
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        /* alloc dirty bits array */
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    }
#endif
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
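/* Illustrative sketch, kept under "#if 0" so it is not compiled: callers
   normally use the cpu_physical_memory_read()/cpu_physical_memory_write()
   wrappers from cpu-all.h, which expand to cpu_physical_memory_rw() with
   is_write 0 or 1.  The helper name is invented. */
#if 0
static uint32_t example_peek_le32(target_phys_addr_t paddr)
{
    uint8_t buf[4];

    cpu_physical_memory_read(paddr, buf, 4);
    return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
}
#endif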
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
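/* Illustrative sketch, kept under "#if 0" so it is not compiled: the
   zero-copy DMA pattern the two functions above are designed for.  When a
   page is not direct RAM only one mapping (the bounce buffer) can be
   outstanding, so *plen may come back smaller than requested.  The helper
   name is invented. */
#if 0
static void example_dma_to_guest(target_phys_addr_t addr,
                                 const uint8_t *data,
                                 target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *mem = cpu_physical_memory_map(addr, &plen, 1);
        if (!mem) {
            /* resources exhausted (e.g. bounce buffer in use): a real
               device would queue a cpu_register_map_client() callback
               and retry from there */
            break;
        }
        memcpy(mem, data, plen);
        cpu_physical_memory_unmap(mem, plen, 1, plen);
        addr += plen;
        data += plen;
        size -= plen;
    }
}
#endif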
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
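/* Illustrative sketch, kept under "#if 0" so it is not compiled: the PTE
   use case called out in the comment above -- target MMU emulation can set
   accessed/dirty bits in a guest page table entry without flagging the page
   dirty for migration or flushing translated code.  EXAMPLE_PTE_DIRTY is a
   made-up flag. */
#if 0
#define EXAMPLE_PTE_DIRTY 0x40

static void example_mark_pte_dirty(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_DIRTY);
}
#endif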
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
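/* Illustrative sketch, kept under "#if 0" so it is not compiled: the gdb
   stub reads guest virtual memory through the helper above; an unmapped
   page is reported as an error instead of faulting.  The wrapper name is
   invented. */
#if 0
static int example_gdb_read_memory(CPUState *env, target_ulong vaddr,
                                   uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* read */);
}
#endif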
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                    (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif