2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
47 #include <machine/profile.h>
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
63 //#define DEBUG_TB_INVALIDATE
66 //#define DEBUG_UNASSIGNED
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock
*tbs
;
83 static int code_gen_max_blocks
;
84 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
101 #define code_gen_section \
102 __attribute__((aligned (32)))
105 uint8_t code_gen_prologue
[1024] code_gen_section
;
106 static uint8_t *code_gen_buffer
;
107 static unsigned long code_gen_buffer_size
;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size
;
110 static uint8_t *code_gen_ptr
;
112 #if !defined(CONFIG_USER_ONLY)
114 static int in_migration
;
116 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
118 static MemoryRegion
*system_memory
;
119 static MemoryRegion
*system_io
;
121 MemoryRegion io_mem_ram
, io_mem_rom
, io_mem_unassigned
, io_mem_notdirty
;
122 static MemoryRegion io_mem_subpage_ram
;
127 /* current CPU in the current thread. It is only valid inside
129 DEFINE_TLS(CPUState
*,cpu_single_env
);
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
135 typedef struct PageDesc
{
136 /* list of TBs intersecting this ram page */
137 TranslationBlock
*first_tb
;
138 /* in order to optimize self modifying code, we count the number
139 of lookups we do to a given page to use a bitmap */
140 unsigned int code_write_count
;
141 uint8_t *code_bitmap
;
142 #if defined(CONFIG_USER_ONLY)
147 /* In system mode we want L1_MAP to be based on ram offsets,
148 while in user mode we want it to be based on virtual addresses. */
149 #if !defined(CONFIG_USER_ONLY)
150 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
156 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
159 /* Size of the L2 (and L3, etc) page tables. */
161 #define L2_SIZE (1 << L2_BITS)
163 #define P_L2_LEVELS \
164 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
166 /* The bits remaining after N lower levels of page tables. */
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
170 #if V_L1_BITS_REM < 4
171 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
173 #define V_L1_BITS V_L1_BITS_REM
176 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
178 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
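/* Worked example of the map sizing (illustrative values, assuming L2_BITS
   is 10, a 4 KiB target page so TARGET_PAGE_BITS = 12, and
   L1_MAP_ADDR_SPACE_BITS = 64): there are 64 - 12 = 52 page-number bits to
   map; 52 % 10 = 2, so V_L1_BITS_REM = 2, which is < 4, and the top level
   absorbs the remainder: V_L1_BITS = 12, V_L1_SIZE = 4096 entries and
   V_L1_SHIFT = 40, leaving four 10-bit levels below the l1_map table. */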
180 unsigned long qemu_real_host_page_size
;
181 unsigned long qemu_host_page_size
;
182 unsigned long qemu_host_page_mask
;
184 /* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186 static void *l1_map
[V_L1_SIZE
];
188 #if !defined(CONFIG_USER_ONLY)
189 typedef struct PhysPageEntry PhysPageEntry
;
191 static MemoryRegionSection
*phys_sections
;
192 static unsigned phys_sections_nb
, phys_sections_nb_alloc
;
193 static uint16_t phys_section_unassigned
;
195 struct PhysPageEntry
{
197 uint16_t leaf
; /* index into phys_sections */
198 uint16_t node
; /* index into phys_map_nodes */
202 /* Simple allocator for PhysPageEntry nodes */
203 static PhysPageEntry (*phys_map_nodes
)[L2_SIZE
];
204 static unsigned phys_map_nodes_nb
, phys_map_nodes_nb_alloc
;
206 #define PHYS_MAP_NODE_NIL ((uint16_t)~0)
208 /* This is a multi-level map on the physical address space.
209 The bottom level has pointers to MemoryRegionSections. */
210 static PhysPageEntry phys_map
= { .u
.node
= PHYS_MAP_NODE_NIL
};
212 static void io_mem_init(void);
213 static void memory_map_init(void);
215 /* io memory support */
216 MemoryRegion
*io_mem_region
[IO_MEM_NB_ENTRIES
];
217 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
218 static MemoryRegion io_mem_watch
;
223 static const char *logfilename
= "qemu.log";
225 static const char *logfilename
= "/tmp/qemu.log";
229 static int log_append
= 0;
232 #if !defined(CONFIG_USER_ONLY)
233 static int tlb_flush_count
;
235 static int tb_flush_count
;
236 static int tb_phys_invalidate_count
;
239 static void map_exec(void *addr
, long size
)
242 VirtualProtect(addr
, size
,
243 PAGE_EXECUTE_READWRITE
, &old_protect
);
247 static void map_exec(void *addr
, long size
)
249 unsigned long start
, end
, page_size
;
251 page_size
= getpagesize();
252 start
= (unsigned long)addr
;
253 start
&= ~(page_size
- 1);
255 end
= (unsigned long)addr
+ size
;
256 end
+= page_size
- 1;
257 end
&= ~(page_size
- 1);
259 mprotect((void *)start
, end
- start
,
260 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
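/* The rounding in map_exec() above widens [addr, addr + size) outward to
   whole host pages before mprotect(): start is rounded down and end rounded
   up to a page boundary. With 4096-byte pages, for instance, addr = 0x1234
   and size = 0x10 become start = 0x1000 and end = 0x2000 (illustrative
   values, not taken from the original source). */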
264 static void page_init(void)
266 /* NOTE: we can always suppose that qemu_host_page_size >=
270 SYSTEM_INFO system_info
;
272 GetSystemInfo(&system_info
);
273 qemu_real_host_page_size
= system_info
.dwPageSize
;
276 qemu_real_host_page_size
= getpagesize();
278 if (qemu_host_page_size
== 0)
279 qemu_host_page_size
= qemu_real_host_page_size
;
280 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
281 qemu_host_page_size
= TARGET_PAGE_SIZE
;
282 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
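/* After this point the host page size is at least TARGET_PAGE_SIZE and
   qemu_host_page_mask clears the in-page offset bits; with a 4 KiB host
   page, for example, the mask is ~0xfff. */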
284 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
286 #ifdef HAVE_KINFO_GETVMMAP
287 struct kinfo_vmentry
*freep
;
290 freep
= kinfo_getvmmap(getpid(), &cnt
);
293 for (i
= 0; i
< cnt
; i
++) {
294 unsigned long startaddr
, endaddr
;
296 startaddr
= freep
[i
].kve_start
;
297 endaddr
= freep
[i
].kve_end
;
298 if (h2g_valid(startaddr
)) {
299 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
301 if (h2g_valid(endaddr
)) {
302 endaddr
= h2g(endaddr
);
303 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
305 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
307 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
318 last_brk
= (unsigned long)sbrk(0);
320 f
= fopen("/compat/linux/proc/self/maps", "r");
325 unsigned long startaddr
, endaddr
;
328 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
330 if (n
== 2 && h2g_valid(startaddr
)) {
331 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
333 if (h2g_valid(endaddr
)) {
334 endaddr
= h2g(endaddr
);
338 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
350 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
356 #if defined(CONFIG_USER_ONLY)
357 /* We can't use g_malloc because it may recurse into a locked mutex. */
358 # define ALLOC(P, SIZE) \
360 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
361 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
364 # define ALLOC(P, SIZE) \
365 do { P = g_malloc0(SIZE); } while (0)
368 /* Level 1. Always allocated. */
369 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
372 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
379 ALLOC(p
, sizeof(void *) * L2_SIZE
);
383 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
391 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
397 return pd
+ (index
& (L2_SIZE
- 1));
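/* page_find_alloc() consumes the page index from the most significant end:
   the top V_L1_BITS select an l1_map slot, each intermediate level strips
   another L2_BITS, and the final (index & (L2_SIZE - 1)) picks the PageDesc
   inside the leaf array. */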
400 static inline PageDesc
*page_find(tb_page_addr_t index
)
402 return page_find_alloc(index
, 0);
405 #if !defined(CONFIG_USER_ONLY)
407 static PhysPageEntry
*phys_map_node_alloc(uint16_t *ptr
)
412 /* Assign early to avoid the pointer being invalidated by g_renew() */
413 *ptr
= ret
= phys_map_nodes_nb
++;
414 assert(ret
!= PHYS_MAP_NODE_NIL
);
415 if (ret
== phys_map_nodes_nb_alloc
) {
416 typedef PhysPageEntry Node
[L2_SIZE
];
417 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
* 2, 16);
418 phys_map_nodes
= g_renew(Node
, phys_map_nodes
,
419 phys_map_nodes_nb_alloc
);
421 for (i
= 0; i
< L2_SIZE
; ++i
) {
422 phys_map_nodes
[ret
][i
].u
.node
= PHYS_MAP_NODE_NIL
;
424 return phys_map_nodes
[ret
];
427 static void phys_map_nodes_reset(void)
429 phys_map_nodes_nb
= 0;
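/* The node pool used by phys_map_node_alloc() grows geometrically (doubling,
   with a floor of 16 nodes), and every entry of a freshly handed-out node is
   initialised to PHYS_MAP_NODE_NIL so lookups treat it as empty until it is
   populated. */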
432 static uint16_t *phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
434 PhysPageEntry
*lp
, *p
;
440 for (i
= P_L2_LEVELS
- 1; i
>= 0; i
--) {
441 if (lp
->u
.node
== PHYS_MAP_NODE_NIL
) {
445 p
= phys_map_node_alloc(&lp
->u
.node
);
447 for (j
= 0; j
< L2_SIZE
; j
++) {
448 p
[j
].u
.leaf
= phys_section_unassigned
;
452 p
= phys_map_nodes
[lp
->u
.node
];
454 lp
= &p
[(index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1)];
460 static MemoryRegionSection
phys_page_find(target_phys_addr_t index
)
462 PhysPageEntry lp
= phys_map
;
465 MemoryRegionSection section
;
466 target_phys_addr_t delta
;
467 uint16_t s_index
= phys_section_unassigned
;
469 for (i
= P_L2_LEVELS
- 1; i
>= 0; i
--) {
470 if (lp
.u
.node
== PHYS_MAP_NODE_NIL
) {
473 p
= phys_map_nodes
[lp
.u
.node
];
474 lp
= p
[(index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1)];
479 section
= phys_sections
[s_index
];
480 index
<<= TARGET_PAGE_BITS
;
481 assert(section
.offset_within_address_space
<= index
482 && index
<= section
.offset_within_address_space
+ section
.size
-1);
483 delta
= index
- section
.offset_within_address_space
;
484 section
.offset_within_address_space
+= delta
;
485 section
.offset_within_region
+= delta
;
486 section
.size
-= delta
;
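/* phys_page_find() clips the returned section so that it starts exactly at
   the page that was looked up: delta is the distance from the section start
   to the requested address, and the offsets are advanced (and the size
   shrunk) by that amount. */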
490 static void tlb_protect_code(ram_addr_t ram_addr
);
491 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
493 #define mmap_lock() do { } while(0)
494 #define mmap_unlock() do { } while(0)
497 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
499 #if defined(CONFIG_USER_ONLY)
500 /* Currently it is not recommended to allocate big chunks of data in
501 user mode. It will change when a dedicated libc will be used */
502 #define USE_STATIC_CODE_GEN_BUFFER
505 #ifdef USE_STATIC_CODE_GEN_BUFFER
506 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
507 __attribute__((aligned (CODE_GEN_ALIGN
)));
510 static void code_gen_alloc(unsigned long tb_size
)
512 #ifdef USE_STATIC_CODE_GEN_BUFFER
513 code_gen_buffer
= static_code_gen_buffer
;
514 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
515 map_exec(code_gen_buffer
, code_gen_buffer_size
);
517 code_gen_buffer_size
= tb_size
;
518 if (code_gen_buffer_size
== 0) {
519 #if defined(CONFIG_USER_ONLY)
520 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
522 /* XXX: needs adjustments */
523 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
526 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
527 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
528 /* The code gen buffer location may have constraints depending on
529 the host cpu and OS */
530 #if defined(__linux__)
535 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
536 #if defined(__x86_64__)
538 /* Cannot map more than that */
539 if (code_gen_buffer_size
> (800 * 1024 * 1024))
540 code_gen_buffer_size
= (800 * 1024 * 1024);
541 #elif defined(__sparc_v9__)
542 // Map the buffer below 2G, so we can use direct calls and branches
544 start
= (void *) 0x60000000UL
;
545 if (code_gen_buffer_size
> (512 * 1024 * 1024))
546 code_gen_buffer_size
= (512 * 1024 * 1024);
547 #elif defined(__arm__)
548 /* Keep the buffer no bigger than 16MB to branch between blocks */
549 if (code_gen_buffer_size
> 16 * 1024 * 1024)
550 code_gen_buffer_size
= 16 * 1024 * 1024;
551 #elif defined(__s390x__)
552 /* Map the buffer so that we can use direct calls and branches. */
553 /* We have a +- 4GB range on the branches; leave some slop. */
554 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
555 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
557 start
= (void *)0x90000000UL
;
559 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
560 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
562 if (code_gen_buffer
== MAP_FAILED
) {
563 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
567 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
568 || defined(__DragonFly__) || defined(__OpenBSD__) \
569 || defined(__NetBSD__)
573 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
574 #if defined(__x86_64__)
575 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
576 * 0x40000000 is free */
578 addr
= (void *)0x40000000;
579 /* Cannot map more than that */
580 if (code_gen_buffer_size
> (800 * 1024 * 1024))
581 code_gen_buffer_size
= (800 * 1024 * 1024);
582 #elif defined(__sparc_v9__)
583 // Map the buffer below 2G, so we can use direct calls and branches
585 addr
= (void *) 0x60000000UL
;
586 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
587 code_gen_buffer_size
= (512 * 1024 * 1024);
590 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
591 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
593 if (code_gen_buffer
== MAP_FAILED
) {
594 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
599 code_gen_buffer
= g_malloc(code_gen_buffer_size
);
600 map_exec(code_gen_buffer
, code_gen_buffer_size
);
602 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
603 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
604 code_gen_buffer_max_size
= code_gen_buffer_size
-
605 (TCG_MAX_OP_SIZE
* OPC_BUF_SIZE
);
606 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
607 tbs
= g_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
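/* Sizing note: some headroom (TCG_MAX_OP_SIZE * OPC_BUF_SIZE) is reserved
   below the end of the buffer so a translation in progress cannot run past
   it, and the TB array is sized for code_gen_buffer_size /
   CODE_GEN_AVG_BLOCK_SIZE descriptors, i.e. one per average-sized block that
   fits in the buffer. */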
610 /* Must be called before using the QEMU cpus. 'tb_size' is the size
611 (in bytes) allocated to the translation buffer. Zero means default
613 void tcg_exec_init(unsigned long tb_size
)
616 code_gen_alloc(tb_size
);
617 code_gen_ptr
= code_gen_buffer
;
619 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
620 /* There's no guest base to take into account, so go ahead and
621 initialize the prologue now. */
622 tcg_prologue_init(&tcg_ctx
);
626 bool tcg_enabled(void)
628 return code_gen_buffer
!= NULL
;
631 void cpu_exec_init_all(void)
633 #if !defined(CONFIG_USER_ONLY)
639 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
641 static int cpu_common_post_load(void *opaque
, int version_id
)
643 CPUState
*env
= opaque
;
645 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
646 version_id is increased. */
647 env
->interrupt_request
&= ~0x01;
653 static const VMStateDescription vmstate_cpu_common
= {
654 .name
= "cpu_common",
656 .minimum_version_id
= 1,
657 .minimum_version_id_old
= 1,
658 .post_load
= cpu_common_post_load
,
659 .fields
= (VMStateField
[]) {
660 VMSTATE_UINT32(halted
, CPUState
),
661 VMSTATE_UINT32(interrupt_request
, CPUState
),
662 VMSTATE_END_OF_LIST()
667 CPUState
*qemu_get_cpu(int cpu
)
669 CPUState
*env
= first_cpu
;
672 if (env
->cpu_index
== cpu
)
680 void cpu_exec_init(CPUState
*env
)
685 #if defined(CONFIG_USER_ONLY)
688 env
->next_cpu
= NULL
;
691 while (*penv
!= NULL
) {
692 penv
= &(*penv
)->next_cpu
;
695 env
->cpu_index
= cpu_index
;
697 QTAILQ_INIT(&env
->breakpoints
);
698 QTAILQ_INIT(&env
->watchpoints
);
699 #ifndef CONFIG_USER_ONLY
700 env
->thread_id
= qemu_get_thread_id();
703 #if defined(CONFIG_USER_ONLY)
706 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
707 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
708 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
709 cpu_save
, cpu_load
, env
);
713 /* Allocate a new translation block. Flush the translation buffer if
714 too many translation blocks or too much generated code. */
715 static TranslationBlock
*tb_alloc(target_ulong pc
)
717 TranslationBlock
*tb
;
719 if (nb_tbs
>= code_gen_max_blocks
||
720 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
728 void tb_free(TranslationBlock
*tb
)
730 /* In practice this is mostly used for single use temporary TB
731 Ignore the hard cases and just back up if this TB happens to
732 be the last one generated. */
733 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
734 code_gen_ptr
= tb
->tc_ptr
;
739 static inline void invalidate_page_bitmap(PageDesc
*p
)
741 if (p
->code_bitmap
) {
742 g_free(p
->code_bitmap
);
743 p
->code_bitmap
= NULL
;
745 p
->code_write_count
= 0;
748 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
750 static void page_flush_tb_1 (int level
, void **lp
)
759 for (i
= 0; i
< L2_SIZE
; ++i
) {
760 pd
[i
].first_tb
= NULL
;
761 invalidate_page_bitmap(pd
+ i
);
765 for (i
= 0; i
< L2_SIZE
; ++i
) {
766 page_flush_tb_1 (level
- 1, pp
+ i
);
771 static void page_flush_tb(void)
774 for (i
= 0; i
< V_L1_SIZE
; i
++) {
775 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
779 /* flush all the translation blocks */
780 /* XXX: tb_flush is currently not thread safe */
781 void tb_flush(CPUState
*env1
)
784 #if defined(DEBUG_FLUSH)
785 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
786 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
788 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
790 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
791 cpu_abort(env1
, "Internal error: code buffer overflow\n");
795 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
796 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
799 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
802 code_gen_ptr
= code_gen_buffer
;
803 /* XXX: flush processor icache at this point if cache flush is
808 #ifdef DEBUG_TB_CHECK
810 static void tb_invalidate_check(target_ulong address
)
812 TranslationBlock
*tb
;
814 address
&= TARGET_PAGE_MASK
;
815 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
816 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
817 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
818 address
>= tb
->pc
+ tb
->size
)) {
819 printf("ERROR invalidate: address=" TARGET_FMT_lx
820 " PC=%08lx size=%04x\n",
821 address
, (long)tb
->pc
, tb
->size
);
827 /* verify that all the pages have correct rights for code */
828 static void tb_page_check(void)
830 TranslationBlock
*tb
;
831 int i
, flags1
, flags2
;
833 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
834 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
835 flags1
= page_get_flags(tb
->pc
);
836 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
837 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
838 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
839 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
847 /* invalidate one TB */
848 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
851 TranslationBlock
*tb1
;
855 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
858 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
862 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
864 TranslationBlock
*tb1
;
870 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
872 *ptb
= tb1
->page_next
[n1
];
875 ptb
= &tb1
->page_next
[n1
];
879 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
881 TranslationBlock
*tb1
, **ptb
;
884 ptb
= &tb
->jmp_next
[n
];
887 /* find tb(n) in circular list */
891 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
892 if (n1
== n
&& tb1
== tb
)
895 ptb
= &tb1
->jmp_first
;
897 ptb
= &tb1
->jmp_next
[n1
];
900 /* now we can suppress tb(n) from the list */
901 *ptb
= tb
->jmp_next
[n
];
903 tb
->jmp_next
[n
] = NULL
;
907 /* reset the jump entry 'n' of a TB so that it is not chained to
909 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
911 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
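/* Resetting jump 'n' points the patched branch at tc_ptr +
   tb_next_offset[n], i.e. back into the TB's own generated code, so
   execution falls through to the exit path instead of chaining directly
   into another TB. */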
914 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
919 tb_page_addr_t phys_pc
;
920 TranslationBlock
*tb1
, *tb2
;
922 /* remove the TB from the hash list */
923 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
924 h
= tb_phys_hash_func(phys_pc
);
925 tb_remove(&tb_phys_hash
[h
], tb
,
926 offsetof(TranslationBlock
, phys_hash_next
));
928 /* remove the TB from the page list */
929 if (tb
->page_addr
[0] != page_addr
) {
930 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
931 tb_page_remove(&p
->first_tb
, tb
);
932 invalidate_page_bitmap(p
);
934 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
935 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
936 tb_page_remove(&p
->first_tb
, tb
);
937 invalidate_page_bitmap(p
);
940 tb_invalidated_flag
= 1;
942 /* remove the TB from the hash list */
943 h
= tb_jmp_cache_hash_func(tb
->pc
);
944 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
945 if (env
->tb_jmp_cache
[h
] == tb
)
946 env
->tb_jmp_cache
[h
] = NULL
;
949 /* suppress this TB from the two jump lists */
950 tb_jmp_remove(tb
, 0);
951 tb_jmp_remove(tb
, 1);
953 /* suppress any remaining jumps to this TB */
959 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
960 tb2
= tb1
->jmp_next
[n1
];
961 tb_reset_jump(tb1
, n1
);
962 tb1
->jmp_next
[n1
] = NULL
;
965 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
967 tb_phys_invalidate_count
++;
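/* The jump lists above tag the low two bits of each TranslationBlock
   pointer: values 0 and 1 say which jump slot of the pointed-to TB an entry
   refers to, and the value 2 (as in jmp_first = tb | 2) marks the head of
   the circular list; readers strip the tag with "& ~3". */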
970 static inline void set_bits(uint8_t *tab
, int start
, int len
)
976 mask
= 0xff << (start
& 7);
977 if ((start
& ~7) == (end
& ~7)) {
979 mask
&= ~(0xff << (end
& 7));
984 start
= (start
+ 8) & ~7;
986 while (start
< end1
) {
991 mask
= ~(0xff << (end
& 7));
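/* Example: set_bits(tab, 10, 4) marks bits 10..13. tab is advanced by
   10 >> 3 = 1 byte; the mask first selects bits 2..7 of that byte and, since
   the run ends within the same byte, is clipped at the end bit, leaving
   bits 2..5 (0x3c) of tab[1] set. */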
997 static void build_page_bitmap(PageDesc
*p
)
999 int n
, tb_start
, tb_end
;
1000 TranslationBlock
*tb
;
1002 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
1005 while (tb
!= NULL
) {
1007 tb
= (TranslationBlock
*)((long)tb
& ~3);
1008 /* NOTE: this is subtle as a TB may span two physical pages */
1010 /* NOTE: tb_end may be after the end of the page, but
1011 it is not a problem */
1012 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1013 tb_end
= tb_start
+ tb
->size
;
1014 if (tb_end
> TARGET_PAGE_SIZE
)
1015 tb_end
= TARGET_PAGE_SIZE
;
1018 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1020 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1021 tb
= tb
->page_next
[n
];
1025 TranslationBlock
*tb_gen_code(CPUState
*env
,
1026 target_ulong pc
, target_ulong cs_base
,
1027 int flags
, int cflags
)
1029 TranslationBlock
*tb
;
1031 tb_page_addr_t phys_pc
, phys_page2
;
1032 target_ulong virt_page2
;
1035 phys_pc
= get_page_addr_code(env
, pc
);
1038 /* flush must be done */
1040 /* cannot fail at this point */
1042 /* Don't forget to invalidate previous TB info. */
1043 tb_invalidated_flag
= 1;
1045 tc_ptr
= code_gen_ptr
;
1046 tb
->tc_ptr
= tc_ptr
;
1047 tb
->cs_base
= cs_base
;
1049 tb
->cflags
= cflags
;
1050 cpu_gen_code(env
, tb
, &code_gen_size
);
1051 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1053 /* check next page if needed */
1054 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1056 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1057 phys_page2
= get_page_addr_code(env
, virt_page2
);
1059 tb_link_page(tb
, phys_pc
, phys_page2
);
1063 /* invalidate all TBs which intersect with the target physical page
1064 starting in range [start;end[. NOTE: start and end must refer to
1065 the same physical page. 'is_cpu_write_access' should be true if called
1066 from a real cpu write access: the virtual CPU will exit the current
1067 TB if code is modified inside this TB. */
1068 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1069 int is_cpu_write_access
)
1071 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1072 CPUState
*env
= cpu_single_env
;
1073 tb_page_addr_t tb_start
, tb_end
;
1076 #ifdef TARGET_HAS_PRECISE_SMC
1077 int current_tb_not_found
= is_cpu_write_access
;
1078 TranslationBlock
*current_tb
= NULL
;
1079 int current_tb_modified
= 0;
1080 target_ulong current_pc
= 0;
1081 target_ulong current_cs_base
= 0;
1082 int current_flags
= 0;
1083 #endif /* TARGET_HAS_PRECISE_SMC */
1085 p
= page_find(start
>> TARGET_PAGE_BITS
);
1088 if (!p
->code_bitmap
&&
1089 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1090 is_cpu_write_access
) {
1091 /* build code bitmap */
1092 build_page_bitmap(p
);
1095 /* we remove all the TBs in the range [start, end[ */
1096 /* XXX: see if in some cases it could be faster to invalidate all the code */
1098 while (tb
!= NULL
) {
1100 tb
= (TranslationBlock
*)((long)tb
& ~3);
1101 tb_next
= tb
->page_next
[n
];
1102 /* NOTE: this is subtle as a TB may span two physical pages */
1104 /* NOTE: tb_end may be after the end of the page, but
1105 it is not a problem */
1106 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1107 tb_end
= tb_start
+ tb
->size
;
1109 tb_start
= tb
->page_addr
[1];
1110 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1112 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1113 #ifdef TARGET_HAS_PRECISE_SMC
1114 if (current_tb_not_found
) {
1115 current_tb_not_found
= 0;
1117 if (env
->mem_io_pc
) {
1118 /* now we have a real cpu fault */
1119 current_tb
= tb_find_pc(env
->mem_io_pc
);
1122 if (current_tb
== tb
&&
1123 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1124 /* If we are modifying the current TB, we must stop
1125 its execution. We could be more precise by checking
1126 that the modification is after the current PC, but it
1127 would require a specialized function to partially
1128 restore the CPU state */
1130 current_tb_modified
= 1;
1131 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1132 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1135 #endif /* TARGET_HAS_PRECISE_SMC */
1136 /* we need to do that to handle the case where a signal
1137 occurs while doing tb_phys_invalidate() */
1140 saved_tb
= env
->current_tb
;
1141 env
->current_tb
= NULL
;
1143 tb_phys_invalidate(tb
, -1);
1145 env
->current_tb
= saved_tb
;
1146 if (env
->interrupt_request
&& env
->current_tb
)
1147 cpu_interrupt(env
, env
->interrupt_request
);
1152 #if !defined(CONFIG_USER_ONLY)
1153 /* if no code remaining, no need to continue to use slow writes */
1155 invalidate_page_bitmap(p
);
1156 if (is_cpu_write_access
) {
1157 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1161 #ifdef TARGET_HAS_PRECISE_SMC
1162 if (current_tb_modified
) {
1163 /* we generate a block containing just the instruction
1164 modifying the memory. It will ensure that it cannot modify
1166 env
->current_tb
= NULL
;
1167 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1168 cpu_resume_from_signal(env
, NULL
);
1173 /* len must be <= 8 and start must be a multiple of len */
1174 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1180 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1181 cpu_single_env
->mem_io_vaddr
, len
,
1182 cpu_single_env
->eip
,
1183 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1186 p
= page_find(start
>> TARGET_PAGE_BITS
);
1189 if (p
->code_bitmap
) {
1190 offset
= start
& ~TARGET_PAGE_MASK
;
1191 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1192 if (b
& ((1 << len
) - 1))
1196 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1200 #if !defined(CONFIG_SOFTMMU)
1201 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1202 unsigned long pc
, void *puc
)
1204 TranslationBlock
*tb
;
1207 #ifdef TARGET_HAS_PRECISE_SMC
1208 TranslationBlock
*current_tb
= NULL
;
1209 CPUState
*env
= cpu_single_env
;
1210 int current_tb_modified
= 0;
1211 target_ulong current_pc
= 0;
1212 target_ulong current_cs_base
= 0;
1213 int current_flags
= 0;
1216 addr
&= TARGET_PAGE_MASK
;
1217 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1221 #ifdef TARGET_HAS_PRECISE_SMC
1222 if (tb
&& pc
!= 0) {
1223 current_tb
= tb_find_pc(pc
);
1226 while (tb
!= NULL
) {
1228 tb
= (TranslationBlock
*)((long)tb
& ~3);
1229 #ifdef TARGET_HAS_PRECISE_SMC
1230 if (current_tb
== tb
&&
1231 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1232 /* If we are modifying the current TB, we must stop
1233 its execution. We could be more precise by checking
1234 that the modification is after the current PC, but it
1235 would require a specialized function to partially
1236 restore the CPU state */
1238 current_tb_modified
= 1;
1239 cpu_restore_state(current_tb
, env
, pc
);
1240 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1243 #endif /* TARGET_HAS_PRECISE_SMC */
1244 tb_phys_invalidate(tb
, addr
);
1245 tb
= tb
->page_next
[n
];
1248 #ifdef TARGET_HAS_PRECISE_SMC
1249 if (current_tb_modified
) {
1250 /* we generate a block containing just the instruction
1251 modifying the memory. It will ensure that it cannot modify
1253 env
->current_tb
= NULL
;
1254 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1255 cpu_resume_from_signal(env
, puc
);
1261 /* add the tb in the target page and protect it if necessary */
1262 static inline void tb_alloc_page(TranslationBlock
*tb
,
1263 unsigned int n
, tb_page_addr_t page_addr
)
1266 #ifndef CONFIG_USER_ONLY
1267 bool page_already_protected
;
1270 tb
->page_addr
[n
] = page_addr
;
1271 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1272 tb
->page_next
[n
] = p
->first_tb
;
1273 #ifndef CONFIG_USER_ONLY
1274 page_already_protected
= p
->first_tb
!= NULL
;
1276 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1277 invalidate_page_bitmap(p
);
1279 #if defined(TARGET_HAS_SMC) || 1
1281 #if defined(CONFIG_USER_ONLY)
1282 if (p
->flags
& PAGE_WRITE
) {
1287 /* force the host page as non writable (writes will have a
1288 page fault + mprotect overhead) */
1289 page_addr
&= qemu_host_page_mask
;
1291 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1292 addr
+= TARGET_PAGE_SIZE
) {
1294 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1298 p2
->flags
&= ~PAGE_WRITE
;
1300 mprotect(g2h(page_addr
), qemu_host_page_size
,
1301 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1302 #ifdef DEBUG_TB_INVALIDATE
1303 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1308 /* if some code is already present, then the pages are already
1309 protected. So we handle the case where only the first TB is
1310 allocated in a physical page */
1311 if (!page_already_protected
) {
1312 tlb_protect_code(page_addr
);
1316 #endif /* TARGET_HAS_SMC */
1319 /* add a new TB and link it to the physical page tables. phys_page2 is
1320 (-1) to indicate that only one page contains the TB. */
1321 void tb_link_page(TranslationBlock
*tb
,
1322 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1325 TranslationBlock
**ptb
;
1327 /* Grab the mmap lock to stop another thread invalidating this TB
1328 before we are done. */
1330 /* add in the physical hash table */
1331 h
= tb_phys_hash_func(phys_pc
);
1332 ptb
= &tb_phys_hash
[h
];
1333 tb
->phys_hash_next
= *ptb
;
1336 /* add in the page list */
1337 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1338 if (phys_page2
!= -1)
1339 tb_alloc_page(tb
, 1, phys_page2
);
1341 tb
->page_addr
[1] = -1;
1343 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1344 tb
->jmp_next
[0] = NULL
;
1345 tb
->jmp_next
[1] = NULL
;
1347 /* init original jump addresses */
1348 if (tb
->tb_next_offset
[0] != 0xffff)
1349 tb_reset_jump(tb
, 0);
1350 if (tb
->tb_next_offset
[1] != 0xffff)
1351 tb_reset_jump(tb
, 1);
1353 #ifdef DEBUG_TB_CHECK
1359 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1360 tb[1].tc_ptr. Return NULL if not found */
1361 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1363 int m_min
, m_max
, m
;
1365 TranslationBlock
*tb
;
1369 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1370 tc_ptr
>= (unsigned long)code_gen_ptr
)
1372 /* binary search (cf Knuth) */
1375 while (m_min
<= m_max
) {
1376 m
= (m_min
+ m_max
) >> 1;
1378 v
= (unsigned long)tb
->tc_ptr
;
1381 else if (tc_ptr
< v
) {
1390 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1392 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1394 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1397 tb1
= tb
->jmp_next
[n
];
1399 /* find head of list */
1402 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1405 tb1
= tb1
->jmp_next
[n1
];
1407 /* we are now sure now that tb jumps to tb1 */
1410 /* remove tb from the jmp_first list */
1411 ptb
= &tb_next
->jmp_first
;
1415 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1416 if (n1
== n
&& tb1
== tb
)
1418 ptb
= &tb1
->jmp_next
[n1
];
1420 *ptb
= tb
->jmp_next
[n
];
1421 tb
->jmp_next
[n
] = NULL
;
1423 /* suppress the jump to next tb in generated code */
1424 tb_reset_jump(tb
, n
);
1426 /* suppress jumps in the tb on which we could have jumped */
1427 tb_reset_jump_recursive(tb_next
);
1431 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1433 tb_reset_jump_recursive2(tb
, 0);
1434 tb_reset_jump_recursive2(tb
, 1);
1437 #if defined(TARGET_HAS_ICE)
1438 #if defined(CONFIG_USER_ONLY)
1439 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1441 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1444 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1446 target_phys_addr_t addr
;
1447 ram_addr_t ram_addr
;
1448 MemoryRegionSection section
;
1450 addr
= cpu_get_phys_page_debug(env
, pc
);
1451 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1452 if (!(memory_region_is_ram(section
.mr
)
1453 || (section
.mr
->rom_device
&& section
.mr
->readable
))) {
1456 ram_addr
= (memory_region_get_ram_addr(section
.mr
)
1457 + section
.offset_within_region
) & TARGET_PAGE_MASK
;
1458 ram_addr
|= (pc
& ~TARGET_PAGE_MASK
);
1459 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1462 #endif /* TARGET_HAS_ICE */
1464 #if defined(CONFIG_USER_ONLY)
1465 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1470 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1471 int flags
, CPUWatchpoint
**watchpoint
)
1476 /* Add a watchpoint. */
1477 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1478 int flags
, CPUWatchpoint
**watchpoint
)
1480 target_ulong len_mask
= ~(len
- 1);
1483 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1484 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1485 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1486 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1489 wp
= g_malloc(sizeof(*wp
));
1492 wp
->len_mask
= len_mask
;
1495 /* keep all GDB-injected watchpoints in front */
1497 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1499 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1501 tlb_flush_page(env
, addr
);
1508 /* Remove a specific watchpoint. */
1509 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1512 target_ulong len_mask
= ~(len
- 1);
1515 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1516 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1517 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1518 cpu_watchpoint_remove_by_ref(env
, wp
);
1525 /* Remove a specific watchpoint by reference. */
1526 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1528 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1530 tlb_flush_page(env
, watchpoint
->vaddr
);
1535 /* Remove all matching watchpoints. */
1536 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1538 CPUWatchpoint
*wp
, *next
;
1540 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1541 if (wp
->flags
& mask
)
1542 cpu_watchpoint_remove_by_ref(env
, wp
);
1547 /* Add a breakpoint. */
1548 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1549 CPUBreakpoint
**breakpoint
)
1551 #if defined(TARGET_HAS_ICE)
1554 bp
= g_malloc(sizeof(*bp
));
1559 /* keep all GDB-injected breakpoints in front */
1561 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1563 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1565 breakpoint_invalidate(env
, pc
);
1575 /* Remove a specific breakpoint. */
1576 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1578 #if defined(TARGET_HAS_ICE)
1581 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1582 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1583 cpu_breakpoint_remove_by_ref(env
, bp
);
1593 /* Remove a specific breakpoint by reference. */
1594 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1596 #if defined(TARGET_HAS_ICE)
1597 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1599 breakpoint_invalidate(env
, breakpoint
->pc
);
1605 /* Remove all matching breakpoints. */
1606 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1608 #if defined(TARGET_HAS_ICE)
1609 CPUBreakpoint
*bp
, *next
;
1611 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1612 if (bp
->flags
& mask
)
1613 cpu_breakpoint_remove_by_ref(env
, bp
);
1618 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1619 CPU loop after each instruction */
1620 void cpu_single_step(CPUState
*env
, int enabled
)
1622 #if defined(TARGET_HAS_ICE)
1623 if (env
->singlestep_enabled
!= enabled
) {
1624 env
->singlestep_enabled
= enabled
;
1626 kvm_update_guest_debug(env
, 0);
1628 /* must flush all the translated code to avoid inconsistencies */
1629 /* XXX: only flush what is necessary */
1636 /* enable or disable low levels log */
1637 void cpu_set_log(int log_flags
)
1639 loglevel
= log_flags
;
1640 if (loglevel
&& !logfile
) {
1641 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1643 perror(logfilename
);
1646 #if !defined(CONFIG_SOFTMMU)
1647 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1649 static char logfile_buf
[4096];
1650 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1652 #elif defined(_WIN32)
1653 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1654 setvbuf(logfile
, NULL
, _IONBF
, 0);
1656 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1660 if (!loglevel
&& logfile
) {
1666 void cpu_set_log_filename(const char *filename
)
1668 logfilename
= strdup(filename
);
1673 cpu_set_log(loglevel
);
1676 static void cpu_unlink_tb(CPUState
*env
)
1678 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1679 problem and hope the cpu will stop of its own accord. For userspace
1680 emulation this often isn't actually as bad as it sounds. Often
1681 signals are used primarily to interrupt blocking syscalls. */
1682 TranslationBlock
*tb
;
1683 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1685 spin_lock(&interrupt_lock
);
1686 tb
= env
->current_tb
;
1687 /* if the cpu is currently executing code, we must unlink it and
1688 all the potentially executing TB */
1690 env
->current_tb
= NULL
;
1691 tb_reset_jump_recursive(tb
);
1693 spin_unlock(&interrupt_lock
);
1696 #ifndef CONFIG_USER_ONLY
1697 /* mask must never be zero, except for A20 change call */
1698 static void tcg_handle_interrupt(CPUState
*env
, int mask
)
1702 old_mask
= env
->interrupt_request
;
1703 env
->interrupt_request
|= mask
;
1706 * If called from iothread context, wake the target cpu in
1709 if (!qemu_cpu_is_self(env
)) {
1715 env
->icount_decr
.u16
.high
= 0xffff;
1717 && (mask
& ~old_mask
) != 0) {
1718 cpu_abort(env
, "Raised interrupt while not in I/O function");
1725 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1727 #else /* CONFIG_USER_ONLY */
1729 void cpu_interrupt(CPUState
*env
, int mask
)
1731 env
->interrupt_request
|= mask
;
1734 #endif /* CONFIG_USER_ONLY */
1736 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1738 env
->interrupt_request
&= ~mask
;
1741 void cpu_exit(CPUState
*env
)
1743 env
->exit_request
= 1;
1747 const CPULogItem cpu_log_items
[] = {
1748 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1749 "show generated host assembly code for each compiled TB" },
1750 { CPU_LOG_TB_IN_ASM
, "in_asm",
1751 "show target assembly code for each compiled TB" },
1752 { CPU_LOG_TB_OP
, "op",
1753 "show micro ops for each compiled TB" },
1754 { CPU_LOG_TB_OP_OPT
, "op_opt",
1757 "before eflags optimization and "
1759 "after liveness analysis" },
1760 { CPU_LOG_INT
, "int",
1761 "show interrupts/exceptions in short format" },
1762 { CPU_LOG_EXEC
, "exec",
1763 "show trace before each executed TB (lots of logs)" },
1764 { CPU_LOG_TB_CPU
, "cpu",
1765 "show CPU state before block translation" },
1767 { CPU_LOG_PCALL
, "pcall",
1768 "show protected mode far calls/returns/exceptions" },
1769 { CPU_LOG_RESET
, "cpu_reset",
1770 "show CPU state before CPU resets" },
1773 { CPU_LOG_IOPORT
, "ioport",
1774 "show all i/o ports accesses" },
1779 static int cmp1(const char *s1
, int n
, const char *s2
)
1781 if (strlen(s2
) != n
)
1783 return memcmp(s1
, s2
, n
) == 0;
1786 /* takes a comma separated list of log masks. Return 0 if error. */
1787 int cpu_str_to_log_mask(const char *str
)
1789 const CPULogItem
*item
;
1796 p1
= strchr(p
, ',');
1799 if(cmp1(p
,p1
-p
,"all")) {
1800 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1804 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1805 if (cmp1(p
, p1
- p
, item
->name
))
1819 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1826 fprintf(stderr
, "qemu: fatal: ");
1827 vfprintf(stderr
, fmt
, ap
);
1828 fprintf(stderr
, "\n");
1830 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1832 cpu_dump_state(env
, stderr
, fprintf
, 0);
1834 if (qemu_log_enabled()) {
1835 qemu_log("qemu: fatal: ");
1836 qemu_log_vprintf(fmt
, ap2
);
1839 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1841 log_cpu_state(env
, 0);
1848 #if defined(CONFIG_USER_ONLY)
1850 struct sigaction act
;
1851 sigfillset(&act
.sa_mask
);
1852 act
.sa_handler
= SIG_DFL
;
1853 sigaction(SIGABRT
, &act
, NULL
);
1859 CPUState
*cpu_copy(CPUState
*env
)
1861 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1862 CPUState
*next_cpu
= new_env
->next_cpu
;
1863 int cpu_index
= new_env
->cpu_index
;
1864 #if defined(TARGET_HAS_ICE)
1869 memcpy(new_env
, env
, sizeof(CPUState
));
1871 /* Preserve chaining and index. */
1872 new_env
->next_cpu
= next_cpu
;
1873 new_env
->cpu_index
= cpu_index
;
1875 /* Clone all break/watchpoints.
1876 Note: Once we support ptrace with hw-debug register access, make sure
1877 BP_CPU break/watchpoints are handled correctly on clone. */
1878 QTAILQ_INIT(&env
->breakpoints
);
1879 QTAILQ_INIT(&env
->watchpoints
);
1880 #if defined(TARGET_HAS_ICE)
1881 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1882 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1884 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1885 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1893 #if !defined(CONFIG_USER_ONLY)
1895 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1899 /* Discard jump cache entries for any tb which might potentially
1900 overlap the flushed page. */
1901 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1902 memset (&env
->tb_jmp_cache
[i
], 0,
1903 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1905 i
= tb_jmp_cache_hash_page(addr
);
1906 memset (&env
->tb_jmp_cache
[i
], 0,
1907 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1910 static CPUTLBEntry s_cputlb_empty_entry
= {
1918 * If flush_global is true (the usual case), flush all tlb entries.
1919 * If flush_global is false, flush (at least) all tlb entries not
1922 * Since QEMU doesn't currently implement a global/not-global flag
1923 * for tlb entries, at the moment tlb_flush() will also flush all
1924 * tlb entries in the flush_global == false case. This is OK because
1925 * CPU architectures generally permit an implementation to drop
1926 * entries from the TLB at any time, so flushing more entries than
1927 * required is only an efficiency issue, not a correctness issue.
1929 void tlb_flush(CPUState
*env
, int flush_global
)
1933 #if defined(DEBUG_TLB)
1934 printf("tlb_flush:\n");
1936 /* must reset current TB so that interrupts cannot modify the
1937 links while we are modifying them */
1938 env
->current_tb
= NULL
;
1940 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1942 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1943 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1947 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1949 env
->tlb_flush_addr
= -1;
1950 env
->tlb_flush_mask
= 0;
1954 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1956 if (addr
== (tlb_entry
->addr_read
&
1957 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1958 addr
== (tlb_entry
->addr_write
&
1959 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1960 addr
== (tlb_entry
->addr_code
&
1961 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1962 *tlb_entry
= s_cputlb_empty_entry
;
1966 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1971 #if defined(DEBUG_TLB)
1972 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1974 /* Check if we need to flush due to large pages. */
1975 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1976 #if defined(DEBUG_TLB)
1977 printf("tlb_flush_page: forced full flush ("
1978 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1979 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1984 /* must reset current TB so that interrupts cannot modify the
1985 links while we are modifying them */
1986 env
->current_tb
= NULL
;
1988 addr
&= TARGET_PAGE_MASK
;
1989 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1990 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1991 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1993 tlb_flush_jmp_cache(env
, addr
);
1996 /* update the TLBs so that writes to code in the virtual page 'addr'
1998 static void tlb_protect_code(ram_addr_t ram_addr
)
2000 cpu_physical_memory_reset_dirty(ram_addr
,
2001 ram_addr
+ TARGET_PAGE_SIZE
,
2005 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2006 tested for self modifying code */
2007 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
2010 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
2013 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
2014 unsigned long start
, unsigned long length
)
2017 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
2018 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
2019 if ((addr
- start
) < length
) {
2020 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
2025 /* Note: start and end must be within the same ram block. */
2026 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
2030 unsigned long length
, start1
;
2033 start
&= TARGET_PAGE_MASK
;
2034 end
= TARGET_PAGE_ALIGN(end
);
2036 length
= end
- start
;
2039 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
2041 /* we modify the TLB cache so that the dirty bit will be set again
2042 when accessing the range */
2043 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
2044 /* Check that we don't span multiple blocks - this breaks the
2045 address comparisons below. */
2046 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
2047 != (end
- 1) - start
) {
2051 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2053 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2054 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2055 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2061 int cpu_physical_memory_set_dirty_tracking(int enable
)
2064 in_migration
= enable
;
2068 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2070 ram_addr_t ram_addr
;
2073 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
2074 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2075 + tlb_entry
->addend
);
2076 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2077 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2078 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2083 /* update the TLB according to the current state of the dirty bits */
2084 void cpu_tlb_update_dirty(CPUState
*env
)
2088 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2089 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2090 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2094 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2096 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2097 tlb_entry
->addr_write
= vaddr
;
2100 /* update the TLB corresponding to virtual page vaddr
2101 so that it is no longer dirty */
2102 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2107 vaddr
&= TARGET_PAGE_MASK
;
2108 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2109 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2110 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2113 /* Our TLB does not support large pages, so remember the area covered by
2114 large pages and trigger a full TLB flush if these are invalidated. */
2115 static void tlb_add_large_page(CPUState
*env
, target_ulong vaddr
,
2118 target_ulong mask
= ~(size
- 1);
2120 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
2121 env
->tlb_flush_addr
= vaddr
& mask
;
2122 env
->tlb_flush_mask
= mask
;
2125 /* Extend the existing region to include the new page.
2126 This is a compromise between unnecessary flushes and the cost
2127 of maintaining a full variable size TLB. */
2128 mask
&= env
->tlb_flush_mask
;
2129 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
2132 env
->tlb_flush_addr
&= mask
;
2133 env
->tlb_flush_mask
= mask
;
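/* Example (illustrative addresses): with a 2 MB page already recorded at
   0x00200000 and another 2 MB page added at 0x00600000, the mask is widened
   until both addresses agree under it, giving tlb_flush_addr = 0x00000000
   and tlb_flush_mask = 0xff800000, i.e. an 8 MB window whose invalidation
   forces a full TLB flush. */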
2136 static bool is_ram_rom(MemoryRegionSection
*s
)
2138 return memory_region_is_ram(s
->mr
);
2141 static bool is_romd(MemoryRegionSection
*s
)
2143 MemoryRegion
*mr
= s
->mr
;
2145 return mr
->rom_device
&& mr
->readable
;
2148 static bool is_ram_rom_romd(MemoryRegionSection
*s
)
2150 return is_ram_rom(s
) || is_romd(s
);
2153 /* Add a new TLB entry. At most one entry for a given virtual address
2154 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2155 supplied size is only used by tlb_flush_page. */
2156 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2157 target_phys_addr_t paddr
, int prot
,
2158 int mmu_idx
, target_ulong size
)
2160 MemoryRegionSection section
;
2162 target_ulong address
;
2163 target_ulong code_address
;
2164 unsigned long addend
;
2167 target_phys_addr_t iotlb
;
2169 assert(size
>= TARGET_PAGE_SIZE
);
2170 if (size
!= TARGET_PAGE_SIZE
) {
2171 tlb_add_large_page(env
, vaddr
, size
);
2173 section
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2174 #if defined(DEBUG_TLB)
2175 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2176 " prot=%x idx=%d pd=0x%08lx\n",
2177 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2181 if (!is_ram_rom_romd(§ion
)) {
2182 /* IO memory case (romd handled later) */
2183 address
|= TLB_MMIO
;
2185 if (is_ram_rom_romd(§ion
)) {
2186 addend
= (unsigned long)(memory_region_get_ram_ptr(section
.mr
)
2187 + section
.offset_within_region
);
2191 if (is_ram_rom(§ion
)) {
2193 iotlb
= (memory_region_get_ram_addr(section
.mr
)
2194 + section
.offset_within_region
) & TARGET_PAGE_MASK
;
2195 if (!section
.readonly
)
2196 iotlb
|= io_mem_notdirty
.ram_addr
;
2198 iotlb
|= io_mem_rom
.ram_addr
;
2200 /* IO handlers are currently passed a physical address.
2201 It would be nice to pass an offset from the base address
2202 of that region. This would avoid having to special case RAM,
2203 and avoid full address decoding in every device.
2204 We can't use the high bits of pd for this because
2205 IO_MEM_ROMD uses these as a ram address. */
2206 iotlb
= memory_region_get_ram_addr(section
.mr
) & ~TARGET_PAGE_MASK
;
2207 iotlb
+= section
.offset_within_region
;
2210 code_address
= address
;
2211 /* Make accesses to pages with watchpoints go via the
2212 watchpoint trap routines. */
2213 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2214 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2215 /* Avoid trapping reads of pages with a write breakpoint. */
2216 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2217 iotlb
= io_mem_watch
.ram_addr
+ paddr
;
2218 address
|= TLB_MMIO
;
2224 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2225 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2226 te
= &env
->tlb_table
[mmu_idx
][index
];
2227 te
->addend
= addend
- vaddr
;
2228 if (prot
& PAGE_READ
) {
2229 te
->addr_read
= address
;
2234 if (prot
& PAGE_EXEC
) {
2235 te
->addr_code
= code_address
;
2239 if (prot
& PAGE_WRITE
) {
2240 if ((memory_region_is_ram(section
.mr
) && section
.readonly
)
2241 || is_romd(§ion
)) {
2242 /* Write access calls the I/O callback. */
2243 te
->addr_write
= address
| TLB_MMIO
;
2244 } else if (memory_region_is_ram(section
.mr
)
2245 && !cpu_physical_memory_is_dirty(
2246 section
.mr
->ram_addr
2247 + section
.offset_within_region
)) {
2248 te
->addr_write
= address
| TLB_NOTDIRTY
;
2250 te
->addr_write
= address
;
2253 te
->addr_write
= -1;
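/* Summary of the entry filled in above: addr_read/addr_write/addr_code are
   the comparators for the three access kinds, addend turns a guest virtual
   address into a host pointer for RAM-backed pages, TLB_NOTDIRTY forces
   writes through the slow path so dirty tracking and SMC detection see them,
   and TLB_MMIO forces the I/O callbacks. */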
2259 void tlb_flush(CPUState
*env
, int flush_global
)
2263 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2268 * Walks guest process memory "regions" one by one
2269 * and calls callback function 'fn' for each region.
2272 struct walk_memory_regions_data
2274 walk_memory_regions_fn fn
;
2276 unsigned long start
;
2280 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2281 abi_ulong end
, int new_prot
)
2283 if (data
->start
!= -1ul) {
2284 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2290 data
->start
= (new_prot
? end
: -1ul);
2291 data
->prot
= new_prot
;
2296 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2297 abi_ulong base
, int level
, void **lp
)
2303 return walk_memory_regions_end(data
, base
, 0);
2308 for (i
= 0; i
< L2_SIZE
; ++i
) {
2309 int prot
= pd
[i
].flags
;
2311 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2312 if (prot
!= data
->prot
) {
2313 rc
= walk_memory_regions_end(data
, pa
, prot
);
2321 for (i
= 0; i
< L2_SIZE
; ++i
) {
2322 pa
= base
| ((abi_ulong
)i
<<
2323 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2324 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2334 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2336 struct walk_memory_regions_data data
;
2344 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2345 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2346 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2352 return walk_memory_regions_end(&data
, 0, 0);
2355 static int dump_region(void *priv
, abi_ulong start
,
2356 abi_ulong end
, unsigned long prot
)
2358 FILE *f
= (FILE *)priv
;
2360 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2361 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2362 start
, end
, end
- start
,
2363 ((prot
& PAGE_READ
) ? 'r' : '-'),
2364 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2365 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2370 /* dump memory mappings */
2371 void page_dump(FILE *f
)
2373 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2374 "start", "end", "size", "prot");
2375 walk_memory_regions(f
, dump_region
);
int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate ... */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
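
/* Illustrative sketch (not part of the original file): how target mmap
 * emulation would typically mark a freshly mapped guest range.  Because
 * PAGE_WRITE implies PAGE_WRITE_ORG here, a later write protection added
 * for self-modifying-code detection can be undone by page_unprotect().
 * The helper name and variables are hypothetical; guarded out. */
#if 0
static void example_mark_mapping(abi_ulong guest_start, abi_ulong guest_len)
{
    mmap_lock();
    page_set_flags(guest_start, guest_start + guest_len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
    mmap_unlock();
}
#endif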
int page_check_range(target_ulong start, target_ulong len, int flags)
    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));

    if (start + len - 1 < start) {
        /* We've wrapped around. */

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);

        if (!(p->flags & PAGE_VALID))

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))

        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))

            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
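
/* Illustrative sketch (not part of the original file): page_check_range()
 * is how syscall emulation would validate a guest buffer before touching
 * it; a negative return means the access would fault.  The helper name
 * is hypothetical; guarded out. */
#if 0
static bool example_guest_buffer_writable(abi_ulong guest_addr, abi_ulong len)
{
    return page_check_range(guest_addr, len, PAGE_WRITE) == 0;
}
#endif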
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    p = page_find(address >> TARGET_PAGE_BITS);

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);

        mprotect((void *)g2h(host_start), qemu_host_page_size,
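
/* Illustrative sketch (not part of the original file): the host SEGV
 * handler path would first give page_unprotect() a chance to handle a
 * write fault caused by self-modifying code before treating it as a real
 * guest fault.  The helper name and its exact call site are hypothetical;
 * guarded out. */
#if 0
static int example_handle_write_fault(unsigned long host_pc, void *host_addr,
                                      void *puc)
{
    /* page_unprotect() wants a guest address and returns TRUE when the
       fault was only due to our own write protection. */
    return page_unprotect(h2g(host_addr), host_pc, puc);
}
#endif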
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)

#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(target_phys_addr_t base);
static void destroy_page_desc(uint16_t section_index)
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
    if (lp->u.node == PHYS_MAP_NODE_NIL) {

    p = phys_map_nodes[lp->u.node];
    for (i = 0; i < L2_SIZE; ++i) {
            destroy_l2_mapping(&p[i], level - 1);
            destroy_page_desc(p[i].u.leaf);

    lp->u.node = PHYS_MAP_NODE_NIL;

static void destroy_all_mappings(void)
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();

static uint16_t phys_section_add(MemoryRegionSection *section)
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;

static void phys_sections_clear(void)
    phys_sections_nb = 0;
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
static void register_subpage(MemoryRegionSection *section)
    target_phys_addr_t base = section->offset_within_address_space
    MemoryRegionSection existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    target_phys_addr_t start, end;

    assert(existing.mr->subpage || existing.mr == &io_mem_unassigned);

    if (!(existing.mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        ptr = phys_page_find_alloc(base >> TARGET_PAGE_BITS, 1);
        *ptr = phys_section_add(&subsection);
        subpage = container_of(existing.mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));

static void register_multipage(MemoryRegionSection *section)
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr, end_addr;
    uint16_t section_index = phys_section_add(section);

    end_addr = start_addr + (target_phys_addr_t)size;

        uint16_t *p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        assert(*p == phys_section_unassigned);
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

void cpu_register_physical_memory_log(MemoryRegionSection *section,
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    now.size &= TARGET_PAGE_MASK;
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
        register_subpage(&now);
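
/* Worked example (not part of the original file), assuming 4 KiB target
 * pages: a section covering [0x1800, 0x5400) is registered as three
 * pieces by the code above -- an unaligned head [0x1800, 0x2000) via
 * register_subpage(), a page-aligned middle [0x2000, 0x5000) via
 * register_multipage(), and the unaligned tail [0x5000, 0x5400) again
 * via register_subpage(). */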
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_coalesce_mmio_region(addr, size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_uncoalesce_mmio_region(addr, size);

void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

static void *file_ram_alloc(RAMBlock *block,
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {

    fd = mkstemp(filename);
        perror("unable to create backing store for hugepages");

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems, ...
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk. */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
find_ram_offset(ram_addr_t size
)
2799 RAMBlock
*block
, *next_block
;
2800 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
2802 if (QLIST_EMPTY(&ram_list
.blocks
))
2805 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2806 ram_addr_t end
, next
= RAM_ADDR_MAX
;
2808 end
= block
->offset
+ block
->length
;
2810 QLIST_FOREACH(next_block
, &ram_list
.blocks
, next
) {
2811 if (next_block
->offset
>= end
) {
2812 next
= MIN(next
, next_block
->offset
);
2815 if (next
- end
>= size
&& next
- end
< mingap
) {
2817 mingap
= next
- end
;
2821 if (offset
== RAM_ADDR_MAX
) {
2822 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
2830 static ram_addr_t
last_ram_offset(void)
2833 ram_addr_t last
= 0;
2835 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2836 last
= MAX(last
, block
->offset
+ block
->length
);
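
/* Worked example (not part of the original file): with existing blocks at
 * [0x0, 0x8000000) and [0x10000000, 0x18000000), find_ram_offset(0x4000000)
 * looks at the gap after each block; the 128 MiB hole starting at
 * 0x8000000 is the smallest one that still fits, so the new block is
 * placed at offset 0x8000000 (best fit, tracked by "mingap" above). */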
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block, *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {

    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->offset = find_ram_offset(size);
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            fprintf(stderr, "-mem-path option unsupported\n");
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of data
               segment (system break) and this value.  We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
                new_block->host = qemu_vmalloc(size);
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
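
/* Illustrative sketch (not part of the original file): a device model
 * typically reaches this allocator through memory_region_init_ram()
 * rather than calling it directly, but the effect is an allocation like
 * the one below, which returns a ram_addr_t offset into the global RAM
 * space.  The helper name is hypothetical; guarded out. */
#if 0
static ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    /* 16 MiB of video RAM backed by host memory chosen by QEMU. */
    return qemu_ram_alloc(16 * 1024 * 1024, mr);
}
#endif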
void qemu_ram_free_from_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);

void qemu_ram_free(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                    munmap(block->host, block->length);
                    qemu_vfree(block->host);
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                    qemu_vfree(block->host);

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                munmap(vaddr, length);
#if defined(__linux__) && !defined(TARGET_S390X)
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                    flags |= MAP_PRIVATE;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                flags |= MAP_SHARED | MAP_ANONYMOUS;
                area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
            if (area != vaddr) {
                fprintf(stderr, "Could not remap addr: "
                        RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
            qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);

#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                        xen_map_cache(block->offset, block->length, 1);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                        xen_map_cache(block->offset, block->length, 1);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (addr - block->offset + *size > block->length)
                *size = block->length - addr + block->offset;
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

void qemu_put_ram_ptr(void *addr)
    trace_qemu_put_ram_ptr(addr);

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {

        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
, target_phys_addr_t addr
,
3196 #ifdef DEBUG_UNASSIGNED
3197 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3199 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3200 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, size
);
3205 static void unassigned_mem_write(void *opaque
, target_phys_addr_t addr
,
3206 uint64_t val
, unsigned size
)
3208 #ifdef DEBUG_UNASSIGNED
3209 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%"PRIx64
"\n", addr
, val
);
3211 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3212 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, size
);
3216 static const MemoryRegionOps unassigned_mem_ops
= {
3217 .read
= unassigned_mem_read
,
3218 .write
= unassigned_mem_write
,
3219 .endianness
= DEVICE_NATIVE_ENDIAN
,
3222 static uint64_t error_mem_read(void *opaque
, target_phys_addr_t addr
,
3228 static void error_mem_write(void *opaque
, target_phys_addr_t addr
,
3229 uint64_t value
, unsigned size
)
3234 static const MemoryRegionOps error_mem_ops
= {
3235 .read
= error_mem_read
,
3236 .write
= error_mem_write
,
3237 .endianness
= DEVICE_NATIVE_ENDIAN
,
3240 static const MemoryRegionOps rom_mem_ops
= {
3241 .read
= error_mem_read
,
3242 .write
= unassigned_mem_write
,
3243 .endianness
= DEVICE_NATIVE_ENDIAN
,
3246 static void notdirty_mem_write(void *opaque
, target_phys_addr_t ram_addr
,
3247 uint64_t val
, unsigned size
)
3250 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3251 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3252 #if !defined(CONFIG_USER_ONLY)
3253 tb_invalidate_phys_page_fast(ram_addr
, size
);
3254 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3259 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
3262 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
3265 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
3270 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3271 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3272 /* we remove the notdirty callback only if the code has been
3274 if (dirty_flags
== 0xff)
3275 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3278 static const MemoryRegionOps notdirty_mem_ops
= {
3279 .read
= error_mem_read
,
3280 .write
= notdirty_mem_write
,
3281 .endianness
= DEVICE_NATIVE_ENDIAN
,
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                cpu_resume_from_signal(env, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   ... */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    case 1: stb_phys(addr, val);
    case 2: stw_phys(addr, val);
    case 4: stl_phys(addr, val);

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
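
/* Illustrative sketch (not part of the original file): the path above is
 * reached because tlb_set_page() marks pages containing watchpoints with
 * TLB_MMIO, so guest loads/stores are funneled through watch_mem_read()
 * and watch_mem_write() instead of the fast path.  A debugger front end
 * would arm that machinery roughly like this (hypothetical helper and
 * values, guarded out). */
#if 0
static void example_arm_watchpoint(CPUState *env, target_ulong guest_addr)
{
    /* Watch 4 bytes for both reads and writes; the CPUWatchpoint handle
       is discarded in this sketch. */
    cpu_watchpoint_insert(env, guest_addr, 4, BP_MEM_ACCESS, NULL);
}
#endif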
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);

    section = &phys_sections[mmio->sub_section[idx]];
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr->ram_addr, addr, len);

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);

    section = &phys_sections[mmio->sub_section[idx]];
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr->ram_addr, addr, value, len);

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)

    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);

    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(target_phys_addr_t base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);

    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
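
/* Worked example (not part of the original file), assuming 4 KiB target
 * pages: after subpage_register(mmio, 0x100, 0x2ff, s), an access to
 * guest physical address base + 0x180 computes SUBPAGE_IDX(0x180) = 0x180
 * and is dispatched through phys_sections[mmio->sub_section[0x180]],
 * i.e. through section s, while offsets outside [0x100, 0x2ff] still hit
 * the phys_section_unassigned entry registered just above. */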
static int get_free_io_mem_idx(void)
    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {

    fprintf(stderr, "RAN out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index >= IO_MEM_NB_ENTRIES)

    io_mem_region[io_index] = mr;

int cpu_register_io_memory(MemoryRegion *mr)
    return cpu_register_io_memory_fixed(0, mr);

void cpu_unregister_io_memory(int io_index)
    io_mem_region[io_index] = NULL;
    io_mem_used[io_index] = 0;
static uint16_t dummy_section(MemoryRegion *mr)
    MemoryRegionSection section = {
        .offset_within_address_space = 0,
        .offset_within_region = 0,

    return phys_section_add(&section);

static void io_mem_init(void)
    /* Must be first: */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    assert(io_mem_ram.ram_addr == 0);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);

static void core_begin(MemoryListener *listener)
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.u.node = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);

static void core_commit(MemoryListener *listener)
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
    cpu_register_physical_memory_log(section, section->readonly);

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
    cpu_register_physical_memory_log(section, section->readonly);

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)

static void core_log_global_start(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(1);

static void core_log_global_stop(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(0);

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)

static void io_begin(MemoryListener *listener)

static void io_commit(MemoryListener *listener)

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
    iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&section->mr->iorange);

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
    isa_unassign_ioport(section->offset_within_address_space, section->size);

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)

static void io_log_global_start(MemoryListener *listener)

static void io_log_global_stop(MemoryListener *listener)

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,

static MemoryListener io_memory_listener = {
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,

static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))

            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);

            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
    target_phys_addr_t page;
    MemoryRegionSection section;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

            if (!memory_region_is_ram(section.mr)) {
                target_phys_addr_t addr1;
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                /* XXX: could force cpu_single_env to NULL to avoid ... */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write(io_index, addr1, val, 4);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    io_mem_write(io_index, addr1, val, 2);
                    /* 8 bit write access */
                    io_mem_write(io_index, addr1, val, 1);
            } else if (!section.readonly) {
                addr1 = (memory_region_get_ram_addr(section.mr)
                         + section.offset_within_region)
                    | (addr & ~TARGET_PAGE_MASK);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                qemu_put_ram_ptr(ptr);

            if (!is_ram_rom_romd(&section)) {
                target_phys_addr_t addr1;
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                ptr = qemu_get_ram_ptr(section.mr->ram_addr
                                       + section.offset_within_region);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
    target_phys_addr_t page;
    MemoryRegionSection section;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(&section)) {
            unsigned long addr1;
            addr1 = (memory_region_get_ram_addr(section.mr)
                     + section.offset_within_region)
                + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);

    target_phys_addr_t addr;
    target_phys_addr_t len;

static BounceBuffer bounce;

typedef struct MapClient {
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    target_phys_addr_t page;
    MemoryRegionSection section;
    ram_addr_t raddr = RAM_ADDR_MAX;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
            if (todo || bounce.buffer) {
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
                cpu_physical_memory_read(addr, bounce.buffer, l);

            return bounce.buffer;

        raddr = memory_region_get_ram_addr(section.mr)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);

    ret = qemu_ram_ptr_length(raddr, &rlen);

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
    if (buffer != bounce.buffer) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);

    cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
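
/* Illustrative sketch (not part of the original file): the intended
 * calling pattern for the map/unmap pair above, as a DMA-style write into
 * guest memory.  If the region is not plain RAM the call falls back to
 * the single bounce buffer, which is why *plen may come back smaller than
 * requested and why a failed or partial map should be retried, possibly
 * via cpu_register_map_client().  The helper name is hypothetical;
 * guarded out. */
#if 0
static bool example_dma_write(target_phys_addr_t addr,
                              const uint8_t *data, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host || plen < len) {
        if (host) {
            cpu_physical_memory_unmap(host, plen, 1, 0);
        }
        return false; /* caller retries, e.g. from a map-client callback */
    }
    memcpy(host, data, len);
    cpu_physical_memory_unmap(host, plen, 1, len);
    return true;
}
#endif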
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                + section.offset_within_region) +
                               (addr & ~TARGET_PAGE_MASK);
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);

uint32_t ldl_phys(target_phys_addr_t addr)
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);

uint32_t ldl_le_phys(target_phys_addr_t addr)
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);

uint32_t ldl_be_phys(target_phys_addr_t addr)
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
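
/* Worked example (not part of the original file): if the four bytes at a
 * 4-byte-aligned guest physical address are 0x12 0x34 0x56 0x78 in memory
 * order, then ldl_le_phys() returns 0x78563412, ldl_be_phys() returns
 * 0x12345678, and ldl_phys() returns whichever of the two matches the
 * target's native byte order. */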
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                + section.offset_within_region)
                               + (addr & ~TARGET_PAGE_MASK);
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);

uint64_t ldq_phys(target_phys_addr_t addr)
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);

uint64_t ldq_le_phys(target_phys_addr_t addr)
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);

uint64_t ldq_be_phys(target_phys_addr_t addr)
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);

uint32_t ldub_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, &val, 1);

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                + section.offset_within_region)
                               + (addr & ~TARGET_PAGE_MASK);
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);

uint32_t lduw_phys(target_phys_addr_t addr)
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);

uint32_t lduw_le_phys(target_phys_addr_t addr)
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);

uint32_t lduw_be_phys(target_phys_addr_t addr)
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4264 and the code inside is not invalidated. It is useful if the dirty
4265 bits are used to track modified PTEs */
4266 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
4270 MemoryRegionSection section
;
4272 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4274 if (!memory_region_is_ram(section
.mr
) || section
.readonly
) {
4275 if (memory_region_is_ram(section
.mr
)) {
4276 io_index
= io_mem_rom
.ram_addr
;
4278 io_index
= memory_region_get_ram_addr(section
.mr
);
4280 addr
= (addr
& ~TARGET_PAGE_MASK
) + section
.offset_within_region
;
4281 io_mem_write(io_index
, addr
, val
, 4);
4283 unsigned long addr1
= (memory_region_get_ram_addr(section
.mr
)
4285 + section
.offset_within_region
4286 + (addr
& ~TARGET_PAGE_MASK
);
4287 ptr
= qemu_get_ram_ptr(addr1
);
4290 if (unlikely(in_migration
)) {
4291 if (!cpu_physical_memory_is_dirty(addr1
)) {
4292 /* invalidate code */
4293 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4295 cpu_physical_memory_set_dirty_flags(
4296 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4302 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
4306 MemoryRegionSection section
;
4308 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4310 if (!memory_region_is_ram(section
.mr
) || section
.readonly
) {
4311 if (memory_region_is_ram(section
.mr
)) {
4312 io_index
= io_mem_rom
.ram_addr
;
4314 io_index
= memory_region_get_ram_addr(section
.mr
)
4315 & (IO_MEM_NB_ENTRIES
- 1);
4317 addr
= (addr
& ~TARGET_PAGE_MASK
) + section
.offset_within_region
;
4318 #ifdef TARGET_WORDS_BIGENDIAN
4319 io_mem_write(io_index
, addr
, val
>> 32, 4);
4320 io_mem_write(io_index
, addr
+ 4, (uint32_t)val
, 4);
4322 io_mem_write(io_index
, addr
, (uint32_t)val
, 4);
4323 io_mem_write(io_index
, addr
+ 4, val
>> 32, 4);
4326 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
.mr
)
4328 + section
.offset_within_region
)
4329 + (addr
& ~TARGET_PAGE_MASK
);
4334 /* warning: addr must be aligned */
4335 static inline void stl_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4336 enum device_endian endian
)
4340 MemoryRegionSection section
;
4342 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4344 if (!memory_region_is_ram(section
.mr
) || section
.readonly
) {
4345 if (memory_region_is_ram(section
.mr
)) {
4346 io_index
= io_mem_rom
.ram_addr
;
4348 io_index
= memory_region_get_ram_addr(section
.mr
)
4349 & (IO_MEM_NB_ENTRIES
- 1);
4351 addr
= (addr
& ~TARGET_PAGE_MASK
) + section
.offset_within_region
;
4352 #if defined(TARGET_WORDS_BIGENDIAN)
4353 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4357 if (endian
== DEVICE_BIG_ENDIAN
) {
4361 io_mem_write(io_index
, addr
, val
, 4);
4363 unsigned long addr1
;
4364 addr1
= (memory_region_get_ram_addr(section
.mr
) & TARGET_PAGE_MASK
)
4365 + section
.offset_within_region
4366 + (addr
& ~TARGET_PAGE_MASK
);
4368 ptr
= qemu_get_ram_ptr(addr1
);
4370 case DEVICE_LITTLE_ENDIAN
:
4373 case DEVICE_BIG_ENDIAN
:
4380 if (!cpu_physical_memory_is_dirty(addr1
)) {
4381 /* invalidate code */
4382 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4384 cpu_physical_memory_set_dirty_flags(addr1
,
4385 (0xff & ~CODE_DIRTY_FLAG
));
4390 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4392 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4395 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
)
4397 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4400 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
)
4402 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4406 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4409 cpu_physical_memory_write(addr
, &v
, 1);
4412 /* warning: addr must be aligned */
4413 static inline void stw_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4414 enum device_endian endian
)
4418 MemoryRegionSection section
;
4420 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4422 if (!memory_region_is_ram(section
.mr
) || section
.readonly
) {
4423 if (memory_region_is_ram(section
.mr
)) {
4424 io_index
= io_mem_rom
.ram_addr
;
4426 io_index
= memory_region_get_ram_addr(section
.mr
)
4427 & (IO_MEM_NB_ENTRIES
- 1);
4429 addr
= (addr
& ~TARGET_PAGE_MASK
) + section
.offset_within_region
;
4430 #if defined(TARGET_WORDS_BIGENDIAN)
4431 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4435 if (endian
== DEVICE_BIG_ENDIAN
) {
4439 io_mem_write(io_index
, addr
, val
, 2);
4441 unsigned long addr1
;
4442 addr1
= (memory_region_get_ram_addr(section
.mr
) & TARGET_PAGE_MASK
)
4443 + section
.offset_within_region
+ (addr
& ~TARGET_PAGE_MASK
);
4445 ptr
= qemu_get_ram_ptr(addr1
);
4447 case DEVICE_LITTLE_ENDIAN
:
4450 case DEVICE_BIG_ENDIAN
:
4457 if (!cpu_physical_memory_is_dirty(addr1
)) {
4458 /* invalidate code */
4459 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4461 cpu_physical_memory_set_dirty_flags(addr1
,
4462 (0xff & ~CODE_DIRTY_FLAG
));
4467 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4469 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4472 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
)
4474 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4477 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
)
4479 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4483 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
4486 cpu_physical_memory_write(addr
, &val
, 8);
4489 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
)
4491 val
= cpu_to_le64(val
);
4492 cpu_physical_memory_write(addr
, &val
, 8);
4495 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
)
4497 val
= cpu_to_be64(val
);
4498 cpu_physical_memory_write(addr
, &val
, 8);
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
    target_phys_addr_t phys_addr;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(phys_addr, buf, l);
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
    TranslationBlock *tb;
    target_ulong pc, cs_base;

    tb = tb_find_pc((unsigned long)retaddr);
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault ... */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding ... */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);

    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    cs_base = tb->cs_base;

    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a ... */
    cpu_resume_from_signal(env, NULL);
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
        if (tb->tb_next_offset[0] != 0xffff) {
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;

    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
    int mmu_idx, page_index, pd;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !io_mem_region[pd]->rom_device) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
#if defined(TARGET_WORDS_BIGENDIAN)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"