2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
47 #include <machine/profile.h>
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
62 #define WANT_EXEC_OBSOLETE
63 #include "exec-obsolete.h"
65 //#define DEBUG_TB_INVALIDATE
67 //#define DEBUG_UNASSIGNED
69 /* make various TB consistency checks */
70 //#define DEBUG_TB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock
*tbs
;
83 static int code_gen_max_blocks
;
84 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32) && !defined(_WIN64)
97 #define code_gen_section \
98 __attribute__((aligned (16)))
100 #define code_gen_section \
101 __attribute__((aligned (32)))
104 uint8_t code_gen_prologue
[1024] code_gen_section
;
105 static uint8_t *code_gen_buffer
;
106 static unsigned long code_gen_buffer_size
;
107 /* threshold to flush the translated code buffer */
108 static unsigned long code_gen_buffer_max_size
;
109 static uint8_t *code_gen_ptr
;
111 #if !defined(CONFIG_USER_ONLY)
113 static int in_migration
;
115 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
117 static MemoryRegion
*system_memory
;
118 static MemoryRegion
*system_io
;
120 MemoryRegion io_mem_ram
, io_mem_rom
, io_mem_unassigned
, io_mem_notdirty
;
121 static MemoryRegion io_mem_subpage_ram
;
125 CPUArchState
*first_cpu
;
126 /* current CPU in the current thread. It is only valid inside
128 DEFINE_TLS(CPUArchState
*,cpu_single_env
);
129 /* 0 = Do not count executed instructions.
130 1 = Precise instruction counting.
131 2 = Adaptive rate instruction counting. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
146 /* In system mode we want L1_MAP to be based on ram offsets,
147 while in user mode we want it to be based on virtual addresses. */
148 #if !defined(CONFIG_USER_ONLY)
149 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
150 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
155 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
158 /* Size of the L2 (and L3, etc) page tables. */
160 #define L2_SIZE (1 << L2_BITS)
162 #define P_L2_LEVELS \
163 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
165 /* The bits remaining after N lower levels of page tables. */
166 #define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
169 #if V_L1_BITS_REM < 4
170 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172 #define V_L1_BITS V_L1_BITS_REM
175 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
177 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
179 uintptr_t qemu_real_host_page_size
;
180 uintptr_t qemu_host_page_size
;
181 uintptr_t qemu_host_page_mask
;
183 /* This is a multi-level map on the virtual address space.
184 The bottom level has pointers to PageDesc. */
185 static void *l1_map
[V_L1_SIZE
];
187 #if !defined(CONFIG_USER_ONLY)
188 typedef struct PhysPageEntry PhysPageEntry
;
190 static MemoryRegionSection
*phys_sections
;
191 static unsigned phys_sections_nb
, phys_sections_nb_alloc
;
192 static uint16_t phys_section_unassigned
;
193 static uint16_t phys_section_notdirty
;
194 static uint16_t phys_section_rom
;
195 static uint16_t phys_section_watch
;
197 struct PhysPageEntry
{
198 uint16_t is_leaf
: 1;
199 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
203 /* Simple allocator for PhysPageEntry nodes */
204 static PhysPageEntry (*phys_map_nodes
)[L2_SIZE
];
205 static unsigned phys_map_nodes_nb
, phys_map_nodes_nb_alloc
;
207 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
209 /* This is a multi-level map on the physical address space.
210 The bottom level has pointers to MemoryRegionSections. */
211 static PhysPageEntry phys_map
= { .ptr
= PHYS_MAP_NODE_NIL
, .is_leaf
= 0 };
213 static void io_mem_init(void);
214 static void memory_map_init(void);
216 static MemoryRegion io_mem_watch
;
221 static const char *logfilename
= "qemu.log";
223 static const char *logfilename
= "/tmp/qemu.log";
227 static int log_append
= 0;
230 static int tb_flush_count
;
231 static int tb_phys_invalidate_count
;
234 static void map_exec(void *addr
, long size
)
237 VirtualProtect(addr
, size
,
238 PAGE_EXECUTE_READWRITE
, &old_protect
);
/* POSIX variant of map_exec(): make the host memory range
   [addr, addr + size) executable so generated TCG code can run from it.
   The range is rounded out to host page boundaries before mprotect().
   NOTE(review): the mprotect() return value is not checked; failures
   (e.g. on systems with W^X policies) are silently ignored here. */
242 static void map_exec(void *addr
, long size
)
244 unsigned long start
, end
, page_size
;
/* Align 'start' down to a host page boundary. */
246 page_size
= getpagesize();
247 start
= (unsigned long)addr
;
248 start
&= ~(page_size
- 1);
/* Align 'end' up to the next page boundary so the whole range is covered. */
250 end
= (unsigned long)addr
+ size
;
251 end
+= page_size
- 1;
252 end
&= ~(page_size
- 1);
/* Grant read+write+execute on the rounded range. */
254 mprotect((void *)start
, end
- start
,
255 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
259 static void page_init(void)
261 /* NOTE: we can always suppose that qemu_host_page_size >=
265 SYSTEM_INFO system_info
;
267 GetSystemInfo(&system_info
);
268 qemu_real_host_page_size
= system_info
.dwPageSize
;
271 qemu_real_host_page_size
= getpagesize();
273 if (qemu_host_page_size
== 0)
274 qemu_host_page_size
= qemu_real_host_page_size
;
275 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
276 qemu_host_page_size
= TARGET_PAGE_SIZE
;
277 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
279 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
281 #ifdef HAVE_KINFO_GETVMMAP
282 struct kinfo_vmentry
*freep
;
285 freep
= kinfo_getvmmap(getpid(), &cnt
);
288 for (i
= 0; i
< cnt
; i
++) {
289 unsigned long startaddr
, endaddr
;
291 startaddr
= freep
[i
].kve_start
;
292 endaddr
= freep
[i
].kve_end
;
293 if (h2g_valid(startaddr
)) {
294 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
296 if (h2g_valid(endaddr
)) {
297 endaddr
= h2g(endaddr
);
298 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
300 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
302 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
313 last_brk
= (unsigned long)sbrk(0);
315 f
= fopen("/compat/linux/proc/self/maps", "r");
320 unsigned long startaddr
, endaddr
;
323 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
325 if (n
== 2 && h2g_valid(startaddr
)) {
326 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
328 if (h2g_valid(endaddr
)) {
329 endaddr
= h2g(endaddr
);
333 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
345 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
351 #if defined(CONFIG_USER_ONLY)
352 /* We can't use g_malloc because it may recurse into a locked mutex. */
353 # define ALLOC(P, SIZE) \
355 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
356 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
359 # define ALLOC(P, SIZE) \
360 do { P = g_malloc0(SIZE); } while (0)
363 /* Level 1. Always allocated. */
364 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
367 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
374 ALLOC(p
, sizeof(void *) * L2_SIZE
);
378 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
386 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
392 return pd
+ (index
& (L2_SIZE
- 1));
/* Look up the PageDesc for a (target) page index without allocating:
   thin wrapper around page_find_alloc() with alloc == 0, so it returns
   NULL when no descriptor exists for that page yet. */
395 static inline PageDesc
*page_find(tb_page_addr_t index
)
397 return page_find_alloc(index
, 0);
400 #if !defined(CONFIG_USER_ONLY)
/* Ensure the phys_map_nodes pool has room for at least 'nodes' more
   L2-sized node entries, growing the backing array with g_renew().
   Growth is geometric (doubling, minimum 16) but never less than what
   this request needs. */
402 static void phys_map_node_reserve(unsigned nodes
)
404 if (phys_map_nodes_nb
+ nodes
> phys_map_nodes_nb_alloc
) {
405 typedef PhysPageEntry Node
[L2_SIZE
];
/* Double the capacity (start at 16), then clamp up to the request. */
406 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
* 2, 16);
407 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
,
408 phys_map_nodes_nb
+ nodes
);
409 phys_map_nodes
= g_renew(Node
, phys_map_nodes
,
410 phys_map_nodes_nb_alloc
);
/* Allocate the next node from the phys_map_nodes pool and return its
   index.  Callers must have reserved space via phys_map_node_reserve()
   first — the asserts below check that the pool is not exhausted and
   that the index never collides with the PHYS_MAP_NODE_NIL sentinel.
   Every entry of the new node is initialised to a non-leaf pointing at
   PHYS_MAP_NODE_NIL (i.e. "empty"). */
414 static uint16_t phys_map_node_alloc(void)
419 ret
= phys_map_nodes_nb
++;
420 assert(ret
!= PHYS_MAP_NODE_NIL
);
421 assert(ret
!= phys_map_nodes_nb_alloc
);
/* Initialise all L2_SIZE entries of the fresh node to empty. */
422 for (i
= 0; i
< L2_SIZE
; ++i
) {
423 phys_map_nodes
[ret
][i
].is_leaf
= 0;
424 phys_map_nodes
[ret
][i
].ptr
= PHYS_MAP_NODE_NIL
;
/* Reset the phys_map_nodes pool to empty.  Only the in-use counter is
   cleared; the allocated backing storage is kept for reuse. */
429 static void phys_map_nodes_reset(void)
431 phys_map_nodes_nb
= 0;
435 static void phys_page_set_level(PhysPageEntry
*lp
, target_phys_addr_t
*index
,
436 target_phys_addr_t
*nb
, uint16_t leaf
,
441 target_phys_addr_t step
= (target_phys_addr_t
)1 << (level
* L2_BITS
);
443 if (!lp
->is_leaf
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
444 lp
->ptr
= phys_map_node_alloc();
445 p
= phys_map_nodes
[lp
->ptr
];
447 for (i
= 0; i
< L2_SIZE
; i
++) {
449 p
[i
].ptr
= phys_section_unassigned
;
453 p
= phys_map_nodes
[lp
->ptr
];
455 lp
= &p
[(*index
>> (level
* L2_BITS
)) & (L2_SIZE
- 1)];
457 while (*nb
&& lp
< &p
[L2_SIZE
]) {
458 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
464 phys_page_set_level(lp
, index
, nb
, leaf
, level
- 1);
470 static void phys_page_set(target_phys_addr_t index
, target_phys_addr_t nb
,
473 /* Wildly overreserve - it doesn't matter much. */
474 phys_map_node_reserve(3 * P_L2_LEVELS
);
476 phys_page_set_level(&phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
479 MemoryRegionSection
*phys_page_find(target_phys_addr_t index
)
481 PhysPageEntry lp
= phys_map
;
484 uint16_t s_index
= phys_section_unassigned
;
486 for (i
= P_L2_LEVELS
- 1; i
>= 0 && !lp
.is_leaf
; i
--) {
487 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
490 p
= phys_map_nodes
[lp
.ptr
];
491 lp
= p
[(index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1)];
496 return &phys_sections
[s_index
];
/* Return true if 'mr' is not one of the well-known memory regions
   (RAM, ROM, notdirty, a ROM device, or the watchpoint region) —
   i.e. it should be treated as unassigned/unknown by callers. */
499 bool memory_region_is_unassigned(MemoryRegion
*mr
)
501 return mr
!= &io_mem_ram
&& mr
!= &io_mem_rom
502 && mr
!= &io_mem_notdirty
&& !mr
->rom_device
503 && mr
!= &io_mem_watch
;
/* Translate an absolute physical address into an offset relative to the
   given MemoryRegionSection's underlying region: subtract where the
   section sits in the address space, then add the section's offset
   within its region.  (The return of the adjusted 'addr' is in elided
   source lines following this fragment.) */
506 target_phys_addr_t
memory_region_section_addr(MemoryRegionSection
*section
,
507 target_phys_addr_t addr
)
509 addr
-= section
->offset_within_address_space
;
510 addr
+= section
->offset_within_region
;
514 #define mmap_lock() do { } while(0)
515 #define mmap_unlock() do { } while(0)
518 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
520 #if defined(CONFIG_USER_ONLY)
521 /* Currently it is not recommended to allocate big chunks of data in
522 user mode. It will change when a dedicated libc will be used */
523 #define USE_STATIC_CODE_GEN_BUFFER
526 #ifdef USE_STATIC_CODE_GEN_BUFFER
527 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
528 __attribute__((aligned (CODE_GEN_ALIGN
)));
531 static void code_gen_alloc(unsigned long tb_size
)
533 #ifdef USE_STATIC_CODE_GEN_BUFFER
534 code_gen_buffer
= static_code_gen_buffer
;
535 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
536 map_exec(code_gen_buffer
, code_gen_buffer_size
);
538 code_gen_buffer_size
= tb_size
;
539 if (code_gen_buffer_size
== 0) {
540 #if defined(CONFIG_USER_ONLY)
541 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
543 /* XXX: needs adjustments */
544 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
547 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
548 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
549 /* The code gen buffer location may have constraints depending on
550 the host cpu and OS */
551 #if defined(__linux__)
556 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
557 #if defined(__x86_64__)
559 /* Cannot map more than that */
560 if (code_gen_buffer_size
> (800 * 1024 * 1024))
561 code_gen_buffer_size
= (800 * 1024 * 1024);
562 #elif defined(__sparc_v9__)
563 // Map the buffer below 2G, so we can use direct calls and branches
565 start
= (void *) 0x60000000UL
;
566 if (code_gen_buffer_size
> (512 * 1024 * 1024))
567 code_gen_buffer_size
= (512 * 1024 * 1024);
568 #elif defined(__arm__)
569 /* Keep the buffer no bigger than 16MB to branch between blocks */
570 if (code_gen_buffer_size
> 16 * 1024 * 1024)
571 code_gen_buffer_size
= 16 * 1024 * 1024;
572 #elif defined(__s390x__)
573 /* Map the buffer so that we can use direct calls and branches. */
574 /* We have a +- 4GB range on the branches; leave some slop. */
575 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
576 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
578 start
= (void *)0x90000000UL
;
580 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
581 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
583 if (code_gen_buffer
== MAP_FAILED
) {
584 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
588 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
589 || defined(__DragonFly__) || defined(__OpenBSD__) \
590 || defined(__NetBSD__)
594 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
595 #if defined(__x86_64__)
596 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
597 * 0x40000000 is free */
599 addr
= (void *)0x40000000;
600 /* Cannot map more than that */
601 if (code_gen_buffer_size
> (800 * 1024 * 1024))
602 code_gen_buffer_size
= (800 * 1024 * 1024);
603 #elif defined(__sparc_v9__)
604 // Map the buffer below 2G, so we can use direct calls and branches
606 addr
= (void *) 0x60000000UL
;
607 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
608 code_gen_buffer_size
= (512 * 1024 * 1024);
611 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
612 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
614 if (code_gen_buffer
== MAP_FAILED
) {
615 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
620 code_gen_buffer
= g_malloc(code_gen_buffer_size
);
621 map_exec(code_gen_buffer
, code_gen_buffer_size
);
623 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
624 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
625 code_gen_buffer_max_size
= code_gen_buffer_size
-
626 (TCG_MAX_OP_SIZE
* OPC_BUF_SIZE
);
627 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
628 tbs
= g_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
631 /* Must be called before using the QEMU cpus. 'tb_size' is the size
632 (in bytes) allocated to the translation buffer. Zero means default
634 void tcg_exec_init(unsigned long tb_size
)
637 code_gen_alloc(tb_size
);
638 code_gen_ptr
= code_gen_buffer
;
639 tcg_register_jit(code_gen_buffer
, code_gen_buffer_size
);
641 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
642 /* There's no guest base to take into account, so go ahead and
643 initialize the prologue now. */
644 tcg_prologue_init(&tcg_ctx
);
/* TCG is considered enabled once code_gen_alloc() has set up the
   translation buffer, i.e. code_gen_buffer is non-NULL. */
648 bool tcg_enabled(void)
650 return code_gen_buffer
!= NULL
;
653 void cpu_exec_init_all(void)
655 #if !defined(CONFIG_USER_ONLY)
661 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
663 static int cpu_common_post_load(void *opaque
, int version_id
)
665 CPUArchState
*env
= opaque
;
667 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
668 version_id is increased. */
669 env
->interrupt_request
&= ~0x01;
675 static const VMStateDescription vmstate_cpu_common
= {
676 .name
= "cpu_common",
678 .minimum_version_id
= 1,
679 .minimum_version_id_old
= 1,
680 .post_load
= cpu_common_post_load
,
681 .fields
= (VMStateField
[]) {
682 VMSTATE_UINT32(halted
, CPUArchState
),
683 VMSTATE_UINT32(interrupt_request
, CPUArchState
),
684 VMSTATE_END_OF_LIST()
689 CPUArchState
*qemu_get_cpu(int cpu
)
691 CPUArchState
*env
= first_cpu
;
694 if (env
->cpu_index
== cpu
)
702 void cpu_exec_init(CPUArchState
*env
)
707 #if defined(CONFIG_USER_ONLY)
710 env
->next_cpu
= NULL
;
713 while (*penv
!= NULL
) {
714 penv
= &(*penv
)->next_cpu
;
717 env
->cpu_index
= cpu_index
;
719 QTAILQ_INIT(&env
->breakpoints
);
720 QTAILQ_INIT(&env
->watchpoints
);
721 #ifndef CONFIG_USER_ONLY
722 env
->thread_id
= qemu_get_thread_id();
725 #if defined(CONFIG_USER_ONLY)
728 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
729 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
730 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
731 cpu_save
, cpu_load
, env
);
735 /* Allocate a new translation block. Flush the translation buffer if
736 too many translation blocks or too much generated code. */
737 static TranslationBlock
*tb_alloc(target_ulong pc
)
739 TranslationBlock
*tb
;
741 if (nb_tbs
>= code_gen_max_blocks
||
742 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
/* "Free" a translation block.  Only the easy case is handled: if 'tb'
   is the most recently allocated TB, the code generation pointer is
   rewound to reclaim its translated code; otherwise the space is simply
   left to be reclaimed by the next tb_flush(). */
750 void tb_free(TranslationBlock
*tb
)
752 /* In practice this is mostly used for single use temporary TB
753 Ignore the hard cases and just back up if this TB happens to
754 be the last one generated. */
755 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
/* Rewind the generated-code pointer to the start of this TB's code. */
756 code_gen_ptr
= tb
->tc_ptr
;
/* Discard the self-modifying-code bitmap of a page, if present, and
   reset its write-lookup counter so the bitmap heuristic (see
   SMC_BITMAP_USE_THRESHOLD) restarts from scratch. */
761 static inline void invalidate_page_bitmap(PageDesc
*p
)
763 if (p
->code_bitmap
) {
764 g_free(p
->code_bitmap
);
765 p
->code_bitmap
= NULL
;
/* Restart the lookup count used to decide when to rebuild the bitmap. */
767 p
->code_write_count
= 0;
770 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
772 static void page_flush_tb_1 (int level
, void **lp
)
781 for (i
= 0; i
< L2_SIZE
; ++i
) {
782 pd
[i
].first_tb
= NULL
;
783 invalidate_page_bitmap(pd
+ i
);
787 for (i
= 0; i
< L2_SIZE
; ++i
) {
788 page_flush_tb_1 (level
- 1, pp
+ i
);
793 static void page_flush_tb(void)
796 for (i
= 0; i
< V_L1_SIZE
; i
++) {
797 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
801 /* flush all the translation blocks */
802 /* XXX: tb_flush is currently not thread safe */
803 void tb_flush(CPUArchState
*env1
)
806 #if defined(DEBUG_FLUSH)
807 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
808 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
810 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
812 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
813 cpu_abort(env1
, "Internal error: code buffer overflow\n");
817 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
818 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
821 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
824 code_gen_ptr
= code_gen_buffer
;
825 /* XXX: flush processor icache at this point if cache flush is
830 #ifdef DEBUG_TB_CHECK
832 static void tb_invalidate_check(target_ulong address
)
834 TranslationBlock
*tb
;
836 address
&= TARGET_PAGE_MASK
;
837 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
838 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
839 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
840 address
>= tb
->pc
+ tb
->size
)) {
841 printf("ERROR invalidate: address=" TARGET_FMT_lx
842 " PC=%08lx size=%04x\n",
843 address
, (long)tb
->pc
, tb
->size
);
849 /* verify that all the pages have correct rights for code */
850 static void tb_page_check(void)
852 TranslationBlock
*tb
;
853 int i
, flags1
, flags2
;
855 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
856 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
857 flags1
= page_get_flags(tb
->pc
);
858 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
859 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
860 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
861 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
869 /* invalidate one TB */
870 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
873 TranslationBlock
*tb1
;
877 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
880 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
884 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
886 TranslationBlock
*tb1
;
891 n1
= (uintptr_t)tb1
& 3;
892 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
894 *ptb
= tb1
->page_next
[n1
];
897 ptb
= &tb1
->page_next
[n1
];
901 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
903 TranslationBlock
*tb1
, **ptb
;
906 ptb
= &tb
->jmp_next
[n
];
909 /* find tb(n) in circular list */
912 n1
= (uintptr_t)tb1
& 3;
913 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
914 if (n1
== n
&& tb1
== tb
)
917 ptb
= &tb1
->jmp_first
;
919 ptb
= &tb1
->jmp_next
[n1
];
922 /* now we can suppress tb(n) from the list */
923 *ptb
= tb
->jmp_next
[n
];
925 tb
->jmp_next
[n
] = NULL
;
929 /* reset the jump entry 'n' of a TB so that it is not chained to
/* Reset jump slot 'n' of a TB so it is no longer chained to another
   TB: repoint the jump target back at the TB's own code at the offset
   recorded in tb_next_offset[n] (the "not linked" epilogue path). */
931 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
933 tb_set_jmp_target(tb
, n
, (uintptr_t)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
936 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
941 tb_page_addr_t phys_pc
;
942 TranslationBlock
*tb1
, *tb2
;
944 /* remove the TB from the hash list */
945 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
946 h
= tb_phys_hash_func(phys_pc
);
947 tb_remove(&tb_phys_hash
[h
], tb
,
948 offsetof(TranslationBlock
, phys_hash_next
));
950 /* remove the TB from the page list */
951 if (tb
->page_addr
[0] != page_addr
) {
952 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
953 tb_page_remove(&p
->first_tb
, tb
);
954 invalidate_page_bitmap(p
);
956 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
957 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
958 tb_page_remove(&p
->first_tb
, tb
);
959 invalidate_page_bitmap(p
);
962 tb_invalidated_flag
= 1;
964 /* remove the TB from the hash list */
965 h
= tb_jmp_cache_hash_func(tb
->pc
);
966 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
967 if (env
->tb_jmp_cache
[h
] == tb
)
968 env
->tb_jmp_cache
[h
] = NULL
;
971 /* suppress this TB from the two jump lists */
972 tb_jmp_remove(tb
, 0);
973 tb_jmp_remove(tb
, 1);
975 /* suppress any remaining jumps to this TB */
978 n1
= (uintptr_t)tb1
& 3;
981 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
982 tb2
= tb1
->jmp_next
[n1
];
983 tb_reset_jump(tb1
, n1
);
984 tb1
->jmp_next
[n1
] = NULL
;
987 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2); /* fail safe */
989 tb_phys_invalidate_count
++;
992 static inline void set_bits(uint8_t *tab
, int start
, int len
)
998 mask
= 0xff << (start
& 7);
999 if ((start
& ~7) == (end
& ~7)) {
1001 mask
&= ~(0xff << (end
& 7));
1006 start
= (start
+ 8) & ~7;
1008 while (start
< end1
) {
1013 mask
= ~(0xff << (end
& 7));
1019 static void build_page_bitmap(PageDesc
*p
)
1021 int n
, tb_start
, tb_end
;
1022 TranslationBlock
*tb
;
1024 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
1027 while (tb
!= NULL
) {
1028 n
= (uintptr_t)tb
& 3;
1029 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1030 /* NOTE: this is subtle as a TB may span two physical pages */
1032 /* NOTE: tb_end may be after the end of the page, but
1033 it is not a problem */
1034 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1035 tb_end
= tb_start
+ tb
->size
;
1036 if (tb_end
> TARGET_PAGE_SIZE
)
1037 tb_end
= TARGET_PAGE_SIZE
;
1040 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1042 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1043 tb
= tb
->page_next
[n
];
1047 TranslationBlock
*tb_gen_code(CPUArchState
*env
,
1048 target_ulong pc
, target_ulong cs_base
,
1049 int flags
, int cflags
)
1051 TranslationBlock
*tb
;
1053 tb_page_addr_t phys_pc
, phys_page2
;
1054 target_ulong virt_page2
;
1057 phys_pc
= get_page_addr_code(env
, pc
);
1060 /* flush must be done */
1062 /* cannot fail at this point */
1064 /* Don't forget to invalidate previous TB info. */
1065 tb_invalidated_flag
= 1;
1067 tc_ptr
= code_gen_ptr
;
1068 tb
->tc_ptr
= tc_ptr
;
1069 tb
->cs_base
= cs_base
;
1071 tb
->cflags
= cflags
;
1072 cpu_gen_code(env
, tb
, &code_gen_size
);
1073 code_gen_ptr
= (void *)(((uintptr_t)code_gen_ptr
+ code_gen_size
+
1074 CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1076 /* check next page if needed */
1077 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1079 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1080 phys_page2
= get_page_addr_code(env
, virt_page2
);
1082 tb_link_page(tb
, phys_pc
, phys_page2
);
1086 /* invalidate all TBs which intersect with the target physical page
1087 starting in range [start;end[. NOTE: start and end must refer to
1088 the same physical page. 'is_cpu_write_access' should be true if called
1089 from a real cpu write access: the virtual CPU will exit the current
1090 TB if code is modified inside this TB. */
1091 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1092 int is_cpu_write_access
)
1094 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1095 CPUArchState
*env
= cpu_single_env
;
1096 tb_page_addr_t tb_start
, tb_end
;
1099 #ifdef TARGET_HAS_PRECISE_SMC
1100 int current_tb_not_found
= is_cpu_write_access
;
1101 TranslationBlock
*current_tb
= NULL
;
1102 int current_tb_modified
= 0;
1103 target_ulong current_pc
= 0;
1104 target_ulong current_cs_base
= 0;
1105 int current_flags
= 0;
1106 #endif /* TARGET_HAS_PRECISE_SMC */
1108 p
= page_find(start
>> TARGET_PAGE_BITS
);
1111 if (!p
->code_bitmap
&&
1112 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1113 is_cpu_write_access
) {
1114 /* build code bitmap */
1115 build_page_bitmap(p
);
1118 /* we remove all the TBs in the range [start, end[ */
1119 /* XXX: see if in some cases it could be faster to invalidate all the code */
1121 while (tb
!= NULL
) {
1122 n
= (uintptr_t)tb
& 3;
1123 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1124 tb_next
= tb
->page_next
[n
];
1125 /* NOTE: this is subtle as a TB may span two physical pages */
1127 /* NOTE: tb_end may be after the end of the page, but
1128 it is not a problem */
1129 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1130 tb_end
= tb_start
+ tb
->size
;
1132 tb_start
= tb
->page_addr
[1];
1133 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1135 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1136 #ifdef TARGET_HAS_PRECISE_SMC
1137 if (current_tb_not_found
) {
1138 current_tb_not_found
= 0;
1140 if (env
->mem_io_pc
) {
1141 /* now we have a real cpu fault */
1142 current_tb
= tb_find_pc(env
->mem_io_pc
);
1145 if (current_tb
== tb
&&
1146 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1147 /* If we are modifying the current TB, we must stop
1148 its execution. We could be more precise by checking
1149 that the modification is after the current PC, but it
1150 would require a specialized function to partially
1151 restore the CPU state */
1153 current_tb_modified
= 1;
1154 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1155 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1158 #endif /* TARGET_HAS_PRECISE_SMC */
1159 /* we need to do that to handle the case where a signal
1160 occurs while doing tb_phys_invalidate() */
1163 saved_tb
= env
->current_tb
;
1164 env
->current_tb
= NULL
;
1166 tb_phys_invalidate(tb
, -1);
1168 env
->current_tb
= saved_tb
;
1169 if (env
->interrupt_request
&& env
->current_tb
)
1170 cpu_interrupt(env
, env
->interrupt_request
);
1175 #if !defined(CONFIG_USER_ONLY)
1176 /* if no code remaining, no need to continue to use slow writes */
1178 invalidate_page_bitmap(p
);
1179 if (is_cpu_write_access
) {
1180 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1184 #ifdef TARGET_HAS_PRECISE_SMC
1185 if (current_tb_modified
) {
1186 /* we generate a block containing just the instruction
1187 modifying the memory. It will ensure that it cannot modify
1189 env
->current_tb
= NULL
;
1190 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1191 cpu_resume_from_signal(env
, NULL
);
1196 /* len must be <= 8 and start must be a multiple of len */
1197 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1203 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1204 cpu_single_env
->mem_io_vaddr
, len
,
1205 cpu_single_env
->eip
,
1206 cpu_single_env
->eip
+
1207 (intptr_t)cpu_single_env
->segs
[R_CS
].base
);
1210 p
= page_find(start
>> TARGET_PAGE_BITS
);
1213 if (p
->code_bitmap
) {
1214 offset
= start
& ~TARGET_PAGE_MASK
;
1215 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1216 if (b
& ((1 << len
) - 1))
1220 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1224 #if !defined(CONFIG_SOFTMMU)
1225 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1226 uintptr_t pc
, void *puc
)
1228 TranslationBlock
*tb
;
1231 #ifdef TARGET_HAS_PRECISE_SMC
1232 TranslationBlock
*current_tb
= NULL
;
1233 CPUArchState
*env
= cpu_single_env
;
1234 int current_tb_modified
= 0;
1235 target_ulong current_pc
= 0;
1236 target_ulong current_cs_base
= 0;
1237 int current_flags
= 0;
1240 addr
&= TARGET_PAGE_MASK
;
1241 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1245 #ifdef TARGET_HAS_PRECISE_SMC
1246 if (tb
&& pc
!= 0) {
1247 current_tb
= tb_find_pc(pc
);
1250 while (tb
!= NULL
) {
1251 n
= (uintptr_t)tb
& 3;
1252 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1253 #ifdef TARGET_HAS_PRECISE_SMC
1254 if (current_tb
== tb
&&
1255 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1256 /* If we are modifying the current TB, we must stop
1257 its execution. We could be more precise by checking
1258 that the modification is after the current PC, but it
1259 would require a specialized function to partially
1260 restore the CPU state */
1262 current_tb_modified
= 1;
1263 cpu_restore_state(current_tb
, env
, pc
);
1264 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1267 #endif /* TARGET_HAS_PRECISE_SMC */
1268 tb_phys_invalidate(tb
, addr
);
1269 tb
= tb
->page_next
[n
];
1272 #ifdef TARGET_HAS_PRECISE_SMC
1273 if (current_tb_modified
) {
1274 /* we generate a block containing just the instruction
1275 modifying the memory. It will ensure that it cannot modify
1277 env
->current_tb
= NULL
;
1278 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1279 cpu_resume_from_signal(env
, puc
);
1285 /* add the tb in the target page and protect it if necessary */
1286 static inline void tb_alloc_page(TranslationBlock
*tb
,
1287 unsigned int n
, tb_page_addr_t page_addr
)
1290 #ifndef CONFIG_USER_ONLY
1291 bool page_already_protected
;
1294 tb
->page_addr
[n
] = page_addr
;
1295 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1296 tb
->page_next
[n
] = p
->first_tb
;
1297 #ifndef CONFIG_USER_ONLY
1298 page_already_protected
= p
->first_tb
!= NULL
;
1300 p
->first_tb
= (TranslationBlock
*)((uintptr_t)tb
| n
);
1301 invalidate_page_bitmap(p
);
1303 #if defined(TARGET_HAS_SMC) || 1
1305 #if defined(CONFIG_USER_ONLY)
1306 if (p
->flags
& PAGE_WRITE
) {
1311 /* force the host page as non writable (writes will have a
1312 page fault + mprotect overhead) */
1313 page_addr
&= qemu_host_page_mask
;
1315 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1316 addr
+= TARGET_PAGE_SIZE
) {
1318 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1322 p2
->flags
&= ~PAGE_WRITE
;
1324 mprotect(g2h(page_addr
), qemu_host_page_size
,
1325 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1326 #ifdef DEBUG_TB_INVALIDATE
1327 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1332 /* if some code is already present, then the pages are already
1333 protected. So we handle the case where only the first TB is
1334 allocated in a physical page */
1335 if (!page_already_protected
) {
1336 tlb_protect_code(page_addr
);
1340 #endif /* TARGET_HAS_SMC */
1343 /* add a new TB and link it to the physical page tables. phys_page2 is
1344 (-1) to indicate that only one page contains the TB. */
1345 void tb_link_page(TranslationBlock
*tb
,
1346 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1349 TranslationBlock
**ptb
;
1351 /* Grab the mmap lock to stop another thread invalidating this TB
1352 before we are done. */
1354 /* add in the physical hash table */
1355 h
= tb_phys_hash_func(phys_pc
);
1356 ptb
= &tb_phys_hash
[h
];
1357 tb
->phys_hash_next
= *ptb
;
1360 /* add in the page list */
1361 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1362 if (phys_page2
!= -1)
1363 tb_alloc_page(tb
, 1, phys_page2
);
1365 tb
->page_addr
[1] = -1;
1367 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2);
1368 tb
->jmp_next
[0] = NULL
;
1369 tb
->jmp_next
[1] = NULL
;
1371 /* init original jump addresses */
1372 if (tb
->tb_next_offset
[0] != 0xffff)
1373 tb_reset_jump(tb
, 0);
1374 if (tb
->tb_next_offset
[1] != 0xffff)
1375 tb_reset_jump(tb
, 1);
1377 #ifdef DEBUG_TB_CHECK
1383 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1384 tb[1].tc_ptr. Return NULL if not found */
1385 TranslationBlock
*tb_find_pc(uintptr_t tc_ptr
)
1387 int m_min
, m_max
, m
;
1389 TranslationBlock
*tb
;
1393 if (tc_ptr
< (uintptr_t)code_gen_buffer
||
1394 tc_ptr
>= (uintptr_t)code_gen_ptr
) {
1397 /* binary search (cf Knuth) */
1400 while (m_min
<= m_max
) {
1401 m
= (m_min
+ m_max
) >> 1;
1403 v
= (uintptr_t)tb
->tc_ptr
;
1406 else if (tc_ptr
< v
) {
1415 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1417 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1419 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1422 tb1
= tb
->jmp_next
[n
];
1424 /* find head of list */
1426 n1
= (uintptr_t)tb1
& 3;
1427 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1430 tb1
= tb1
->jmp_next
[n1
];
1432 /* we are now sure now that tb jumps to tb1 */
1435 /* remove tb from the jmp_first list */
1436 ptb
= &tb_next
->jmp_first
;
1439 n1
= (uintptr_t)tb1
& 3;
1440 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1441 if (n1
== n
&& tb1
== tb
)
1443 ptb
= &tb1
->jmp_next
[n1
];
1445 *ptb
= tb
->jmp_next
[n
];
1446 tb
->jmp_next
[n
] = NULL
;
1448 /* suppress the jump to next tb in generated code */
1449 tb_reset_jump(tb
, n
);
1451 /* suppress jumps in the tb on which we could have jumped */
1452 tb_reset_jump_recursive(tb_next
);
1456 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1458 tb_reset_jump_recursive2(tb
, 0);
1459 tb_reset_jump_recursive2(tb
, 1);
1462 #if defined(TARGET_HAS_ICE)
1463 #if defined(CONFIG_USER_ONLY)
1464 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1466 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1469 void tb_invalidate_phys_addr(target_phys_addr_t addr
)
1471 ram_addr_t ram_addr
;
1472 MemoryRegionSection
*section
;
1474 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1475 if (!(memory_region_is_ram(section
->mr
)
1476 || (section
->mr
->rom_device
&& section
->mr
->readable
))) {
1479 ram_addr
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1480 + memory_region_section_addr(section
, addr
);
1481 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1484 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1486 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env
, pc
));
1489 #endif /* TARGET_HAS_ICE */
1491 #if defined(CONFIG_USER_ONLY)
1492 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1497 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1498 int flags
, CPUWatchpoint
**watchpoint
)
1503 /* Add a watchpoint. */
1504 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1505 int flags
, CPUWatchpoint
**watchpoint
)
1507 target_ulong len_mask
= ~(len
- 1);
1510 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1511 if ((len
& (len
- 1)) || (addr
& ~len_mask
) ||
1512 len
== 0 || len
> TARGET_PAGE_SIZE
) {
1513 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1514 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1517 wp
= g_malloc(sizeof(*wp
));
1520 wp
->len_mask
= len_mask
;
1523 /* keep all GDB-injected watchpoints in front */
1525 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1527 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1529 tlb_flush_page(env
, addr
);
1536 /* Remove a specific watchpoint. */
1537 int cpu_watchpoint_remove(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1540 target_ulong len_mask
= ~(len
- 1);
1543 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1544 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1545 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1546 cpu_watchpoint_remove_by_ref(env
, wp
);
1553 /* Remove a specific watchpoint by reference. */
1554 void cpu_watchpoint_remove_by_ref(CPUArchState
*env
, CPUWatchpoint
*watchpoint
)
1556 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1558 tlb_flush_page(env
, watchpoint
->vaddr
);
1563 /* Remove all matching watchpoints. */
1564 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1566 CPUWatchpoint
*wp
, *next
;
1568 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1569 if (wp
->flags
& mask
)
1570 cpu_watchpoint_remove_by_ref(env
, wp
);
1575 /* Add a breakpoint. */
1576 int cpu_breakpoint_insert(CPUArchState
*env
, target_ulong pc
, int flags
,
1577 CPUBreakpoint
**breakpoint
)
1579 #if defined(TARGET_HAS_ICE)
1582 bp
= g_malloc(sizeof(*bp
));
1587 /* keep all GDB-injected breakpoints in front */
1589 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1591 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1593 breakpoint_invalidate(env
, pc
);
1603 /* Remove a specific breakpoint. */
1604 int cpu_breakpoint_remove(CPUArchState
*env
, target_ulong pc
, int flags
)
1606 #if defined(TARGET_HAS_ICE)
1609 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1610 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1611 cpu_breakpoint_remove_by_ref(env
, bp
);
1621 /* Remove a specific breakpoint by reference. */
1622 void cpu_breakpoint_remove_by_ref(CPUArchState
*env
, CPUBreakpoint
*breakpoint
)
1624 #if defined(TARGET_HAS_ICE)
1625 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1627 breakpoint_invalidate(env
, breakpoint
->pc
);
1633 /* Remove all matching breakpoints. */
1634 void cpu_breakpoint_remove_all(CPUArchState
*env
, int mask
)
1636 #if defined(TARGET_HAS_ICE)
1637 CPUBreakpoint
*bp
, *next
;
1639 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1640 if (bp
->flags
& mask
)
1641 cpu_breakpoint_remove_by_ref(env
, bp
);
1646 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1647 CPU loop after each instruction */
1648 void cpu_single_step(CPUArchState
*env
, int enabled
)
1650 #if defined(TARGET_HAS_ICE)
1651 if (env
->singlestep_enabled
!= enabled
) {
1652 env
->singlestep_enabled
= enabled
;
1654 kvm_update_guest_debug(env
, 0);
1656 /* must flush all the translated code to avoid inconsistencies */
1657 /* XXX: only flush what is necessary */
1664 /* enable or disable low levels log */
1665 void cpu_set_log(int log_flags
)
1667 loglevel
= log_flags
;
1668 if (loglevel
&& !logfile
) {
1669 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1671 perror(logfilename
);
1674 #if !defined(CONFIG_SOFTMMU)
1675 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1677 static char logfile_buf
[4096];
1678 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1680 #elif defined(_WIN32)
1681 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1682 setvbuf(logfile
, NULL
, _IONBF
, 0);
1684 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1688 if (!loglevel
&& logfile
) {
1694 void cpu_set_log_filename(const char *filename
)
1696 logfilename
= strdup(filename
);
1701 cpu_set_log(loglevel
);
1704 static void cpu_unlink_tb(CPUArchState
*env
)
1706 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1707 problem and hope the cpu will stop of its own accord. For userspace
1708 emulation this often isn't actually as bad as it sounds. Often
1709 signals are used primarily to interrupt blocking syscalls. */
1710 TranslationBlock
*tb
;
1711 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1713 spin_lock(&interrupt_lock
);
1714 tb
= env
->current_tb
;
1715 /* if the cpu is currently executing code, we must unlink it and
1716 all the potentially executing TB */
1718 env
->current_tb
= NULL
;
1719 tb_reset_jump_recursive(tb
);
1721 spin_unlock(&interrupt_lock
);
1724 #ifndef CONFIG_USER_ONLY
1725 /* mask must never be zero, except for A20 change call */
1726 static void tcg_handle_interrupt(CPUArchState
*env
, int mask
)
1730 old_mask
= env
->interrupt_request
;
1731 env
->interrupt_request
|= mask
;
1734 * If called from iothread context, wake the target cpu in
1737 if (!qemu_cpu_is_self(env
)) {
1743 env
->icount_decr
.u16
.high
= 0xffff;
1745 && (mask
& ~old_mask
) != 0) {
1746 cpu_abort(env
, "Raised interrupt while not in I/O function");
1753 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1755 #else /* CONFIG_USER_ONLY */
1757 void cpu_interrupt(CPUArchState
*env
, int mask
)
1759 env
->interrupt_request
|= mask
;
1762 #endif /* CONFIG_USER_ONLY */
1764 void cpu_reset_interrupt(CPUArchState
*env
, int mask
)
1766 env
->interrupt_request
&= ~mask
;
1769 void cpu_exit(CPUArchState
*env
)
1771 env
->exit_request
= 1;
1775 const CPULogItem cpu_log_items
[] = {
1776 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1777 "show generated host assembly code for each compiled TB" },
1778 { CPU_LOG_TB_IN_ASM
, "in_asm",
1779 "show target assembly code for each compiled TB" },
1780 { CPU_LOG_TB_OP
, "op",
1781 "show micro ops for each compiled TB" },
1782 { CPU_LOG_TB_OP_OPT
, "op_opt",
1785 "before eflags optimization and "
1787 "after liveness analysis" },
1788 { CPU_LOG_INT
, "int",
1789 "show interrupts/exceptions in short format" },
1790 { CPU_LOG_EXEC
, "exec",
1791 "show trace before each executed TB (lots of logs)" },
1792 { CPU_LOG_TB_CPU
, "cpu",
1793 "show CPU state before block translation" },
1795 { CPU_LOG_PCALL
, "pcall",
1796 "show protected mode far calls/returns/exceptions" },
1797 { CPU_LOG_RESET
, "cpu_reset",
1798 "show CPU state before CPU resets" },
1801 { CPU_LOG_IOPORT
, "ioport",
1802 "show all i/o ports accesses" },
/* Return nonzero iff the first 'n' bytes of 's1' exactly match the
   NUL-terminated string 's2' (i.e. strlen(s2) == n and the bytes are
   equal).  Used to match comma-separated log-mask tokens, so 'n' is
   always a nonnegative token length. */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* Cast avoids a signed/unsigned comparison: strlen() returns size_t. */
    if (strlen(s2) != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, n) == 0;
}
1814 /* takes a comma separated list of log masks. Return 0 if error. */
1815 int cpu_str_to_log_mask(const char *str
)
1817 const CPULogItem
*item
;
1824 p1
= strchr(p
, ',');
1827 if(cmp1(p
,p1
-p
,"all")) {
1828 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1832 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1833 if (cmp1(p
, p1
- p
, item
->name
))
1847 void cpu_abort(CPUArchState
*env
, const char *fmt
, ...)
1854 fprintf(stderr
, "qemu: fatal: ");
1855 vfprintf(stderr
, fmt
, ap
);
1856 fprintf(stderr
, "\n");
1858 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1860 cpu_dump_state(env
, stderr
, fprintf
, 0);
1862 if (qemu_log_enabled()) {
1863 qemu_log("qemu: fatal: ");
1864 qemu_log_vprintf(fmt
, ap2
);
1867 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1869 log_cpu_state(env
, 0);
1876 #if defined(CONFIG_USER_ONLY)
1878 struct sigaction act
;
1879 sigfillset(&act
.sa_mask
);
1880 act
.sa_handler
= SIG_DFL
;
1881 sigaction(SIGABRT
, &act
, NULL
);
1887 CPUArchState
*cpu_copy(CPUArchState
*env
)
1889 CPUArchState
*new_env
= cpu_init(env
->cpu_model_str
);
1890 CPUArchState
*next_cpu
= new_env
->next_cpu
;
1891 int cpu_index
= new_env
->cpu_index
;
1892 #if defined(TARGET_HAS_ICE)
1897 memcpy(new_env
, env
, sizeof(CPUArchState
));
1899 /* Preserve chaining and index. */
1900 new_env
->next_cpu
= next_cpu
;
1901 new_env
->cpu_index
= cpu_index
;
1903 /* Clone all break/watchpoints.
1904 Note: Once we support ptrace with hw-debug register access, make sure
1905 BP_CPU break/watchpoints are handled correctly on clone. */
1906 QTAILQ_INIT(&env
->breakpoints
);
1907 QTAILQ_INIT(&env
->watchpoints
);
1908 #if defined(TARGET_HAS_ICE)
1909 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1910 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1912 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1913 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1921 #if !defined(CONFIG_USER_ONLY)
1922 void tb_flush_jmp_cache(CPUArchState
*env
, target_ulong addr
)
1926 /* Discard jump cache entries for any tb which might potentially
1927 overlap the flushed page. */
1928 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1929 memset (&env
->tb_jmp_cache
[i
], 0,
1930 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1932 i
= tb_jmp_cache_hash_page(addr
);
1933 memset (&env
->tb_jmp_cache
[i
], 0,
1934 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1937 /* Note: start and end must be within the same ram block. */
1938 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1941 uintptr_t length
, start1
;
1943 start
&= TARGET_PAGE_MASK
;
1944 end
= TARGET_PAGE_ALIGN(end
);
1946 length
= end
- start
;
1949 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
1951 /* we modify the TLB cache so that the dirty bit will be set again
1952 when accessing the range */
1953 start1
= (uintptr_t)qemu_safe_ram_ptr(start
);
1954 /* Check that we don't span multiple blocks - this breaks the
1955 address comparisons below. */
1956 if ((uintptr_t)qemu_safe_ram_ptr(end
- 1) - start1
1957 != (end
- 1) - start
) {
1960 cpu_tlb_reset_dirty_all(start1
, length
);
1963 int cpu_physical_memory_set_dirty_tracking(int enable
)
1966 in_migration
= enable
;
1970 target_phys_addr_t
memory_region_section_get_iotlb(CPUArchState
*env
,
1971 MemoryRegionSection
*section
,
1973 target_phys_addr_t paddr
,
1975 target_ulong
*address
)
1977 target_phys_addr_t iotlb
;
1980 if (memory_region_is_ram(section
->mr
)) {
1982 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1983 + memory_region_section_addr(section
, paddr
);
1984 if (!section
->readonly
) {
1985 iotlb
|= phys_section_notdirty
;
1987 iotlb
|= phys_section_rom
;
1990 /* IO handlers are currently passed a physical address.
1991 It would be nice to pass an offset from the base address
1992 of that region. This would avoid having to special case RAM,
1993 and avoid full address decoding in every device.
1994 We can't use the high bits of pd for this because
1995 IO_MEM_ROMD uses these as a ram address. */
1996 iotlb
= section
- phys_sections
;
1997 iotlb
+= memory_region_section_addr(section
, paddr
);
2000 /* Make accesses to pages with watchpoints go via the
2001 watchpoint trap routines. */
2002 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2003 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2004 /* Avoid trapping reads of pages with a write breakpoint. */
2005 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2006 iotlb
= phys_section_watch
+ paddr
;
2007 *address
|= TLB_MMIO
;
2018 * Walks guest process memory "regions" one by one
2019 * and calls callback function 'fn' for each region.
2022 struct walk_memory_regions_data
2024 walk_memory_regions_fn fn
;
2030 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2031 abi_ulong end
, int new_prot
)
2033 if (data
->start
!= -1ul) {
2034 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2040 data
->start
= (new_prot
? end
: -1ul);
2041 data
->prot
= new_prot
;
2046 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2047 abi_ulong base
, int level
, void **lp
)
2053 return walk_memory_regions_end(data
, base
, 0);
2058 for (i
= 0; i
< L2_SIZE
; ++i
) {
2059 int prot
= pd
[i
].flags
;
2061 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2062 if (prot
!= data
->prot
) {
2063 rc
= walk_memory_regions_end(data
, pa
, prot
);
2071 for (i
= 0; i
< L2_SIZE
; ++i
) {
2072 pa
= base
| ((abi_ulong
)i
<<
2073 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2074 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2084 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2086 struct walk_memory_regions_data data
;
2094 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2095 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2096 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2102 return walk_memory_regions_end(&data
, 0, 0);
2105 static int dump_region(void *priv
, abi_ulong start
,
2106 abi_ulong end
, unsigned long prot
)
2108 FILE *f
= (FILE *)priv
;
2110 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2111 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2112 start
, end
, end
- start
,
2113 ((prot
& PAGE_READ
) ? 'r' : '-'),
2114 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2115 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2120 /* dump memory mappings */
2121 void page_dump(FILE *f
)
2123 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2124 "start", "end", "size", "prot");
2125 walk_memory_regions(f
, dump_region
);
2128 int page_get_flags(target_ulong address
)
2132 p
= page_find(address
>> TARGET_PAGE_BITS
);
2138 /* Modify the flags of a page and invalidate the code if necessary.
2139 The flag PAGE_WRITE_ORG is positioned automatically depending
2140 on PAGE_WRITE. The mmap_lock should already be held. */
2141 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2143 target_ulong addr
, len
;
2145 /* This function should never be called with addresses outside the
2146 guest address space. If this assert fires, it probably indicates
2147 a missing call to h2g_valid. */
2148 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2149 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2151 assert(start
< end
);
2153 start
= start
& TARGET_PAGE_MASK
;
2154 end
= TARGET_PAGE_ALIGN(end
);
2156 if (flags
& PAGE_WRITE
) {
2157 flags
|= PAGE_WRITE_ORG
;
2160 for (addr
= start
, len
= end
- start
;
2162 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2163 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2165 /* If the write protection bit is set, then we invalidate
2167 if (!(p
->flags
& PAGE_WRITE
) &&
2168 (flags
& PAGE_WRITE
) &&
2170 tb_invalidate_phys_page(addr
, 0, NULL
);
2176 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2182 /* This function should never be called with addresses outside the
2183 guest address space. If this assert fires, it probably indicates
2184 a missing call to h2g_valid. */
2185 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2186 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2192 if (start
+ len
- 1 < start
) {
2193 /* We've wrapped around. */
2197 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2198 start
= start
& TARGET_PAGE_MASK
;
2200 for (addr
= start
, len
= end
- start
;
2202 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2203 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2206 if( !(p
->flags
& PAGE_VALID
) )
2209 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2211 if (flags
& PAGE_WRITE
) {
2212 if (!(p
->flags
& PAGE_WRITE_ORG
))
2214 /* unprotect the page if it was put read-only because it
2215 contains translated code */
2216 if (!(p
->flags
& PAGE_WRITE
)) {
2217 if (!page_unprotect(addr
, 0, NULL
))
2226 /* called from signal handler: invalidate the code and unprotect the
2227 page. Return TRUE if the fault was successfully handled. */
2228 int page_unprotect(target_ulong address
, uintptr_t pc
, void *puc
)
2232 target_ulong host_start
, host_end
, addr
;
2234 /* Technically this isn't safe inside a signal handler. However we
2235 know this only ever happens in a synchronous SEGV handler, so in
2236 practice it seems to be ok. */
2239 p
= page_find(address
>> TARGET_PAGE_BITS
);
2245 /* if the page was really writable, then we change its
2246 protection back to writable */
2247 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
2248 host_start
= address
& qemu_host_page_mask
;
2249 host_end
= host_start
+ qemu_host_page_size
;
2252 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2253 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2254 p
->flags
|= PAGE_WRITE
;
2257 /* and since the content will be modified, we must invalidate
2258 the corresponding translated code. */
2259 tb_invalidate_phys_page(addr
, pc
, puc
);
2260 #ifdef DEBUG_TB_CHECK
2261 tb_invalidate_check(addr
);
2264 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2273 #endif /* defined(CONFIG_USER_ONLY) */
2275 #if !defined(CONFIG_USER_ONLY)
2277 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2278 typedef struct subpage_t
{
2280 target_phys_addr_t base
;
2281 uint16_t sub_section
[TARGET_PAGE_SIZE
];
2284 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2286 static subpage_t
*subpage_init(target_phys_addr_t base
);
2287 static void destroy_page_desc(uint16_t section_index
)
2289 MemoryRegionSection
*section
= &phys_sections
[section_index
];
2290 MemoryRegion
*mr
= section
->mr
;
2293 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
2294 memory_region_destroy(&subpage
->iomem
);
2299 static void destroy_l2_mapping(PhysPageEntry
*lp
, unsigned level
)
2304 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
2308 p
= phys_map_nodes
[lp
->ptr
];
2309 for (i
= 0; i
< L2_SIZE
; ++i
) {
2310 if (!p
[i
].is_leaf
) {
2311 destroy_l2_mapping(&p
[i
], level
- 1);
2313 destroy_page_desc(p
[i
].ptr
);
2317 lp
->ptr
= PHYS_MAP_NODE_NIL
;
2320 static void destroy_all_mappings(void)
2322 destroy_l2_mapping(&phys_map
, P_L2_LEVELS
- 1);
2323 phys_map_nodes_reset();
2326 static uint16_t phys_section_add(MemoryRegionSection
*section
)
2328 if (phys_sections_nb
== phys_sections_nb_alloc
) {
2329 phys_sections_nb_alloc
= MAX(phys_sections_nb_alloc
* 2, 16);
2330 phys_sections
= g_renew(MemoryRegionSection
, phys_sections
,
2331 phys_sections_nb_alloc
);
2333 phys_sections
[phys_sections_nb
] = *section
;
2334 return phys_sections_nb
++;
2337 static void phys_sections_clear(void)
2339 phys_sections_nb
= 0;
2342 /* register physical memory.
2343 For RAM, 'size' must be a multiple of the target page size.
2344 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2345 io memory page. The address used when calling the IO function is
2346 the offset from the start of the region, plus region_offset. Both
2347 start_addr and region_offset are rounded down to a page boundary
2348 before calculating this offset. This should not be a problem unless
2349 the low bits of start_addr and region_offset differ. */
2350 static void register_subpage(MemoryRegionSection
*section
)
2353 target_phys_addr_t base
= section
->offset_within_address_space
2355 MemoryRegionSection
*existing
= phys_page_find(base
>> TARGET_PAGE_BITS
);
2356 MemoryRegionSection subsection
= {
2357 .offset_within_address_space
= base
,
2358 .size
= TARGET_PAGE_SIZE
,
2360 target_phys_addr_t start
, end
;
2362 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
2364 if (!(existing
->mr
->subpage
)) {
2365 subpage
= subpage_init(base
);
2366 subsection
.mr
= &subpage
->iomem
;
2367 phys_page_set(base
>> TARGET_PAGE_BITS
, 1,
2368 phys_section_add(&subsection
));
2370 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
2372 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
2373 end
= start
+ section
->size
;
2374 subpage_register(subpage
, start
, end
, phys_section_add(section
));
2378 static void register_multipage(MemoryRegionSection
*section
)
2380 target_phys_addr_t start_addr
= section
->offset_within_address_space
;
2381 ram_addr_t size
= section
->size
;
2382 target_phys_addr_t addr
;
2383 uint16_t section_index
= phys_section_add(section
);
2388 phys_page_set(addr
>> TARGET_PAGE_BITS
, size
>> TARGET_PAGE_BITS
,
2392 void cpu_register_physical_memory_log(MemoryRegionSection
*section
,
2395 MemoryRegionSection now
= *section
, remain
= *section
;
2397 if ((now
.offset_within_address_space
& ~TARGET_PAGE_MASK
)
2398 || (now
.size
< TARGET_PAGE_SIZE
)) {
2399 now
.size
= MIN(TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
2400 - now
.offset_within_address_space
,
2402 register_subpage(&now
);
2403 remain
.size
-= now
.size
;
2404 remain
.offset_within_address_space
+= now
.size
;
2405 remain
.offset_within_region
+= now
.size
;
2408 now
.size
&= TARGET_PAGE_MASK
;
2410 register_multipage(&now
);
2411 remain
.size
-= now
.size
;
2412 remain
.offset_within_address_space
+= now
.size
;
2413 remain
.offset_within_region
+= now
.size
;
2417 register_subpage(&now
);
2422 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2425 kvm_coalesce_mmio_region(addr
, size
);
2428 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2431 kvm_uncoalesce_mmio_region(addr
, size
);
/* Flush any buffered coalesced-MMIO writes to the device models.
   NOTE(review): the kvm_enabled() guard was missing from the extracted
   text and has been restored — confirm against upstream. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2440 #if defined(__linux__) && !defined(TARGET_S390X)
2442 #include <sys/vfs.h>
2444 #define HUGETLBFS_MAGIC 0x958458f6
2446 static long gethugepagesize(const char *path
)
2452 ret
= statfs(path
, &fs
);
2453 } while (ret
!= 0 && errno
== EINTR
);
2460 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2461 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2466 static void *file_ram_alloc(RAMBlock
*block
,
2476 unsigned long hpagesize
;
2478 hpagesize
= gethugepagesize(path
);
2483 if (memory
< hpagesize
) {
2487 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2488 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2492 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2496 fd
= mkstemp(filename
);
2498 perror("unable to create backing store for hugepages");
2505 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2508 * ftruncate is not supported by hugetlbfs in older
2509 * hosts, so don't bother bailing out on errors.
2510 * If anything goes wrong with it under other filesystems,
2513 if (ftruncate(fd
, memory
))
2514 perror("ftruncate");
2517 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2518 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2519 * to sidestep this quirk.
2521 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2522 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2524 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2526 if (area
== MAP_FAILED
) {
2527 perror("file_ram_alloc: can't mmap RAM pages");
2536 static ram_addr_t
find_ram_offset(ram_addr_t size
)
2538 RAMBlock
*block
, *next_block
;
2539 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
2541 if (QLIST_EMPTY(&ram_list
.blocks
))
2544 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2545 ram_addr_t end
, next
= RAM_ADDR_MAX
;
2547 end
= block
->offset
+ block
->length
;
2549 QLIST_FOREACH(next_block
, &ram_list
.blocks
, next
) {
2550 if (next_block
->offset
>= end
) {
2551 next
= MIN(next
, next_block
->offset
);
2554 if (next
- end
>= size
&& next
- end
< mingap
) {
2556 mingap
= next
- end
;
2560 if (offset
== RAM_ADDR_MAX
) {
2561 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
2569 static ram_addr_t
last_ram_offset(void)
2572 ram_addr_t last
= 0;
2574 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2575 last
= MAX(last
, block
->offset
+ block
->length
);
2580 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
2582 RAMBlock
*new_block
, *block
;
2585 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2586 if (block
->offset
== addr
) {
2592 assert(!new_block
->idstr
[0]);
2594 if (dev
&& dev
->parent_bus
&& dev
->parent_bus
->info
->get_dev_path
) {
2595 char *id
= dev
->parent_bus
->info
->get_dev_path(dev
);
2597 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
2601 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
2603 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2604 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
2605 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
2612 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
2615 RAMBlock
*new_block
;
2617 size
= TARGET_PAGE_ALIGN(size
);
2618 new_block
= g_malloc0(sizeof(*new_block
));
2621 new_block
->offset
= find_ram_offset(size
);
2623 new_block
->host
= host
;
2624 new_block
->flags
|= RAM_PREALLOC_MASK
;
2627 #if defined (__linux__) && !defined(TARGET_S390X)
2628 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2629 if (!new_block
->host
) {
2630 new_block
->host
= qemu_vmalloc(size
);
2631 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2634 fprintf(stderr
, "-mem-path option unsupported\n");
2638 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2639 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2640 an system defined value, which is at least 256GB. Larger systems
2641 have larger values. We put the guest between the end of data
2642 segment (system break) and this value. We use 32GB as a base to
2643 have enough room for the system break to grow. */
2644 new_block
->host
= mmap((void*)0x800000000, size
,
2645 PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2646 MAP_SHARED
| MAP_ANONYMOUS
| MAP_FIXED
, -1, 0);
2647 if (new_block
->host
== MAP_FAILED
) {
2648 fprintf(stderr
, "Allocating RAM failed\n");
2652 if (xen_enabled()) {
2653 xen_ram_alloc(new_block
->offset
, size
, mr
);
2655 new_block
->host
= qemu_vmalloc(size
);
2658 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2661 new_block
->length
= size
;
2663 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2665 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
2666 last_ram_offset() >> TARGET_PAGE_BITS
);
2667 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2668 0xff, size
>> TARGET_PAGE_BITS
);
2671 kvm_setup_guest_memory(new_block
->host
, size
);
2673 return new_block
->offset
;
2676 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
2678 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
2681 void qemu_ram_free_from_ptr(ram_addr_t addr
)
2685 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2686 if (addr
== block
->offset
) {
2687 QLIST_REMOVE(block
, next
);
2694 void qemu_ram_free(ram_addr_t addr
)
2698 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2699 if (addr
== block
->offset
) {
2700 QLIST_REMOVE(block
, next
);
2701 if (block
->flags
& RAM_PREALLOC_MASK
) {
2703 } else if (mem_path
) {
2704 #if defined (__linux__) && !defined(TARGET_S390X)
2706 munmap(block
->host
, block
->length
);
2709 qemu_vfree(block
->host
);
2715 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2716 munmap(block
->host
, block
->length
);
2718 if (xen_enabled()) {
2719 xen_invalidate_map_cache_entry(block
->host
);
2721 qemu_vfree(block
->host
);
2733 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
2740 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2741 offset
= addr
- block
->offset
;
2742 if (offset
< block
->length
) {
2743 vaddr
= block
->host
+ offset
;
2744 if (block
->flags
& RAM_PREALLOC_MASK
) {
2748 munmap(vaddr
, length
);
2750 #if defined(__linux__) && !defined(TARGET_S390X)
2753 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
2756 flags
|= MAP_PRIVATE
;
2758 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2759 flags
, block
->fd
, offset
);
2761 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2762 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2769 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2770 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
2771 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2774 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2775 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2779 if (area
!= vaddr
) {
2780 fprintf(stderr
, "Could not remap addr: "
2781 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
2785 qemu_madvise(vaddr
, length
, QEMU_MADV_MERGEABLE
);
2791 #endif /* !_WIN32 */
2793 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2794 With the exception of the softmmu code in this file, this should
2795 only be used for local memory (e.g. video ram) that the device owns,
2796 and knows it isn't going to access beyond the end of the block.
2798 It should not be used for general purpose DMA.
2799 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2801 void *qemu_get_ram_ptr(ram_addr_t addr
)
2805 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2806 if (addr
- block
->offset
< block
->length
) {
2807 /* Move this entry to to start of the list. */
2808 if (block
!= QLIST_FIRST(&ram_list
.blocks
)) {
2809 QLIST_REMOVE(block
, next
);
2810 QLIST_INSERT_HEAD(&ram_list
.blocks
, block
, next
);
2812 if (xen_enabled()) {
2813 /* We need to check if the requested address is in the RAM
2814 * because we don't want to map the entire memory in QEMU.
2815 * In that case just map until the end of the page.
2817 if (block
->offset
== 0) {
2818 return xen_map_cache(addr
, 0, 0);
2819 } else if (block
->host
== NULL
) {
2821 xen_map_cache(block
->offset
, block
->length
, 1);
2824 return block
->host
+ (addr
- block
->offset
);
2828 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
2834 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2835 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2837 void *qemu_safe_ram_ptr(ram_addr_t addr
)
2841 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2842 if (addr
- block
->offset
< block
->length
) {
2843 if (xen_enabled()) {
2844 /* We need to check if the requested address is in the RAM
2845 * because we don't want to map the entire memory in QEMU.
2846 * In that case just map until the end of the page.
2848 if (block
->offset
== 0) {
2849 return xen_map_cache(addr
, 0, 0);
2850 } else if (block
->host
== NULL
) {
2852 xen_map_cache(block
->offset
, block
->length
, 1);
2855 return block
->host
+ (addr
- block
->offset
);
2859 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
2865 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2866 * but takes a size argument */
2867 void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
)
2872 if (xen_enabled()) {
2873 return xen_map_cache(addr
, *size
, 1);
2877 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2878 if (addr
- block
->offset
< block
->length
) {
2879 if (addr
- block
->offset
+ *size
> block
->length
)
2880 *size
= block
->length
- addr
+ block
->offset
;
2881 return block
->host
+ (addr
- block
->offset
);
2885 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
2890 void qemu_put_ram_ptr(void *addr
)
2892 trace_qemu_put_ram_ptr(addr
);
2895 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
2898 uint8_t *host
= ptr
;
2900 if (xen_enabled()) {
2901 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
2905 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2906 /* This case append when the block is not mapped. */
2907 if (block
->host
== NULL
) {
2910 if (host
- block
->host
< block
->length
) {
2911 *ram_addr
= block
->offset
+ (host
- block
->host
);
2919 /* Some of the softmmu routines need to translate from a host pointer
2920 (typically a TLB entry) back to a ram offset. */
2921 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
2923 ram_addr_t ram_addr
;
2925 if (qemu_ram_addr_from_host(ptr
, &ram_addr
)) {
2926 fprintf(stderr
, "Bad ram pointer %p\n", ptr
);
2932 static uint64_t unassigned_mem_read(void *opaque
, target_phys_addr_t addr
,
2935 #ifdef DEBUG_UNASSIGNED
2936 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2938 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2939 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, size
);
2944 static void unassigned_mem_write(void *opaque
, target_phys_addr_t addr
,
2945 uint64_t val
, unsigned size
)
2947 #ifdef DEBUG_UNASSIGNED
2948 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%"PRIx64
"\n", addr
, val
);
2950 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2951 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, size
);
2955 static const MemoryRegionOps unassigned_mem_ops
= {
2956 .read
= unassigned_mem_read
,
2957 .write
= unassigned_mem_write
,
2958 .endianness
= DEVICE_NATIVE_ENDIAN
,
2961 static uint64_t error_mem_read(void *opaque
, target_phys_addr_t addr
,
2967 static void error_mem_write(void *opaque
, target_phys_addr_t addr
,
2968 uint64_t value
, unsigned size
)
2973 static const MemoryRegionOps error_mem_ops
= {
2974 .read
= error_mem_read
,
2975 .write
= error_mem_write
,
2976 .endianness
= DEVICE_NATIVE_ENDIAN
,
2979 static const MemoryRegionOps rom_mem_ops
= {
2980 .read
= error_mem_read
,
2981 .write
= unassigned_mem_write
,
2982 .endianness
= DEVICE_NATIVE_ENDIAN
,
2985 static void notdirty_mem_write(void *opaque
, target_phys_addr_t ram_addr
,
2986 uint64_t val
, unsigned size
)
2989 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
2990 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2991 #if !defined(CONFIG_USER_ONLY)
2992 tb_invalidate_phys_page_fast(ram_addr
, size
);
2993 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
2998 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
3001 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
3004 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
3009 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3010 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3011 /* we remove the notdirty callback only if the code has been
3013 if (dirty_flags
== 0xff)
3014 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3017 static const MemoryRegionOps notdirty_mem_ops
= {
3018 .read
= error_mem_read
,
3019 .write
= notdirty_mem_write
,
3020 .endianness
= DEVICE_NATIVE_ENDIAN
,
3023 /* Generate a debug exception if a watchpoint has been hit. */
3024 static void check_watchpoint(int offset
, int len_mask
, int flags
)
3026 CPUArchState
*env
= cpu_single_env
;
3027 target_ulong pc
, cs_base
;
3028 TranslationBlock
*tb
;
3033 if (env
->watchpoint_hit
) {
3034 /* We re-entered the check after replacing the TB. Now raise
3035 * the debug interrupt so that is will trigger after the
3036 * current instruction. */
3037 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
3040 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
3041 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
3042 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
3043 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
3044 wp
->flags
|= BP_WATCHPOINT_HIT
;
3045 if (!env
->watchpoint_hit
) {
3046 env
->watchpoint_hit
= wp
;
3047 tb
= tb_find_pc(env
->mem_io_pc
);
3049 cpu_abort(env
, "check_watchpoint: could not find TB for "
3050 "pc=%p", (void *)env
->mem_io_pc
);
3052 cpu_restore_state(tb
, env
, env
->mem_io_pc
);
3053 tb_phys_invalidate(tb
, -1);
3054 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
3055 env
->exception_index
= EXCP_DEBUG
;
3058 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
3059 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
3060 cpu_resume_from_signal(env
, NULL
);
3064 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
3069 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3070 so these check for a hit then pass through to the normal out-of-line
3072 static uint64_t watch_mem_read(void *opaque
, target_phys_addr_t addr
,
3075 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
3077 case 1: return ldub_phys(addr
);
3078 case 2: return lduw_phys(addr
);
3079 case 4: return ldl_phys(addr
);
3084 static void watch_mem_write(void *opaque
, target_phys_addr_t addr
,
3085 uint64_t val
, unsigned size
)
3087 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
3090 stb_phys(addr
, val
);
3093 stw_phys(addr
, val
);
3096 stl_phys(addr
, val
);
3102 static const MemoryRegionOps watch_mem_ops
= {
3103 .read
= watch_mem_read
,
3104 .write
= watch_mem_write
,
3105 .endianness
= DEVICE_NATIVE_ENDIAN
,
3108 static uint64_t subpage_read(void *opaque
, target_phys_addr_t addr
,
3111 subpage_t
*mmio
= opaque
;
3112 unsigned int idx
= SUBPAGE_IDX(addr
);
3113 MemoryRegionSection
*section
;
3114 #if defined(DEBUG_SUBPAGE)
3115 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3116 mmio
, len
, addr
, idx
);
3119 section
= &phys_sections
[mmio
->sub_section
[idx
]];
3121 addr
-= section
->offset_within_address_space
;
3122 addr
+= section
->offset_within_region
;
3123 return io_mem_read(section
->mr
, addr
, len
);
3126 static void subpage_write(void *opaque
, target_phys_addr_t addr
,
3127 uint64_t value
, unsigned len
)
3129 subpage_t
*mmio
= opaque
;
3130 unsigned int idx
= SUBPAGE_IDX(addr
);
3131 MemoryRegionSection
*section
;
3132 #if defined(DEBUG_SUBPAGE)
3133 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3134 " idx %d value %"PRIx64
"\n",
3135 __func__
, mmio
, len
, addr
, idx
, value
);
3138 section
= &phys_sections
[mmio
->sub_section
[idx
]];
3140 addr
-= section
->offset_within_address_space
;
3141 addr
+= section
->offset_within_region
;
3142 io_mem_write(section
->mr
, addr
, value
, len
);
3145 static const MemoryRegionOps subpage_ops
= {
3146 .read
= subpage_read
,
3147 .write
= subpage_write
,
3148 .endianness
= DEVICE_NATIVE_ENDIAN
,
3151 static uint64_t subpage_ram_read(void *opaque
, target_phys_addr_t addr
,
3154 ram_addr_t raddr
= addr
;
3155 void *ptr
= qemu_get_ram_ptr(raddr
);
3157 case 1: return ldub_p(ptr
);
3158 case 2: return lduw_p(ptr
);
3159 case 4: return ldl_p(ptr
);
3164 static void subpage_ram_write(void *opaque
, target_phys_addr_t addr
,
3165 uint64_t value
, unsigned size
)
3167 ram_addr_t raddr
= addr
;
3168 void *ptr
= qemu_get_ram_ptr(raddr
);
3170 case 1: return stb_p(ptr
, value
);
3171 case 2: return stw_p(ptr
, value
);
3172 case 4: return stl_p(ptr
, value
);
3177 static const MemoryRegionOps subpage_ram_ops
= {
3178 .read
= subpage_ram_read
,
3179 .write
= subpage_ram_write
,
3180 .endianness
= DEVICE_NATIVE_ENDIAN
,
3183 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3188 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3190 idx
= SUBPAGE_IDX(start
);
3191 eidx
= SUBPAGE_IDX(end
);
3192 #if defined(DEBUG_SUBPAGE)
3193 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3194 mmio
, start
, end
, idx
, eidx
, memory
);
3196 if (memory_region_is_ram(phys_sections
[section
].mr
)) {
3197 MemoryRegionSection new_section
= phys_sections
[section
];
3198 new_section
.mr
= &io_mem_subpage_ram
;
3199 section
= phys_section_add(&new_section
);
3201 for (; idx
<= eidx
; idx
++) {
3202 mmio
->sub_section
[idx
] = section
;
3208 static subpage_t
*subpage_init(target_phys_addr_t base
)
3212 mmio
= g_malloc0(sizeof(subpage_t
));
3215 memory_region_init_io(&mmio
->iomem
, &subpage_ops
, mmio
,
3216 "subpage", TARGET_PAGE_SIZE
);
3217 mmio
->iomem
.subpage
= true;
3218 #if defined(DEBUG_SUBPAGE)
3219 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3220 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3222 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, phys_section_unassigned
);
3227 static uint16_t dummy_section(MemoryRegion
*mr
)
3229 MemoryRegionSection section
= {
3231 .offset_within_address_space
= 0,
3232 .offset_within_region
= 0,
3236 return phys_section_add(§ion
);
3239 MemoryRegion
*iotlb_to_region(target_phys_addr_t index
)
3241 return phys_sections
[index
& ~TARGET_PAGE_MASK
].mr
;
3244 static void io_mem_init(void)
3246 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
3247 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
3248 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
3249 "unassigned", UINT64_MAX
);
3250 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
3251 "notdirty", UINT64_MAX
);
3252 memory_region_init_io(&io_mem_subpage_ram
, &subpage_ram_ops
, NULL
,
3253 "subpage-ram", UINT64_MAX
);
3254 memory_region_init_io(&io_mem_watch
, &watch_mem_ops
, NULL
,
3255 "watch", UINT64_MAX
);
3258 static void core_begin(MemoryListener
*listener
)
3260 destroy_all_mappings();
3261 phys_sections_clear();
3262 phys_map
.ptr
= PHYS_MAP_NODE_NIL
;
3263 phys_section_unassigned
= dummy_section(&io_mem_unassigned
);
3264 phys_section_notdirty
= dummy_section(&io_mem_notdirty
);
3265 phys_section_rom
= dummy_section(&io_mem_rom
);
3266 phys_section_watch
= dummy_section(&io_mem_watch
);
3269 static void core_commit(MemoryListener
*listener
)
3273 /* since each CPU stores ram addresses in its TLB cache, we must
3274 reset the modified entries */
3276 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
3281 static void core_region_add(MemoryListener
*listener
,
3282 MemoryRegionSection
*section
)
3284 cpu_register_physical_memory_log(section
, section
->readonly
);
3287 static void core_region_del(MemoryListener
*listener
,
3288 MemoryRegionSection
*section
)
3292 static void core_region_nop(MemoryListener
*listener
,
3293 MemoryRegionSection
*section
)
3295 cpu_register_physical_memory_log(section
, section
->readonly
);
3298 static void core_log_start(MemoryListener
*listener
,
3299 MemoryRegionSection
*section
)
3303 static void core_log_stop(MemoryListener
*listener
,
3304 MemoryRegionSection
*section
)
3308 static void core_log_sync(MemoryListener
*listener
,
3309 MemoryRegionSection
*section
)
3313 static void core_log_global_start(MemoryListener
*listener
)
3315 cpu_physical_memory_set_dirty_tracking(1);
3318 static void core_log_global_stop(MemoryListener
*listener
)
3320 cpu_physical_memory_set_dirty_tracking(0);
3323 static void core_eventfd_add(MemoryListener
*listener
,
3324 MemoryRegionSection
*section
,
3325 bool match_data
, uint64_t data
, int fd
)
3329 static void core_eventfd_del(MemoryListener
*listener
,
3330 MemoryRegionSection
*section
,
3331 bool match_data
, uint64_t data
, int fd
)
3335 static void io_begin(MemoryListener
*listener
)
3339 static void io_commit(MemoryListener
*listener
)
3343 static void io_region_add(MemoryListener
*listener
,
3344 MemoryRegionSection
*section
)
3346 MemoryRegionIORange
*mrio
= g_new(MemoryRegionIORange
, 1);
3348 mrio
->mr
= section
->mr
;
3349 mrio
->offset
= section
->offset_within_region
;
3350 iorange_init(&mrio
->iorange
, &memory_region_iorange_ops
,
3351 section
->offset_within_address_space
, section
->size
);
3352 ioport_register(&mrio
->iorange
);
3355 static void io_region_del(MemoryListener
*listener
,
3356 MemoryRegionSection
*section
)
3358 isa_unassign_ioport(section
->offset_within_address_space
, section
->size
);
3361 static void io_region_nop(MemoryListener
*listener
,
3362 MemoryRegionSection
*section
)
3366 static void io_log_start(MemoryListener
*listener
,
3367 MemoryRegionSection
*section
)
3371 static void io_log_stop(MemoryListener
*listener
,
3372 MemoryRegionSection
*section
)
3376 static void io_log_sync(MemoryListener
*listener
,
3377 MemoryRegionSection
*section
)
3381 static void io_log_global_start(MemoryListener
*listener
)
3385 static void io_log_global_stop(MemoryListener
*listener
)
3389 static void io_eventfd_add(MemoryListener
*listener
,
3390 MemoryRegionSection
*section
,
3391 bool match_data
, uint64_t data
, int fd
)
3395 static void io_eventfd_del(MemoryListener
*listener
,
3396 MemoryRegionSection
*section
,
3397 bool match_data
, uint64_t data
, int fd
)
3401 static MemoryListener core_memory_listener
= {
3402 .begin
= core_begin
,
3403 .commit
= core_commit
,
3404 .region_add
= core_region_add
,
3405 .region_del
= core_region_del
,
3406 .region_nop
= core_region_nop
,
3407 .log_start
= core_log_start
,
3408 .log_stop
= core_log_stop
,
3409 .log_sync
= core_log_sync
,
3410 .log_global_start
= core_log_global_start
,
3411 .log_global_stop
= core_log_global_stop
,
3412 .eventfd_add
= core_eventfd_add
,
3413 .eventfd_del
= core_eventfd_del
,
3417 static MemoryListener io_memory_listener
= {
3419 .commit
= io_commit
,
3420 .region_add
= io_region_add
,
3421 .region_del
= io_region_del
,
3422 .region_nop
= io_region_nop
,
3423 .log_start
= io_log_start
,
3424 .log_stop
= io_log_stop
,
3425 .log_sync
= io_log_sync
,
3426 .log_global_start
= io_log_global_start
,
3427 .log_global_stop
= io_log_global_stop
,
3428 .eventfd_add
= io_eventfd_add
,
3429 .eventfd_del
= io_eventfd_del
,
3433 static void memory_map_init(void)
3435 system_memory
= g_malloc(sizeof(*system_memory
));
3436 memory_region_init(system_memory
, "system", INT64_MAX
);
3437 set_system_memory_map(system_memory
);
3439 system_io
= g_malloc(sizeof(*system_io
));
3440 memory_region_init(system_io
, "io", 65536);
3441 set_system_io_map(system_io
);
3443 memory_listener_register(&core_memory_listener
, system_memory
);
3444 memory_listener_register(&io_memory_listener
, system_io
);
3447 MemoryRegion
*get_system_memory(void)
3449 return system_memory
;
3452 MemoryRegion
*get_system_io(void)
3457 #endif /* !defined(CONFIG_USER_ONLY) */
3459 /* physical memory access (slow version, mainly for debug) */
3460 #if defined(CONFIG_USER_ONLY)
3461 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
3462 uint8_t *buf
, int len
, int is_write
)
3469 page
= addr
& TARGET_PAGE_MASK
;
3470 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3473 flags
= page_get_flags(page
);
3474 if (!(flags
& PAGE_VALID
))
3477 if (!(flags
& PAGE_WRITE
))
3479 /* XXX: this code should not depend on lock_user */
3480 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3483 unlock_user(p
, addr
, l
);
3485 if (!(flags
& PAGE_READ
))
3487 /* XXX: this code should not depend on lock_user */
3488 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3491 unlock_user(p
, addr
, 0);
3501 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
3502 int len
, int is_write
)
3507 target_phys_addr_t page
;
3508 MemoryRegionSection
*section
;
3511 page
= addr
& TARGET_PAGE_MASK
;
3512 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3515 section
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3518 if (!memory_region_is_ram(section
->mr
)) {
3519 target_phys_addr_t addr1
;
3520 addr1
= memory_region_section_addr(section
, addr
);
3521 /* XXX: could force cpu_single_env to NULL to avoid
3523 if (l
>= 4 && ((addr1
& 3) == 0)) {
3524 /* 32 bit write access */
3526 io_mem_write(section
->mr
, addr1
, val
, 4);
3528 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3529 /* 16 bit write access */
3531 io_mem_write(section
->mr
, addr1
, val
, 2);
3534 /* 8 bit write access */
3536 io_mem_write(section
->mr
, addr1
, val
, 1);
3539 } else if (!section
->readonly
) {
3541 addr1
= memory_region_get_ram_addr(section
->mr
)
3542 + memory_region_section_addr(section
, addr
);
3544 ptr
= qemu_get_ram_ptr(addr1
);
3545 memcpy(ptr
, buf
, l
);
3546 if (!cpu_physical_memory_is_dirty(addr1
)) {
3547 /* invalidate code */
3548 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3550 cpu_physical_memory_set_dirty_flags(
3551 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3553 qemu_put_ram_ptr(ptr
);
3556 if (!(memory_region_is_ram(section
->mr
) ||
3557 memory_region_is_romd(section
->mr
))) {
3558 target_phys_addr_t addr1
;
3560 addr1
= memory_region_section_addr(section
, addr
);
3561 if (l
>= 4 && ((addr1
& 3) == 0)) {
3562 /* 32 bit read access */
3563 val
= io_mem_read(section
->mr
, addr1
, 4);
3566 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3567 /* 16 bit read access */
3568 val
= io_mem_read(section
->mr
, addr1
, 2);
3572 /* 8 bit read access */
3573 val
= io_mem_read(section
->mr
, addr1
, 1);
3579 ptr
= qemu_get_ram_ptr(section
->mr
->ram_addr
3580 + memory_region_section_addr(section
,
3582 memcpy(buf
, ptr
, l
);
3583 qemu_put_ram_ptr(ptr
);
3592 /* used for ROM loading : can write in RAM and ROM */
3593 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3594 const uint8_t *buf
, int len
)
3598 target_phys_addr_t page
;
3599 MemoryRegionSection
*section
;
3602 page
= addr
& TARGET_PAGE_MASK
;
3603 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3606 section
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3608 if (!(memory_region_is_ram(section
->mr
) ||
3609 memory_region_is_romd(section
->mr
))) {
3612 unsigned long addr1
;
3613 addr1
= memory_region_get_ram_addr(section
->mr
)
3614 + memory_region_section_addr(section
, addr
);
3616 ptr
= qemu_get_ram_ptr(addr1
);
3617 memcpy(ptr
, buf
, l
);
3618 qemu_put_ram_ptr(ptr
);
3628 target_phys_addr_t addr
;
3629 target_phys_addr_t len
;
3632 static BounceBuffer bounce
;
3634 typedef struct MapClient
{
3636 void (*callback
)(void *opaque
);
3637 QLIST_ENTRY(MapClient
) link
;
3640 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
3641 = QLIST_HEAD_INITIALIZER(map_client_list
);
3643 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3645 MapClient
*client
= g_malloc(sizeof(*client
));
3647 client
->opaque
= opaque
;
3648 client
->callback
= callback
;
3649 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3653 void cpu_unregister_map_client(void *_client
)
3655 MapClient
*client
= (MapClient
*)_client
;
3657 QLIST_REMOVE(client
, link
);
3661 static void cpu_notify_map_clients(void)
3665 while (!QLIST_EMPTY(&map_client_list
)) {
3666 client
= QLIST_FIRST(&map_client_list
);
3667 client
->callback(client
->opaque
);
3668 cpu_unregister_map_client(client
);
3672 /* Map a physical memory region into a host virtual address.
3673 * May map a subset of the requested range, given by and returned in *plen.
3674 * May return NULL if resources needed to perform the mapping are exhausted.
3675 * Use only for reads OR writes - not for read-modify-write operations.
3676 * Use cpu_register_map_client() to know when retrying the map operation is
3677 * likely to succeed.
3679 void *cpu_physical_memory_map(target_phys_addr_t addr
,
3680 target_phys_addr_t
*plen
,
3683 target_phys_addr_t len
= *plen
;
3684 target_phys_addr_t todo
= 0;
3686 target_phys_addr_t page
;
3687 MemoryRegionSection
*section
;
3688 ram_addr_t raddr
= RAM_ADDR_MAX
;
3693 page
= addr
& TARGET_PAGE_MASK
;
3694 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3697 section
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3699 if (!(memory_region_is_ram(section
->mr
) && !section
->readonly
)) {
3700 if (todo
|| bounce
.buffer
) {
3703 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3707 cpu_physical_memory_read(addr
, bounce
.buffer
, l
);
3711 return bounce
.buffer
;
3714 raddr
= memory_region_get_ram_addr(section
->mr
)
3715 + memory_region_section_addr(section
, addr
);
3723 ret
= qemu_ram_ptr_length(raddr
, &rlen
);
3728 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3729 * Will also mark the memory as dirty if is_write == 1. access_len gives
3730 * the amount of memory that was actually read or written by the caller.
3732 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
3733 int is_write
, target_phys_addr_t access_len
)
3735 if (buffer
!= bounce
.buffer
) {
3737 ram_addr_t addr1
= qemu_ram_addr_from_host_nofail(buffer
);
3738 while (access_len
) {
3740 l
= TARGET_PAGE_SIZE
;
3743 if (!cpu_physical_memory_is_dirty(addr1
)) {
3744 /* invalidate code */
3745 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3747 cpu_physical_memory_set_dirty_flags(
3748 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3754 if (xen_enabled()) {
3755 xen_invalidate_map_cache_entry(buffer
);
3760 cpu_physical_memory_write(bounce
.addr
, bounce
.buffer
, access_len
);
3762 qemu_vfree(bounce
.buffer
);
3763 bounce
.buffer
= NULL
;
3764 cpu_notify_map_clients();
3767 /* warning: addr must be aligned */
3768 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr
,
3769 enum device_endian endian
)
3773 MemoryRegionSection
*section
;
3775 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3777 if (!(memory_region_is_ram(section
->mr
) ||
3778 memory_region_is_romd(section
->mr
))) {
3780 addr
= memory_region_section_addr(section
, addr
);
3781 val
= io_mem_read(section
->mr
, addr
, 4);
3782 #if defined(TARGET_WORDS_BIGENDIAN)
3783 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3787 if (endian
== DEVICE_BIG_ENDIAN
) {
3793 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3795 + memory_region_section_addr(section
, addr
));
3797 case DEVICE_LITTLE_ENDIAN
:
3798 val
= ldl_le_p(ptr
);
3800 case DEVICE_BIG_ENDIAN
:
3801 val
= ldl_be_p(ptr
);
3811 uint32_t ldl_phys(target_phys_addr_t addr
)
3813 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3816 uint32_t ldl_le_phys(target_phys_addr_t addr
)
3818 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3821 uint32_t ldl_be_phys(target_phys_addr_t addr
)
3823 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3826 /* warning: addr must be aligned */
3827 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr
,
3828 enum device_endian endian
)
3832 MemoryRegionSection
*section
;
3834 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3836 if (!(memory_region_is_ram(section
->mr
) ||
3837 memory_region_is_romd(section
->mr
))) {
3839 addr
= memory_region_section_addr(section
, addr
);
3841 /* XXX This is broken when device endian != cpu endian.
3842 Fix and add "endian" variable check */
3843 #ifdef TARGET_WORDS_BIGENDIAN
3844 val
= io_mem_read(section
->mr
, addr
, 4) << 32;
3845 val
|= io_mem_read(section
->mr
, addr
+ 4, 4);
3847 val
= io_mem_read(section
->mr
, addr
, 4);
3848 val
|= io_mem_read(section
->mr
, addr
+ 4, 4) << 32;
3852 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3854 + memory_region_section_addr(section
, addr
));
3856 case DEVICE_LITTLE_ENDIAN
:
3857 val
= ldq_le_p(ptr
);
3859 case DEVICE_BIG_ENDIAN
:
3860 val
= ldq_be_p(ptr
);
3870 uint64_t ldq_phys(target_phys_addr_t addr
)
3872 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3875 uint64_t ldq_le_phys(target_phys_addr_t addr
)
3877 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3880 uint64_t ldq_be_phys(target_phys_addr_t addr
)
3882 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3886 uint32_t ldub_phys(target_phys_addr_t addr
)
3889 cpu_physical_memory_read(addr
, &val
, 1);
3893 /* warning: addr must be aligned */
3894 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr
,
3895 enum device_endian endian
)
3899 MemoryRegionSection
*section
;
3901 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3903 if (!(memory_region_is_ram(section
->mr
) ||
3904 memory_region_is_romd(section
->mr
))) {
3906 addr
= memory_region_section_addr(section
, addr
);
3907 val
= io_mem_read(section
->mr
, addr
, 2);
3908 #if defined(TARGET_WORDS_BIGENDIAN)
3909 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3913 if (endian
== DEVICE_BIG_ENDIAN
) {
3919 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3921 + memory_region_section_addr(section
, addr
));
3923 case DEVICE_LITTLE_ENDIAN
:
3924 val
= lduw_le_p(ptr
);
3926 case DEVICE_BIG_ENDIAN
:
3927 val
= lduw_be_p(ptr
);
3937 uint32_t lduw_phys(target_phys_addr_t addr
)
3939 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3942 uint32_t lduw_le_phys(target_phys_addr_t addr
)
3944 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3947 uint32_t lduw_be_phys(target_phys_addr_t addr
)
3949 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3952 /* warning: addr must be aligned. The ram page is not masked as dirty
3953 and the code inside is not invalidated. It is useful if the dirty
3954 bits are used to track modified PTEs */
3955 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
3958 MemoryRegionSection
*section
;
3960 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3962 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3963 addr
= memory_region_section_addr(section
, addr
);
3964 if (memory_region_is_ram(section
->mr
)) {
3965 section
= &phys_sections
[phys_section_rom
];
3967 io_mem_write(section
->mr
, addr
, val
, 4);
3969 unsigned long addr1
= (memory_region_get_ram_addr(section
->mr
)
3971 + memory_region_section_addr(section
, addr
);
3972 ptr
= qemu_get_ram_ptr(addr1
);
3975 if (unlikely(in_migration
)) {
3976 if (!cpu_physical_memory_is_dirty(addr1
)) {
3977 /* invalidate code */
3978 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3980 cpu_physical_memory_set_dirty_flags(
3981 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3987 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
3990 MemoryRegionSection
*section
;
3992 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3994 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3995 addr
= memory_region_section_addr(section
, addr
);
3996 if (memory_region_is_ram(section
->mr
)) {
3997 section
= &phys_sections
[phys_section_rom
];
3999 #ifdef TARGET_WORDS_BIGENDIAN
4000 io_mem_write(section
->mr
, addr
, val
>> 32, 4);
4001 io_mem_write(section
->mr
, addr
+ 4, (uint32_t)val
, 4);
4003 io_mem_write(section
->mr
, addr
, (uint32_t)val
, 4);
4004 io_mem_write(section
->mr
, addr
+ 4, val
>> 32, 4);
4007 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
4009 + memory_region_section_addr(section
, addr
));
4014 /* warning: addr must be aligned */
4015 static inline void stl_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4016 enum device_endian endian
)
4019 MemoryRegionSection
*section
;
4021 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4023 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
4024 addr
= memory_region_section_addr(section
, addr
);
4025 if (memory_region_is_ram(section
->mr
)) {
4026 section
= &phys_sections
[phys_section_rom
];
4028 #if defined(TARGET_WORDS_BIGENDIAN)
4029 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4033 if (endian
== DEVICE_BIG_ENDIAN
) {
4037 io_mem_write(section
->mr
, addr
, val
, 4);
4039 unsigned long addr1
;
4040 addr1
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
4041 + memory_region_section_addr(section
, addr
);
4043 ptr
= qemu_get_ram_ptr(addr1
);
4045 case DEVICE_LITTLE_ENDIAN
:
4048 case DEVICE_BIG_ENDIAN
:
4055 if (!cpu_physical_memory_is_dirty(addr1
)) {
4056 /* invalidate code */
4057 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4059 cpu_physical_memory_set_dirty_flags(addr1
,
4060 (0xff & ~CODE_DIRTY_FLAG
));
4065 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4067 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4070 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
)
4072 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4075 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
)
4077 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4081 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4084 cpu_physical_memory_write(addr
, &v
, 1);
4087 /* warning: addr must be aligned */
4088 static inline void stw_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4089 enum device_endian endian
)
4092 MemoryRegionSection
*section
;
4094 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4096 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
4097 addr
= memory_region_section_addr(section
, addr
);
4098 if (memory_region_is_ram(section
->mr
)) {
4099 section
= &phys_sections
[phys_section_rom
];
4101 #if defined(TARGET_WORDS_BIGENDIAN)
4102 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4106 if (endian
== DEVICE_BIG_ENDIAN
) {
4110 io_mem_write(section
->mr
, addr
, val
, 2);
4112 unsigned long addr1
;
4113 addr1
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
4114 + memory_region_section_addr(section
, addr
);
4116 ptr
= qemu_get_ram_ptr(addr1
);
4118 case DEVICE_LITTLE_ENDIAN
:
4121 case DEVICE_BIG_ENDIAN
:
4128 if (!cpu_physical_memory_is_dirty(addr1
)) {
4129 /* invalidate code */
4130 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4132 cpu_physical_memory_set_dirty_flags(addr1
,
4133 (0xff & ~CODE_DIRTY_FLAG
));
4138 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4140 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4143 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
)
4145 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4148 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
)
4150 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4154 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
4157 cpu_physical_memory_write(addr
, &val
, 8);
4160 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
)
4162 val
= cpu_to_le64(val
);
4163 cpu_physical_memory_write(addr
, &val
, 8);
4166 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
)
4168 val
= cpu_to_be64(val
);
4169 cpu_physical_memory_write(addr
, &val
, 8);
4172 /* virtual memory access for debug (includes writing to ROM) */
4173 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
4174 uint8_t *buf
, int len
, int is_write
)
4177 target_phys_addr_t phys_addr
;
4181 page
= addr
& TARGET_PAGE_MASK
;
4182 phys_addr
= cpu_get_phys_page_debug(env
, page
);
4183 /* if no physical page mapped, return an error */
4184 if (phys_addr
== -1)
4186 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4189 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
4191 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
4193 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
4202 /* in deterministic execution mode, instructions doing device I/Os
4203 must be at the end of the TB */
4204 void cpu_io_recompile(CPUArchState
*env
, uintptr_t retaddr
)
4206 TranslationBlock
*tb
;
4208 target_ulong pc
, cs_base
;
4211 tb
= tb_find_pc(retaddr
);
4213 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4216 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4217 cpu_restore_state(tb
, env
, retaddr
);
4218 /* Calculate how many instructions had been executed before the fault
4220 n
= n
- env
->icount_decr
.u16
.low
;
4221 /* Generate a new TB ending on the I/O insn. */
4223 /* On MIPS and SH, delay slot instructions can only be restarted if
4224 they were already the first instruction in the TB. If this is not
4225 the first instruction in a TB then re-execute the preceding
4227 #if defined(TARGET_MIPS)
4228 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4229 env
->active_tc
.PC
-= 4;
4230 env
->icount_decr
.u16
.low
++;
4231 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4233 #elif defined(TARGET_SH4)
4234 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4237 env
->icount_decr
.u16
.low
++;
4238 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4241 /* This should never happen. */
4242 if (n
> CF_COUNT_MASK
)
4243 cpu_abort(env
, "TB too big during recompile");
4245 cflags
= n
| CF_LAST_IO
;
4247 cs_base
= tb
->cs_base
;
4249 tb_phys_invalidate(tb
, -1);
4250 /* FIXME: In theory this could raise an exception. In practice
4251 we have already translated the block once so it's probably ok. */
4252 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4253 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4254 the first in the TB) then we end up generating a whole new TB and
4255 repeating the fault, which is horribly inefficient.
4256 Better would be to execute just this insn uncached, or generate a
4258 cpu_resume_from_signal(env
, NULL
);
4261 #if !defined(CONFIG_USER_ONLY)
4263 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4265 int i
, target_code_size
, max_target_code_size
;
4266 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4267 TranslationBlock
*tb
;
4269 target_code_size
= 0;
4270 max_target_code_size
= 0;
4272 direct_jmp_count
= 0;
4273 direct_jmp2_count
= 0;
4274 for(i
= 0; i
< nb_tbs
; i
++) {
4276 target_code_size
+= tb
->size
;
4277 if (tb
->size
> max_target_code_size
)
4278 max_target_code_size
= tb
->size
;
4279 if (tb
->page_addr
[1] != -1)
4281 if (tb
->tb_next_offset
[0] != 0xffff) {
4283 if (tb
->tb_next_offset
[1] != 0xffff) {
4284 direct_jmp2_count
++;
4288 /* XXX: avoid using doubles ? */
4289 cpu_fprintf(f
, "Translation buffer state:\n");
4290 cpu_fprintf(f
, "gen code size %td/%ld\n",
4291 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4292 cpu_fprintf(f
, "TB count %d/%d\n",
4293 nb_tbs
, code_gen_max_blocks
);
4294 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4295 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4296 max_target_code_size
);
4297 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4298 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4299 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4300 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4302 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4303 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4305 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4307 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4308 cpu_fprintf(f
, "\nStatistics:\n");
4309 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4310 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4311 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4312 tcg_dump_info(f
, cpu_fprintf
);
4316 * A helper function for the _utterly broken_ virtio device model to find out if
4317 * it's running on a big endian machine. Don't do this at home kids!
4319 bool virtio_is_big_endian(void);
4320 bool virtio_is_big_endian(void)
4322 #if defined(TARGET_WORDS_BIGENDIAN)