2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
38 #include "exec-memory.h"
39 #if defined(CONFIG_USER_ONLY)
41 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
42 #include <sys/param.h>
43 #if __FreeBSD_version >= 700104
44 #define HAVE_KINFO_GETVMMAP
45 #define sigqueue sigqueue_freebsd /* avoid redefinition */
48 #include <machine/profile.h>
56 #else /* !CONFIG_USER_ONLY */
57 #include "xen-mapcache.h"
63 #include "memory-internal.h"
65 //#define DEBUG_TB_INVALIDATE
67 //#define DEBUG_UNASSIGNED
69 /* make various TB consistency checks */
70 //#define DEBUG_TB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock
*tbs
;
83 static int code_gen_max_blocks
;
84 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
89 uint8_t *code_gen_prologue
;
90 static uint8_t *code_gen_buffer
;
91 static size_t code_gen_buffer_size
;
92 /* threshold to flush the translated code buffer */
93 static size_t code_gen_buffer_max_size
;
94 static uint8_t *code_gen_ptr
;
96 #if !defined(CONFIG_USER_ONLY)
98 static int in_migration
;
100 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
102 static MemoryRegion
*system_memory
;
103 static MemoryRegion
*system_io
;
105 AddressSpace address_space_io
;
106 AddressSpace address_space_memory
;
107 DMAContext dma_context_memory
;
109 MemoryRegion io_mem_ram
, io_mem_rom
, io_mem_unassigned
, io_mem_notdirty
;
110 static MemoryRegion io_mem_subpage_ram
;
114 CPUArchState
*first_cpu
;
115 /* current CPU in the current thread. It is only valid inside
117 DEFINE_TLS(CPUArchState
*,cpu_single_env
);
118 /* 0 = Do not count executed instructions.
119 1 = Precise instruction counting.
120 2 = Adaptive rate instruction counting. */
123 typedef struct PageDesc
{
124 /* list of TBs intersecting this ram page */
125 TranslationBlock
*first_tb
;
126 /* in order to optimize self modifying code, we count the number
127 of lookups we do to a given page to use a bitmap */
128 unsigned int code_write_count
;
129 uint8_t *code_bitmap
;
130 #if defined(CONFIG_USER_ONLY)
135 /* In system mode we want L1_MAP to be based on ram offsets,
136 while in user mode we want it to be based on virtual addresses. */
137 #if !defined(CONFIG_USER_ONLY)
138 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
139 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
141 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
144 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
147 /* Size of the L2 (and L3, etc) page tables. */
149 #define L2_SIZE (1 << L2_BITS)
151 #define P_L2_LEVELS \
152 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
154 /* The bits remaining after N lower levels of page tables. */
155 #define V_L1_BITS_REM \
156 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
158 #if V_L1_BITS_REM < 4
159 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
161 #define V_L1_BITS V_L1_BITS_REM
164 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
166 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
168 uintptr_t qemu_real_host_page_size
;
169 uintptr_t qemu_host_page_size
;
170 uintptr_t qemu_host_page_mask
;
172 /* This is a multi-level map on the virtual address space.
173 The bottom level has pointers to PageDesc. */
174 static void *l1_map
[V_L1_SIZE
];
176 #if !defined(CONFIG_USER_ONLY)
178 static MemoryRegionSection
*phys_sections
;
179 static unsigned phys_sections_nb
, phys_sections_nb_alloc
;
180 static uint16_t phys_section_unassigned
;
181 static uint16_t phys_section_notdirty
;
182 static uint16_t phys_section_rom
;
183 static uint16_t phys_section_watch
;
185 /* Simple allocator for PhysPageEntry nodes */
186 static PhysPageEntry (*phys_map_nodes
)[L2_SIZE
];
187 static unsigned phys_map_nodes_nb
, phys_map_nodes_nb_alloc
;
189 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
191 static void io_mem_init(void);
192 static void memory_map_init(void);
193 static void *qemu_safe_ram_ptr(ram_addr_t addr
);
195 static MemoryRegion io_mem_watch
;
197 static void tb_link_page(TranslationBlock
*tb
, tb_page_addr_t phys_pc
,
198 tb_page_addr_t phys_page2
);
201 static int tb_flush_count
;
202 static int tb_phys_invalidate_count
;
205 static inline void map_exec(void *addr
, long size
)
208 VirtualProtect(addr
, size
,
209 PAGE_EXECUTE_READWRITE
, &old_protect
);
213 static inline void map_exec(void *addr
, long size
)
215 unsigned long start
, end
, page_size
;
217 page_size
= getpagesize();
218 start
= (unsigned long)addr
;
219 start
&= ~(page_size
- 1);
221 end
= (unsigned long)addr
+ size
;
222 end
+= page_size
- 1;
223 end
&= ~(page_size
- 1);
225 mprotect((void *)start
, end
- start
,
226 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
230 static void page_init(void)
232 /* NOTE: we can always suppose that qemu_host_page_size >=
236 SYSTEM_INFO system_info
;
238 GetSystemInfo(&system_info
);
239 qemu_real_host_page_size
= system_info
.dwPageSize
;
242 qemu_real_host_page_size
= getpagesize();
244 if (qemu_host_page_size
== 0)
245 qemu_host_page_size
= qemu_real_host_page_size
;
246 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
247 qemu_host_page_size
= TARGET_PAGE_SIZE
;
248 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
250 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
252 #ifdef HAVE_KINFO_GETVMMAP
253 struct kinfo_vmentry
*freep
;
256 freep
= kinfo_getvmmap(getpid(), &cnt
);
259 for (i
= 0; i
< cnt
; i
++) {
260 unsigned long startaddr
, endaddr
;
262 startaddr
= freep
[i
].kve_start
;
263 endaddr
= freep
[i
].kve_end
;
264 if (h2g_valid(startaddr
)) {
265 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
267 if (h2g_valid(endaddr
)) {
268 endaddr
= h2g(endaddr
);
269 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
271 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
273 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
284 last_brk
= (unsigned long)sbrk(0);
286 f
= fopen("/compat/linux/proc/self/maps", "r");
291 unsigned long startaddr
, endaddr
;
294 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
296 if (n
== 2 && h2g_valid(startaddr
)) {
297 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
299 if (h2g_valid(endaddr
)) {
300 endaddr
= h2g(endaddr
);
304 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
316 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
322 #if defined(CONFIG_USER_ONLY)
323 /* We can't use g_malloc because it may recurse into a locked mutex. */
324 # define ALLOC(P, SIZE) \
326 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
327 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
330 # define ALLOC(P, SIZE) \
331 do { P = g_malloc0(SIZE); } while (0)
334 /* Level 1. Always allocated. */
335 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
338 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
345 ALLOC(p
, sizeof(void *) * L2_SIZE
);
349 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
357 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
363 return pd
+ (index
& (L2_SIZE
- 1));
366 static inline PageDesc
*page_find(tb_page_addr_t index
)
368 return page_find_alloc(index
, 0);
371 #if !defined(CONFIG_USER_ONLY)
373 static void phys_map_node_reserve(unsigned nodes
)
375 if (phys_map_nodes_nb
+ nodes
> phys_map_nodes_nb_alloc
) {
376 typedef PhysPageEntry Node
[L2_SIZE
];
377 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
* 2, 16);
378 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
,
379 phys_map_nodes_nb
+ nodes
);
380 phys_map_nodes
= g_renew(Node
, phys_map_nodes
,
381 phys_map_nodes_nb_alloc
);
385 static uint16_t phys_map_node_alloc(void)
390 ret
= phys_map_nodes_nb
++;
391 assert(ret
!= PHYS_MAP_NODE_NIL
);
392 assert(ret
!= phys_map_nodes_nb_alloc
);
393 for (i
= 0; i
< L2_SIZE
; ++i
) {
394 phys_map_nodes
[ret
][i
].is_leaf
= 0;
395 phys_map_nodes
[ret
][i
].ptr
= PHYS_MAP_NODE_NIL
;
400 static void phys_map_nodes_reset(void)
402 phys_map_nodes_nb
= 0;
406 static void phys_page_set_level(PhysPageEntry
*lp
, hwaddr
*index
,
407 hwaddr
*nb
, uint16_t leaf
,
412 hwaddr step
= (hwaddr
)1 << (level
* L2_BITS
);
414 if (!lp
->is_leaf
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
415 lp
->ptr
= phys_map_node_alloc();
416 p
= phys_map_nodes
[lp
->ptr
];
418 for (i
= 0; i
< L2_SIZE
; i
++) {
420 p
[i
].ptr
= phys_section_unassigned
;
424 p
= phys_map_nodes
[lp
->ptr
];
426 lp
= &p
[(*index
>> (level
* L2_BITS
)) & (L2_SIZE
- 1)];
428 while (*nb
&& lp
< &p
[L2_SIZE
]) {
429 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
435 phys_page_set_level(lp
, index
, nb
, leaf
, level
- 1);
441 static void phys_page_set(AddressSpaceDispatch
*d
,
442 hwaddr index
, hwaddr nb
,
445 /* Wildly overreserve - it doesn't matter much. */
446 phys_map_node_reserve(3 * P_L2_LEVELS
);
448 phys_page_set_level(&d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
451 MemoryRegionSection
*phys_page_find(AddressSpaceDispatch
*d
, hwaddr index
)
453 PhysPageEntry lp
= d
->phys_map
;
456 uint16_t s_index
= phys_section_unassigned
;
458 for (i
= P_L2_LEVELS
- 1; i
>= 0 && !lp
.is_leaf
; i
--) {
459 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
462 p
= phys_map_nodes
[lp
.ptr
];
463 lp
= p
[(index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1)];
468 return &phys_sections
[s_index
];
471 bool memory_region_is_unassigned(MemoryRegion
*mr
)
473 return mr
!= &io_mem_ram
&& mr
!= &io_mem_rom
474 && mr
!= &io_mem_notdirty
&& !mr
->rom_device
475 && mr
!= &io_mem_watch
;
478 #define mmap_lock() do { } while(0)
479 #define mmap_unlock() do { } while(0)
482 #if defined(CONFIG_USER_ONLY)
483 /* Currently it is not recommended to allocate big chunks of data in
484 user mode. It will change when a dedicated libc will be used. */
485 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
486 region in which the guest needs to run. Revisit this. */
487 #define USE_STATIC_CODE_GEN_BUFFER
490 /* ??? Should configure for this, not list operating systems here. */
491 #if (defined(__linux__) \
492 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
493 || defined(__DragonFly__) || defined(__OpenBSD__) \
494 || defined(__NetBSD__))
498 /* Minimum size of the code gen buffer. This number is randomly chosen,
499 but not so small that we can't have a fair number of TB's live. */
500 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
502 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
503 indicated, this is constrained by the range of direct branches on the
504 host cpu, as used by the TCG implementation of goto_tb. */
505 #if defined(__x86_64__)
506 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
507 #elif defined(__sparc__)
508 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
509 #elif defined(__arm__)
510 # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
511 #elif defined(__s390x__)
512 /* We have a +- 4GB range on the branches; leave some slop. */
513 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
515 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
518 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
520 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
521 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
522 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
524 static inline size_t size_code_gen_buffer(size_t tb_size
)
526 /* Size the buffer. */
528 #ifdef USE_STATIC_CODE_GEN_BUFFER
529 tb_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
531 /* ??? Needs adjustments. */
532 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
533 static buffer, we could size this on RESERVED_VA, on the text
534 segment size of the executable, or continue to use the default. */
535 tb_size
= (unsigned long)(ram_size
/ 4);
538 if (tb_size
< MIN_CODE_GEN_BUFFER_SIZE
) {
539 tb_size
= MIN_CODE_GEN_BUFFER_SIZE
;
541 if (tb_size
> MAX_CODE_GEN_BUFFER_SIZE
) {
542 tb_size
= MAX_CODE_GEN_BUFFER_SIZE
;
544 code_gen_buffer_size
= tb_size
;
548 #ifdef USE_STATIC_CODE_GEN_BUFFER
549 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
550 __attribute__((aligned(CODE_GEN_ALIGN
)));
552 static inline void *alloc_code_gen_buffer(void)
554 map_exec(static_code_gen_buffer
, code_gen_buffer_size
);
555 return static_code_gen_buffer
;
557 #elif defined(USE_MMAP)
558 static inline void *alloc_code_gen_buffer(void)
560 int flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
564 /* Constrain the position of the buffer based on the host cpu.
565 Note that these addresses are chosen in concert with the
566 addresses assigned in the relevant linker script file. */
567 # if defined(__PIE__) || defined(__PIC__)
568 /* Don't bother setting a preferred location if we're building
569 a position-independent executable. We're more likely to get
570 an address near the main executable if we let the kernel
571 choose the address. */
572 # elif defined(__x86_64__) && defined(MAP_32BIT)
573 /* Force the memory down into low memory with the executable.
574 Leave the choice of exact location with the kernel. */
576 /* Cannot expect to map more than 800MB in low memory. */
577 if (code_gen_buffer_size
> 800u * 1024 * 1024) {
578 code_gen_buffer_size
= 800u * 1024 * 1024;
580 # elif defined(__sparc__)
581 start
= 0x40000000ul
;
582 # elif defined(__s390x__)
583 start
= 0x90000000ul
;
586 buf
= mmap((void *)start
, code_gen_buffer_size
,
587 PROT_WRITE
| PROT_READ
| PROT_EXEC
, flags
, -1, 0);
588 return buf
== MAP_FAILED
? NULL
: buf
;
591 static inline void *alloc_code_gen_buffer(void)
593 void *buf
= g_malloc(code_gen_buffer_size
);
595 map_exec(buf
, code_gen_buffer_size
);
599 #endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
601 static inline void code_gen_alloc(size_t tb_size
)
603 code_gen_buffer_size
= size_code_gen_buffer(tb_size
);
604 code_gen_buffer
= alloc_code_gen_buffer();
605 if (code_gen_buffer
== NULL
) {
606 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
610 /* Steal room for the prologue at the end of the buffer. This ensures
611 (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
612 from TB's to the prologue are going to be in range. It also means
613 that we don't need to mark (additional) portions of the data segment
615 code_gen_prologue
= code_gen_buffer
+ code_gen_buffer_size
- 1024;
616 code_gen_buffer_size
-= 1024;
618 code_gen_buffer_max_size
= code_gen_buffer_size
-
619 (TCG_MAX_OP_SIZE
* OPC_BUF_SIZE
);
620 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
621 tbs
= g_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
624 /* Must be called before using the QEMU cpus. 'tb_size' is the size
625 (in bytes) allocated to the translation buffer. Zero means default
627 void tcg_exec_init(unsigned long tb_size
)
630 code_gen_alloc(tb_size
);
631 code_gen_ptr
= code_gen_buffer
;
632 tcg_register_jit(code_gen_buffer
, code_gen_buffer_size
);
634 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
635 /* There's no guest base to take into account, so go ahead and
636 initialize the prologue now. */
637 tcg_prologue_init(&tcg_ctx
);
641 bool tcg_enabled(void)
643 return code_gen_buffer
!= NULL
;
646 void cpu_exec_init_all(void)
648 #if !defined(CONFIG_USER_ONLY)
654 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
656 static int cpu_common_post_load(void *opaque
, int version_id
)
658 CPUArchState
*env
= opaque
;
660 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
661 version_id is increased. */
662 env
->interrupt_request
&= ~0x01;
668 static const VMStateDescription vmstate_cpu_common
= {
669 .name
= "cpu_common",
671 .minimum_version_id
= 1,
672 .minimum_version_id_old
= 1,
673 .post_load
= cpu_common_post_load
,
674 .fields
= (VMStateField
[]) {
675 VMSTATE_UINT32(halted
, CPUArchState
),
676 VMSTATE_UINT32(interrupt_request
, CPUArchState
),
677 VMSTATE_END_OF_LIST()
682 CPUArchState
*qemu_get_cpu(int cpu
)
684 CPUArchState
*env
= first_cpu
;
687 if (env
->cpu_index
== cpu
)
695 void cpu_exec_init(CPUArchState
*env
)
697 #ifndef CONFIG_USER_ONLY
698 CPUState
*cpu
= ENV_GET_CPU(env
);
703 #if defined(CONFIG_USER_ONLY)
706 env
->next_cpu
= NULL
;
709 while (*penv
!= NULL
) {
710 penv
= &(*penv
)->next_cpu
;
713 env
->cpu_index
= cpu_index
;
715 QTAILQ_INIT(&env
->breakpoints
);
716 QTAILQ_INIT(&env
->watchpoints
);
717 #ifndef CONFIG_USER_ONLY
718 cpu
->thread_id
= qemu_get_thread_id();
721 #if defined(CONFIG_USER_ONLY)
724 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
725 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
726 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
727 cpu_save
, cpu_load
, env
);
731 /* Allocate a new translation block. Flush the translation buffer if
732 too many translation blocks or too much generated code. */
733 static TranslationBlock
*tb_alloc(target_ulong pc
)
735 TranslationBlock
*tb
;
737 if (nb_tbs
>= code_gen_max_blocks
||
738 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
746 void tb_free(TranslationBlock
*tb
)
748 /* In practice this is mostly used for single use temporary TB
749 Ignore the hard cases and just back up if this TB happens to
750 be the last one generated. */
751 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
752 code_gen_ptr
= tb
->tc_ptr
;
757 static inline void invalidate_page_bitmap(PageDesc
*p
)
759 if (p
->code_bitmap
) {
760 g_free(p
->code_bitmap
);
761 p
->code_bitmap
= NULL
;
763 p
->code_write_count
= 0;
766 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
768 static void page_flush_tb_1 (int level
, void **lp
)
777 for (i
= 0; i
< L2_SIZE
; ++i
) {
778 pd
[i
].first_tb
= NULL
;
779 invalidate_page_bitmap(pd
+ i
);
783 for (i
= 0; i
< L2_SIZE
; ++i
) {
784 page_flush_tb_1 (level
- 1, pp
+ i
);
789 static void page_flush_tb(void)
792 for (i
= 0; i
< V_L1_SIZE
; i
++) {
793 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
797 /* flush all the translation blocks */
798 /* XXX: tb_flush is currently not thread safe */
799 void tb_flush(CPUArchState
*env1
)
802 #if defined(DEBUG_FLUSH)
803 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
804 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
806 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
808 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
809 cpu_abort(env1
, "Internal error: code buffer overflow\n");
813 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
814 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
817 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
820 code_gen_ptr
= code_gen_buffer
;
821 /* XXX: flush processor icache at this point if cache flush is
826 #ifdef DEBUG_TB_CHECK
828 static void tb_invalidate_check(target_ulong address
)
830 TranslationBlock
*tb
;
832 address
&= TARGET_PAGE_MASK
;
833 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
834 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
835 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
836 address
>= tb
->pc
+ tb
->size
)) {
837 printf("ERROR invalidate: address=" TARGET_FMT_lx
838 " PC=%08lx size=%04x\n",
839 address
, (long)tb
->pc
, tb
->size
);
845 /* verify that all the pages have correct rights for code */
846 static void tb_page_check(void)
848 TranslationBlock
*tb
;
849 int i
, flags1
, flags2
;
851 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
852 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
853 flags1
= page_get_flags(tb
->pc
);
854 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
855 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
856 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
857 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
865 /* invalidate one TB */
866 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
869 TranslationBlock
*tb1
;
873 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
876 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
880 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
882 TranslationBlock
*tb1
;
887 n1
= (uintptr_t)tb1
& 3;
888 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
890 *ptb
= tb1
->page_next
[n1
];
893 ptb
= &tb1
->page_next
[n1
];
897 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
899 TranslationBlock
*tb1
, **ptb
;
902 ptb
= &tb
->jmp_next
[n
];
905 /* find tb(n) in circular list */
908 n1
= (uintptr_t)tb1
& 3;
909 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
910 if (n1
== n
&& tb1
== tb
)
913 ptb
= &tb1
->jmp_first
;
915 ptb
= &tb1
->jmp_next
[n1
];
918 /* now we can suppress tb(n) from the list */
919 *ptb
= tb
->jmp_next
[n
];
921 tb
->jmp_next
[n
] = NULL
;
925 /* reset the jump entry 'n' of a TB so that it is not chained to
927 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
929 tb_set_jmp_target(tb
, n
, (uintptr_t)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
932 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
937 tb_page_addr_t phys_pc
;
938 TranslationBlock
*tb1
, *tb2
;
940 /* remove the TB from the hash list */
941 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
942 h
= tb_phys_hash_func(phys_pc
);
943 tb_remove(&tb_phys_hash
[h
], tb
,
944 offsetof(TranslationBlock
, phys_hash_next
));
946 /* remove the TB from the page list */
947 if (tb
->page_addr
[0] != page_addr
) {
948 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
949 tb_page_remove(&p
->first_tb
, tb
);
950 invalidate_page_bitmap(p
);
952 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
953 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
954 tb_page_remove(&p
->first_tb
, tb
);
955 invalidate_page_bitmap(p
);
958 tb_invalidated_flag
= 1;
960 /* remove the TB from the hash list */
961 h
= tb_jmp_cache_hash_func(tb
->pc
);
962 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
963 if (env
->tb_jmp_cache
[h
] == tb
)
964 env
->tb_jmp_cache
[h
] = NULL
;
967 /* suppress this TB from the two jump lists */
968 tb_jmp_remove(tb
, 0);
969 tb_jmp_remove(tb
, 1);
971 /* suppress any remaining jumps to this TB */
974 n1
= (uintptr_t)tb1
& 3;
977 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
978 tb2
= tb1
->jmp_next
[n1
];
979 tb_reset_jump(tb1
, n1
);
980 tb1
->jmp_next
[n1
] = NULL
;
983 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2); /* fail safe */
985 tb_phys_invalidate_count
++;
/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab' (LSB-first within each byte). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* Whole range falls inside one byte. */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        /* Head byte, a run of full bytes, then a tail byte. */
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (end & 7) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
1015 static void build_page_bitmap(PageDesc
*p
)
1017 int n
, tb_start
, tb_end
;
1018 TranslationBlock
*tb
;
1020 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
1023 while (tb
!= NULL
) {
1024 n
= (uintptr_t)tb
& 3;
1025 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1026 /* NOTE: this is subtle as a TB may span two physical pages */
1028 /* NOTE: tb_end may be after the end of the page, but
1029 it is not a problem */
1030 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1031 tb_end
= tb_start
+ tb
->size
;
1032 if (tb_end
> TARGET_PAGE_SIZE
)
1033 tb_end
= TARGET_PAGE_SIZE
;
1036 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1038 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1039 tb
= tb
->page_next
[n
];
1043 TranslationBlock
*tb_gen_code(CPUArchState
*env
,
1044 target_ulong pc
, target_ulong cs_base
,
1045 int flags
, int cflags
)
1047 TranslationBlock
*tb
;
1049 tb_page_addr_t phys_pc
, phys_page2
;
1050 target_ulong virt_page2
;
1053 phys_pc
= get_page_addr_code(env
, pc
);
1056 /* flush must be done */
1058 /* cannot fail at this point */
1060 /* Don't forget to invalidate previous TB info. */
1061 tb_invalidated_flag
= 1;
1063 tc_ptr
= code_gen_ptr
;
1064 tb
->tc_ptr
= tc_ptr
;
1065 tb
->cs_base
= cs_base
;
1067 tb
->cflags
= cflags
;
1068 cpu_gen_code(env
, tb
, &code_gen_size
);
1069 code_gen_ptr
= (void *)(((uintptr_t)code_gen_ptr
+ code_gen_size
+
1070 CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1072 /* check next page if needed */
1073 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1075 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1076 phys_page2
= get_page_addr_code(env
, virt_page2
);
1078 tb_link_page(tb
, phys_pc
, phys_page2
);
1083 * Invalidate all TBs which intersect with the target physical address range
1084 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1085 * 'is_cpu_write_access' should be true if called from a real cpu write
1086 * access: the virtual CPU will exit the current TB if code is modified inside
1089 void tb_invalidate_phys_range(tb_page_addr_t start
, tb_page_addr_t end
,
1090 int is_cpu_write_access
)
1092 while (start
< end
) {
1093 tb_invalidate_phys_page_range(start
, end
, is_cpu_write_access
);
1094 start
&= TARGET_PAGE_MASK
;
1095 start
+= TARGET_PAGE_SIZE
;
1100 * Invalidate all TBs which intersect with the target physical address range
1101 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1102 * 'is_cpu_write_access' should be true if called from a real cpu write
1103 * access: the virtual CPU will exit the current TB if code is modified inside
1106 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1107 int is_cpu_write_access
)
1109 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1110 CPUArchState
*env
= cpu_single_env
;
1111 tb_page_addr_t tb_start
, tb_end
;
1114 #ifdef TARGET_HAS_PRECISE_SMC
1115 int current_tb_not_found
= is_cpu_write_access
;
1116 TranslationBlock
*current_tb
= NULL
;
1117 int current_tb_modified
= 0;
1118 target_ulong current_pc
= 0;
1119 target_ulong current_cs_base
= 0;
1120 int current_flags
= 0;
1121 #endif /* TARGET_HAS_PRECISE_SMC */
1123 p
= page_find(start
>> TARGET_PAGE_BITS
);
1126 if (!p
->code_bitmap
&&
1127 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1128 is_cpu_write_access
) {
1129 /* build code bitmap */
1130 build_page_bitmap(p
);
1133 /* we remove all the TBs in the range [start, end[ */
1134 /* XXX: see if in some cases it could be faster to invalidate all the code */
1136 while (tb
!= NULL
) {
1137 n
= (uintptr_t)tb
& 3;
1138 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1139 tb_next
= tb
->page_next
[n
];
1140 /* NOTE: this is subtle as a TB may span two physical pages */
1142 /* NOTE: tb_end may be after the end of the page, but
1143 it is not a problem */
1144 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1145 tb_end
= tb_start
+ tb
->size
;
1147 tb_start
= tb
->page_addr
[1];
1148 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1150 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1151 #ifdef TARGET_HAS_PRECISE_SMC
1152 if (current_tb_not_found
) {
1153 current_tb_not_found
= 0;
1155 if (env
->mem_io_pc
) {
1156 /* now we have a real cpu fault */
1157 current_tb
= tb_find_pc(env
->mem_io_pc
);
1160 if (current_tb
== tb
&&
1161 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1162 /* If we are modifying the current TB, we must stop
1163 its execution. We could be more precise by checking
1164 that the modification is after the current PC, but it
1165 would require a specialized function to partially
1166 restore the CPU state */
1168 current_tb_modified
= 1;
1169 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1170 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1173 #endif /* TARGET_HAS_PRECISE_SMC */
1174 /* we need to do that to handle the case where a signal
1175 occurs while doing tb_phys_invalidate() */
1178 saved_tb
= env
->current_tb
;
1179 env
->current_tb
= NULL
;
1181 tb_phys_invalidate(tb
, -1);
1183 env
->current_tb
= saved_tb
;
1184 if (env
->interrupt_request
&& env
->current_tb
)
1185 cpu_interrupt(env
, env
->interrupt_request
);
1190 #if !defined(CONFIG_USER_ONLY)
1191 /* if no code remaining, no need to continue to use slow writes */
1193 invalidate_page_bitmap(p
);
1194 if (is_cpu_write_access
) {
1195 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1199 #ifdef TARGET_HAS_PRECISE_SMC
1200 if (current_tb_modified
) {
1201 /* we generate a block containing just the instruction
1202 modifying the memory. It will ensure that it cannot modify
1204 env
->current_tb
= NULL
;
1205 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1206 cpu_resume_from_signal(env
, NULL
);
1211 /* len must be <= 8 and start must be a multiple of len */
1212 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1218 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1219 cpu_single_env
->mem_io_vaddr
, len
,
1220 cpu_single_env
->eip
,
1221 cpu_single_env
->eip
+
1222 (intptr_t)cpu_single_env
->segs
[R_CS
].base
);
1225 p
= page_find(start
>> TARGET_PAGE_BITS
);
1228 if (p
->code_bitmap
) {
1229 offset
= start
& ~TARGET_PAGE_MASK
;
1230 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1231 if (b
& ((1 << len
) - 1))
1235 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1239 #if !defined(CONFIG_SOFTMMU)
1240 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1241 uintptr_t pc
, void *puc
)
1243 TranslationBlock
*tb
;
1246 #ifdef TARGET_HAS_PRECISE_SMC
1247 TranslationBlock
*current_tb
= NULL
;
1248 CPUArchState
*env
= cpu_single_env
;
1249 int current_tb_modified
= 0;
1250 target_ulong current_pc
= 0;
1251 target_ulong current_cs_base
= 0;
1252 int current_flags
= 0;
1255 addr
&= TARGET_PAGE_MASK
;
1256 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1260 #ifdef TARGET_HAS_PRECISE_SMC
1261 if (tb
&& pc
!= 0) {
1262 current_tb
= tb_find_pc(pc
);
1265 while (tb
!= NULL
) {
1266 n
= (uintptr_t)tb
& 3;
1267 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1268 #ifdef TARGET_HAS_PRECISE_SMC
1269 if (current_tb
== tb
&&
1270 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1271 /* If we are modifying the current TB, we must stop
1272 its execution. We could be more precise by checking
1273 that the modification is after the current PC, but it
1274 would require a specialized function to partially
1275 restore the CPU state */
1277 current_tb_modified
= 1;
1278 cpu_restore_state(current_tb
, env
, pc
);
1279 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1282 #endif /* TARGET_HAS_PRECISE_SMC */
1283 tb_phys_invalidate(tb
, addr
);
1284 tb
= tb
->page_next
[n
];
1287 #ifdef TARGET_HAS_PRECISE_SMC
1288 if (current_tb_modified
) {
1289 /* we generate a block containing just the instruction
1290 modifying the memory. It will ensure that it cannot modify
1292 env
->current_tb
= NULL
;
1293 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1294 cpu_resume_from_signal(env
, puc
);
1300 /* add the tb in the target page and protect it if necessary */
1301 static inline void tb_alloc_page(TranslationBlock
*tb
,
1302 unsigned int n
, tb_page_addr_t page_addr
)
1305 #ifndef CONFIG_USER_ONLY
1306 bool page_already_protected
;
1309 tb
->page_addr
[n
] = page_addr
;
1310 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1311 tb
->page_next
[n
] = p
->first_tb
;
1312 #ifndef CONFIG_USER_ONLY
1313 page_already_protected
= p
->first_tb
!= NULL
;
1315 p
->first_tb
= (TranslationBlock
*)((uintptr_t)tb
| n
);
1316 invalidate_page_bitmap(p
);
1318 #if defined(TARGET_HAS_SMC) || 1
1320 #if defined(CONFIG_USER_ONLY)
1321 if (p
->flags
& PAGE_WRITE
) {
1326 /* force the host page as non writable (writes will have a
1327 page fault + mprotect overhead) */
1328 page_addr
&= qemu_host_page_mask
;
1330 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1331 addr
+= TARGET_PAGE_SIZE
) {
1333 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1337 p2
->flags
&= ~PAGE_WRITE
;
1339 mprotect(g2h(page_addr
), qemu_host_page_size
,
1340 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1341 #ifdef DEBUG_TB_INVALIDATE
1342 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1347 /* if some code is already present, then the pages are already
1348 protected. So we handle the case where only the first TB is
1349 allocated in a physical page */
1350 if (!page_already_protected
) {
1351 tlb_protect_code(page_addr
);
1355 #endif /* TARGET_HAS_SMC */
1358 /* add a new TB and link it to the physical page tables. phys_page2 is
1359 (-1) to indicate that only one page contains the TB. */
1360 static void tb_link_page(TranslationBlock
*tb
, tb_page_addr_t phys_pc
,
1361 tb_page_addr_t phys_page2
)
1364 TranslationBlock
**ptb
;
1366 /* Grab the mmap lock to stop another thread invalidating this TB
1367 before we are done. */
1369 /* add in the physical hash table */
1370 h
= tb_phys_hash_func(phys_pc
);
1371 ptb
= &tb_phys_hash
[h
];
1372 tb
->phys_hash_next
= *ptb
;
1375 /* add in the page list */
1376 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1377 if (phys_page2
!= -1)
1378 tb_alloc_page(tb
, 1, phys_page2
);
1380 tb
->page_addr
[1] = -1;
1382 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2);
1383 tb
->jmp_next
[0] = NULL
;
1384 tb
->jmp_next
[1] = NULL
;
1386 /* init original jump addresses */
1387 if (tb
->tb_next_offset
[0] != 0xffff)
1388 tb_reset_jump(tb
, 0);
1389 if (tb
->tb_next_offset
[1] != 0xffff)
1390 tb_reset_jump(tb
, 1);
1392 #ifdef DEBUG_TB_CHECK
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_max_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
}
#endif
1409 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1410 tb[1].tc_ptr. Return NULL if not found */
1411 TranslationBlock
*tb_find_pc(uintptr_t tc_ptr
)
1413 int m_min
, m_max
, m
;
1415 TranslationBlock
*tb
;
1419 if (tc_ptr
< (uintptr_t)code_gen_buffer
||
1420 tc_ptr
>= (uintptr_t)code_gen_ptr
) {
1423 /* binary search (cf Knuth) */
1426 while (m_min
<= m_max
) {
1427 m
= (m_min
+ m_max
) >> 1;
1429 v
= (uintptr_t)tb
->tc_ptr
;
1432 else if (tc_ptr
< v
) {
1441 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1443 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1445 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1448 tb1
= tb
->jmp_next
[n
];
1450 /* find head of list */
1452 n1
= (uintptr_t)tb1
& 3;
1453 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1456 tb1
= tb1
->jmp_next
[n1
];
1458 /* we are now sure now that tb jumps to tb1 */
1461 /* remove tb from the jmp_first list */
1462 ptb
= &tb_next
->jmp_first
;
1465 n1
= (uintptr_t)tb1
& 3;
1466 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1467 if (n1
== n
&& tb1
== tb
)
1469 ptb
= &tb1
->jmp_next
[n1
];
1471 *ptb
= tb
->jmp_next
[n
];
1472 tb
->jmp_next
[n
] = NULL
;
1474 /* suppress the jump to next tb in generated code */
1475 tb_reset_jump(tb
, n
);
1477 /* suppress jumps in the tb on which we could have jumped */
1478 tb_reset_jump_recursive(tb_next
);
1482 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1484 tb_reset_jump_recursive2(tb
, 0);
1485 tb_reset_jump_recursive2(tb
, 1);
1488 #if defined(TARGET_HAS_ICE)
1489 #if defined(CONFIG_USER_ONLY)
1490 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1492 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1495 void tb_invalidate_phys_addr(hwaddr addr
)
1497 ram_addr_t ram_addr
;
1498 MemoryRegionSection
*section
;
1500 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
1501 if (!(memory_region_is_ram(section
->mr
)
1502 || (section
->mr
->rom_device
&& section
->mr
->readable
))) {
1505 ram_addr
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1506 + memory_region_section_addr(section
, addr
);
1507 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1510 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1512 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env
, pc
) |
1513 (pc
& ~TARGET_PAGE_MASK
));
1516 #endif /* TARGET_HAS_ICE */
1518 #if defined(CONFIG_USER_ONLY)
1519 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1524 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1525 int flags
, CPUWatchpoint
**watchpoint
)
1530 /* Add a watchpoint. */
1531 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1532 int flags
, CPUWatchpoint
**watchpoint
)
1534 target_ulong len_mask
= ~(len
- 1);
1537 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1538 if ((len
& (len
- 1)) || (addr
& ~len_mask
) ||
1539 len
== 0 || len
> TARGET_PAGE_SIZE
) {
1540 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1541 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1544 wp
= g_malloc(sizeof(*wp
));
1547 wp
->len_mask
= len_mask
;
1550 /* keep all GDB-injected watchpoints in front */
1552 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1554 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1556 tlb_flush_page(env
, addr
);
1563 /* Remove a specific watchpoint. */
1564 int cpu_watchpoint_remove(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1567 target_ulong len_mask
= ~(len
- 1);
1570 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1571 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1572 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1573 cpu_watchpoint_remove_by_ref(env
, wp
);
1580 /* Remove a specific watchpoint by reference. */
1581 void cpu_watchpoint_remove_by_ref(CPUArchState
*env
, CPUWatchpoint
*watchpoint
)
1583 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1585 tlb_flush_page(env
, watchpoint
->vaddr
);
1590 /* Remove all matching watchpoints. */
1591 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1593 CPUWatchpoint
*wp
, *next
;
1595 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1596 if (wp
->flags
& mask
)
1597 cpu_watchpoint_remove_by_ref(env
, wp
);
1602 /* Add a breakpoint. */
1603 int cpu_breakpoint_insert(CPUArchState
*env
, target_ulong pc
, int flags
,
1604 CPUBreakpoint
**breakpoint
)
1606 #if defined(TARGET_HAS_ICE)
1609 bp
= g_malloc(sizeof(*bp
));
1614 /* keep all GDB-injected breakpoints in front */
1616 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1618 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1620 breakpoint_invalidate(env
, pc
);
1630 /* Remove a specific breakpoint. */
1631 int cpu_breakpoint_remove(CPUArchState
*env
, target_ulong pc
, int flags
)
1633 #if defined(TARGET_HAS_ICE)
1636 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1637 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1638 cpu_breakpoint_remove_by_ref(env
, bp
);
1648 /* Remove a specific breakpoint by reference. */
1649 void cpu_breakpoint_remove_by_ref(CPUArchState
*env
, CPUBreakpoint
*breakpoint
)
1651 #if defined(TARGET_HAS_ICE)
1652 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1654 breakpoint_invalidate(env
, breakpoint
->pc
);
1660 /* Remove all matching breakpoints. */
1661 void cpu_breakpoint_remove_all(CPUArchState
*env
, int mask
)
1663 #if defined(TARGET_HAS_ICE)
1664 CPUBreakpoint
*bp
, *next
;
1666 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1667 if (bp
->flags
& mask
)
1668 cpu_breakpoint_remove_by_ref(env
, bp
);
1673 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1674 CPU loop after each instruction */
1675 void cpu_single_step(CPUArchState
*env
, int enabled
)
1677 #if defined(TARGET_HAS_ICE)
1678 if (env
->singlestep_enabled
!= enabled
) {
1679 env
->singlestep_enabled
= enabled
;
1681 kvm_update_guest_debug(env
, 0);
1683 /* must flush all the translated code to avoid inconsistencies */
1684 /* XXX: only flush what is necessary */
1691 static void cpu_unlink_tb(CPUArchState
*env
)
1693 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1694 problem and hope the cpu will stop of its own accord. For userspace
1695 emulation this often isn't actually as bad as it sounds. Often
1696 signals are used primarily to interrupt blocking syscalls. */
1697 TranslationBlock
*tb
;
1698 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1700 spin_lock(&interrupt_lock
);
1701 tb
= env
->current_tb
;
1702 /* if the cpu is currently executing code, we must unlink it and
1703 all the potentially executing TB */
1705 env
->current_tb
= NULL
;
1706 tb_reset_jump_recursive(tb
);
1708 spin_unlock(&interrupt_lock
);
1711 #ifndef CONFIG_USER_ONLY
1712 /* mask must never be zero, except for A20 change call */
1713 static void tcg_handle_interrupt(CPUArchState
*env
, int mask
)
1715 CPUState
*cpu
= ENV_GET_CPU(env
);
1718 old_mask
= env
->interrupt_request
;
1719 env
->interrupt_request
|= mask
;
1722 * If called from iothread context, wake the target cpu in
1725 if (!qemu_cpu_is_self(cpu
)) {
1731 env
->icount_decr
.u16
.high
= 0xffff;
1733 && (mask
& ~old_mask
) != 0) {
1734 cpu_abort(env
, "Raised interrupt while not in I/O function");
1741 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1743 #else /* CONFIG_USER_ONLY */
1745 void cpu_interrupt(CPUArchState
*env
, int mask
)
1747 env
->interrupt_request
|= mask
;
1750 #endif /* CONFIG_USER_ONLY */
1752 void cpu_reset_interrupt(CPUArchState
*env
, int mask
)
1754 env
->interrupt_request
&= ~mask
;
1757 void cpu_exit(CPUArchState
*env
)
1759 env
->exit_request
= 1;
1763 void cpu_abort(CPUArchState
*env
, const char *fmt
, ...)
1770 fprintf(stderr
, "qemu: fatal: ");
1771 vfprintf(stderr
, fmt
, ap
);
1772 fprintf(stderr
, "\n");
1773 cpu_dump_state(env
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
1774 if (qemu_log_enabled()) {
1775 qemu_log("qemu: fatal: ");
1776 qemu_log_vprintf(fmt
, ap2
);
1778 log_cpu_state(env
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
1784 #if defined(CONFIG_USER_ONLY)
1786 struct sigaction act
;
1787 sigfillset(&act
.sa_mask
);
1788 act
.sa_handler
= SIG_DFL
;
1789 sigaction(SIGABRT
, &act
, NULL
);
1795 CPUArchState
*cpu_copy(CPUArchState
*env
)
1797 CPUArchState
*new_env
= cpu_init(env
->cpu_model_str
);
1798 CPUArchState
*next_cpu
= new_env
->next_cpu
;
1799 int cpu_index
= new_env
->cpu_index
;
1800 #if defined(TARGET_HAS_ICE)
1805 memcpy(new_env
, env
, sizeof(CPUArchState
));
1807 /* Preserve chaining and index. */
1808 new_env
->next_cpu
= next_cpu
;
1809 new_env
->cpu_index
= cpu_index
;
1811 /* Clone all break/watchpoints.
1812 Note: Once we support ptrace with hw-debug register access, make sure
1813 BP_CPU break/watchpoints are handled correctly on clone. */
1814 QTAILQ_INIT(&env
->breakpoints
);
1815 QTAILQ_INIT(&env
->watchpoints
);
1816 #if defined(TARGET_HAS_ICE)
1817 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1818 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1820 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1821 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1829 #if !defined(CONFIG_USER_ONLY)
1830 void tb_flush_jmp_cache(CPUArchState
*env
, target_ulong addr
)
1834 /* Discard jump cache entries for any tb which might potentially
1835 overlap the flushed page. */
1836 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1837 memset (&env
->tb_jmp_cache
[i
], 0,
1838 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1840 i
= tb_jmp_cache_hash_page(addr
);
1841 memset (&env
->tb_jmp_cache
[i
], 0,
1842 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1845 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t end
,
1850 /* we modify the TLB cache so that the dirty bit will be set again
1851 when accessing the range */
1852 start1
= (uintptr_t)qemu_safe_ram_ptr(start
);
1853 /* Check that we don't span multiple blocks - this breaks the
1854 address comparisons below. */
1855 if ((uintptr_t)qemu_safe_ram_ptr(end
- 1) - start1
1856 != (end
- 1) - start
) {
1859 cpu_tlb_reset_dirty_all(start1
, length
);
1863 /* Note: start and end must be within the same ram block. */
1864 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1869 start
&= TARGET_PAGE_MASK
;
1870 end
= TARGET_PAGE_ALIGN(end
);
1872 length
= end
- start
;
1875 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
1877 if (tcg_enabled()) {
1878 tlb_reset_dirty_range_all(start
, end
, length
);
1882 static int cpu_physical_memory_set_dirty_tracking(int enable
)
1885 in_migration
= enable
;
1889 hwaddr
memory_region_section_get_iotlb(CPUArchState
*env
,
1890 MemoryRegionSection
*section
,
1894 target_ulong
*address
)
1899 if (memory_region_is_ram(section
->mr
)) {
1901 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1902 + memory_region_section_addr(section
, paddr
);
1903 if (!section
->readonly
) {
1904 iotlb
|= phys_section_notdirty
;
1906 iotlb
|= phys_section_rom
;
1909 /* IO handlers are currently passed a physical address.
1910 It would be nice to pass an offset from the base address
1911 of that region. This would avoid having to special case RAM,
1912 and avoid full address decoding in every device.
1913 We can't use the high bits of pd for this because
1914 IO_MEM_ROMD uses these as a ram address. */
1915 iotlb
= section
- phys_sections
;
1916 iotlb
+= memory_region_section_addr(section
, paddr
);
1919 /* Make accesses to pages with watchpoints go via the
1920 watchpoint trap routines. */
1921 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1922 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
1923 /* Avoid trapping reads of pages with a write breakpoint. */
1924 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1925 iotlb
= phys_section_watch
+ paddr
;
1926 *address
|= TLB_MMIO
;
1937 * Walks guest process memory "regions" one by one
1938 * and calls callback function 'fn' for each region.
1941 struct walk_memory_regions_data
1943 walk_memory_regions_fn fn
;
1949 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
1950 abi_ulong end
, int new_prot
)
1952 if (data
->start
!= -1ul) {
1953 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
1959 data
->start
= (new_prot
? end
: -1ul);
1960 data
->prot
= new_prot
;
1965 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
1966 abi_ulong base
, int level
, void **lp
)
1972 return walk_memory_regions_end(data
, base
, 0);
1977 for (i
= 0; i
< L2_SIZE
; ++i
) {
1978 int prot
= pd
[i
].flags
;
1980 pa
= base
| (i
<< TARGET_PAGE_BITS
);
1981 if (prot
!= data
->prot
) {
1982 rc
= walk_memory_regions_end(data
, pa
, prot
);
1990 for (i
= 0; i
< L2_SIZE
; ++i
) {
1991 pa
= base
| ((abi_ulong
)i
<<
1992 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
1993 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2003 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2005 struct walk_memory_regions_data data
;
2013 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2014 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2015 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2021 return walk_memory_regions_end(&data
, 0, 0);
2024 static int dump_region(void *priv
, abi_ulong start
,
2025 abi_ulong end
, unsigned long prot
)
2027 FILE *f
= (FILE *)priv
;
2029 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2030 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2031 start
, end
, end
- start
,
2032 ((prot
& PAGE_READ
) ? 'r' : '-'),
2033 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2034 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2039 /* dump memory mappings */
2040 void page_dump(FILE *f
)
2042 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2043 "start", "end", "size", "prot");
2044 walk_memory_regions(f
, dump_region
);
2047 int page_get_flags(target_ulong address
)
2051 p
= page_find(address
>> TARGET_PAGE_BITS
);
2057 /* Modify the flags of a page and invalidate the code if necessary.
2058 The flag PAGE_WRITE_ORG is positioned automatically depending
2059 on PAGE_WRITE. The mmap_lock should already be held. */
2060 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2062 target_ulong addr
, len
;
2064 /* This function should never be called with addresses outside the
2065 guest address space. If this assert fires, it probably indicates
2066 a missing call to h2g_valid. */
2067 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2068 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2070 assert(start
< end
);
2072 start
= start
& TARGET_PAGE_MASK
;
2073 end
= TARGET_PAGE_ALIGN(end
);
2075 if (flags
& PAGE_WRITE
) {
2076 flags
|= PAGE_WRITE_ORG
;
2079 for (addr
= start
, len
= end
- start
;
2081 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2082 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2084 /* If the write protection bit is set, then we invalidate
2086 if (!(p
->flags
& PAGE_WRITE
) &&
2087 (flags
& PAGE_WRITE
) &&
2089 tb_invalidate_phys_page(addr
, 0, NULL
);
2095 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2101 /* This function should never be called with addresses outside the
2102 guest address space. If this assert fires, it probably indicates
2103 a missing call to h2g_valid. */
2104 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2105 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2111 if (start
+ len
- 1 < start
) {
2112 /* We've wrapped around. */
2116 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2117 start
= start
& TARGET_PAGE_MASK
;
2119 for (addr
= start
, len
= end
- start
;
2121 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2122 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2125 if( !(p
->flags
& PAGE_VALID
) )
2128 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2130 if (flags
& PAGE_WRITE
) {
2131 if (!(p
->flags
& PAGE_WRITE_ORG
))
2133 /* unprotect the page if it was put read-only because it
2134 contains translated code */
2135 if (!(p
->flags
& PAGE_WRITE
)) {
2136 if (!page_unprotect(addr
, 0, NULL
))
2145 /* called from signal handler: invalidate the code and unprotect the
2146 page. Return TRUE if the fault was successfully handled. */
2147 int page_unprotect(target_ulong address
, uintptr_t pc
, void *puc
)
2151 target_ulong host_start
, host_end
, addr
;
2153 /* Technically this isn't safe inside a signal handler. However we
2154 know this only ever happens in a synchronous SEGV handler, so in
2155 practice it seems to be ok. */
2158 p
= page_find(address
>> TARGET_PAGE_BITS
);
2164 /* if the page was really writable, then we change its
2165 protection back to writable */
2166 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
2167 host_start
= address
& qemu_host_page_mask
;
2168 host_end
= host_start
+ qemu_host_page_size
;
2171 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2172 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2173 p
->flags
|= PAGE_WRITE
;
2176 /* and since the content will be modified, we must invalidate
2177 the corresponding translated code. */
2178 tb_invalidate_phys_page(addr
, pc
, puc
);
2179 #ifdef DEBUG_TB_CHECK
2180 tb_invalidate_check(addr
);
2183 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2192 #endif /* defined(CONFIG_USER_ONLY) */
2194 #if !defined(CONFIG_USER_ONLY)
2196 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2197 typedef struct subpage_t
{
2200 uint16_t sub_section
[TARGET_PAGE_SIZE
];
2203 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2205 static subpage_t
*subpage_init(hwaddr base
);
2206 static void destroy_page_desc(uint16_t section_index
)
2208 MemoryRegionSection
*section
= &phys_sections
[section_index
];
2209 MemoryRegion
*mr
= section
->mr
;
2212 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
2213 memory_region_destroy(&subpage
->iomem
);
2218 static void destroy_l2_mapping(PhysPageEntry
*lp
, unsigned level
)
2223 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
2227 p
= phys_map_nodes
[lp
->ptr
];
2228 for (i
= 0; i
< L2_SIZE
; ++i
) {
2229 if (!p
[i
].is_leaf
) {
2230 destroy_l2_mapping(&p
[i
], level
- 1);
2232 destroy_page_desc(p
[i
].ptr
);
2236 lp
->ptr
= PHYS_MAP_NODE_NIL
;
2239 static void destroy_all_mappings(AddressSpaceDispatch
*d
)
2241 destroy_l2_mapping(&d
->phys_map
, P_L2_LEVELS
- 1);
2242 phys_map_nodes_reset();
2245 static uint16_t phys_section_add(MemoryRegionSection
*section
)
2247 if (phys_sections_nb
== phys_sections_nb_alloc
) {
2248 phys_sections_nb_alloc
= MAX(phys_sections_nb_alloc
* 2, 16);
2249 phys_sections
= g_renew(MemoryRegionSection
, phys_sections
,
2250 phys_sections_nb_alloc
);
2252 phys_sections
[phys_sections_nb
] = *section
;
2253 return phys_sections_nb
++;
2256 static void phys_sections_clear(void)
2258 phys_sections_nb
= 0;
2261 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
2264 hwaddr base
= section
->offset_within_address_space
2266 MemoryRegionSection
*existing
= phys_page_find(d
, base
>> TARGET_PAGE_BITS
);
2267 MemoryRegionSection subsection
= {
2268 .offset_within_address_space
= base
,
2269 .size
= TARGET_PAGE_SIZE
,
2273 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
2275 if (!(existing
->mr
->subpage
)) {
2276 subpage
= subpage_init(base
);
2277 subsection
.mr
= &subpage
->iomem
;
2278 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
2279 phys_section_add(&subsection
));
2281 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
2283 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
2284 end
= start
+ section
->size
- 1;
2285 subpage_register(subpage
, start
, end
, phys_section_add(section
));
2289 static void register_multipage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
2291 hwaddr start_addr
= section
->offset_within_address_space
;
2292 ram_addr_t size
= section
->size
;
2294 uint16_t section_index
= phys_section_add(section
);
2299 phys_page_set(d
, addr
>> TARGET_PAGE_BITS
, size
>> TARGET_PAGE_BITS
,
2303 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
2305 AddressSpaceDispatch
*d
= container_of(listener
, AddressSpaceDispatch
, listener
);
2306 MemoryRegionSection now
= *section
, remain
= *section
;
2308 if ((now
.offset_within_address_space
& ~TARGET_PAGE_MASK
)
2309 || (now
.size
< TARGET_PAGE_SIZE
)) {
2310 now
.size
= MIN(TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
2311 - now
.offset_within_address_space
,
2313 register_subpage(d
, &now
);
2314 remain
.size
-= now
.size
;
2315 remain
.offset_within_address_space
+= now
.size
;
2316 remain
.offset_within_region
+= now
.size
;
2318 while (remain
.size
>= TARGET_PAGE_SIZE
) {
2320 if (remain
.offset_within_region
& ~TARGET_PAGE_MASK
) {
2321 now
.size
= TARGET_PAGE_SIZE
;
2322 register_subpage(d
, &now
);
2324 now
.size
&= TARGET_PAGE_MASK
;
2325 register_multipage(d
, &now
);
2327 remain
.size
-= now
.size
;
2328 remain
.offset_within_address_space
+= now
.size
;
2329 remain
.offset_within_region
+= now
.size
;
2333 register_subpage(d
, &now
);
/* Flush any coalesced MMIO writes buffered by KVM; no-op without KVM. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
2343 #if defined(__linux__) && !defined(TARGET_S390X)
2345 #include <sys/vfs.h>
2347 #define HUGETLBFS_MAGIC 0x958458f6
2349 static long gethugepagesize(const char *path
)
2355 ret
= statfs(path
, &fs
);
2356 } while (ret
!= 0 && errno
== EINTR
);
2363 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2364 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2369 static void *file_ram_alloc(RAMBlock
*block
,
2379 unsigned long hpagesize
;
2381 hpagesize
= gethugepagesize(path
);
2386 if (memory
< hpagesize
) {
2390 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2391 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2395 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2399 fd
= mkstemp(filename
);
2401 perror("unable to create backing store for hugepages");
2408 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2411 * ftruncate is not supported by hugetlbfs in older
2412 * hosts, so don't bother bailing out on errors.
2413 * If anything goes wrong with it under other filesystems,
2416 if (ftruncate(fd
, memory
))
2417 perror("ftruncate");
2420 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2421 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2422 * to sidestep this quirk.
2424 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2425 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2427 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2429 if (area
== MAP_FAILED
) {
2430 perror("file_ram_alloc: can't mmap RAM pages");
2439 static ram_addr_t
find_ram_offset(ram_addr_t size
)
2441 RAMBlock
*block
, *next_block
;
2442 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
2444 if (QLIST_EMPTY(&ram_list
.blocks
))
2447 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2448 ram_addr_t end
, next
= RAM_ADDR_MAX
;
2450 end
= block
->offset
+ block
->length
;
2452 QLIST_FOREACH(next_block
, &ram_list
.blocks
, next
) {
2453 if (next_block
->offset
>= end
) {
2454 next
= MIN(next
, next_block
->offset
);
2457 if (next
- end
>= size
&& next
- end
< mingap
) {
2459 mingap
= next
- end
;
2463 if (offset
== RAM_ADDR_MAX
) {
2464 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
2472 ram_addr_t
last_ram_offset(void)
2475 ram_addr_t last
= 0;
2477 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2478 last
= MAX(last
, block
->offset
+ block
->length
);
2483 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
2486 QemuOpts
*machine_opts
;
2488 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2489 machine_opts
= qemu_opts_find(qemu_find_opts("machine"), 0);
2491 !qemu_opt_get_bool(machine_opts
, "dump-guest-core", true)) {
2492 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
2494 perror("qemu_madvise");
2495 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
2496 "but dump_guest_core=off specified\n");
2501 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
2503 RAMBlock
*new_block
, *block
;
2506 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2507 if (block
->offset
== addr
) {
2513 assert(!new_block
->idstr
[0]);
2516 char *id
= qdev_get_dev_path(dev
);
2518 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
2522 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
2524 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2525 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
2526 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
2533 static int memory_try_enable_merging(void *addr
, size_t len
)
2537 opts
= qemu_opts_find(qemu_find_opts("machine"), 0);
2538 if (opts
&& !qemu_opt_get_bool(opts
, "mem-merge", true)) {
2539 /* disabled by the user */
2543 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
2546 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
2549 RAMBlock
*new_block
;
2551 size
= TARGET_PAGE_ALIGN(size
);
2552 new_block
= g_malloc0(sizeof(*new_block
));
2555 new_block
->offset
= find_ram_offset(size
);
2557 new_block
->host
= host
;
2558 new_block
->flags
|= RAM_PREALLOC_MASK
;
2561 #if defined (__linux__) && !defined(TARGET_S390X)
2562 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2563 if (!new_block
->host
) {
2564 new_block
->host
= qemu_vmalloc(size
);
2565 memory_try_enable_merging(new_block
->host
, size
);
2568 fprintf(stderr
, "-mem-path option unsupported\n");
2572 if (xen_enabled()) {
2573 xen_ram_alloc(new_block
->offset
, size
, mr
);
2574 } else if (kvm_enabled()) {
2575 /* some s390/kvm configurations have special constraints */
2576 new_block
->host
= kvm_vmalloc(size
);
2578 new_block
->host
= qemu_vmalloc(size
);
2580 memory_try_enable_merging(new_block
->host
, size
);
2583 new_block
->length
= size
;
2585 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2587 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
2588 last_ram_offset() >> TARGET_PAGE_BITS
);
2589 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2590 0, size
>> TARGET_PAGE_BITS
);
2591 cpu_physical_memory_set_dirty_range(new_block
->offset
, size
, 0xff);
2593 qemu_ram_setup_dump(new_block
->host
, size
);
2594 qemu_madvise(new_block
->host
, size
, QEMU_MADV_HUGEPAGE
);
2597 kvm_setup_guest_memory(new_block
->host
, size
);
2599 return new_block
->offset
;
2602 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
2604 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
2607 void qemu_ram_free_from_ptr(ram_addr_t addr
)
2611 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2612 if (addr
== block
->offset
) {
2613 QLIST_REMOVE(block
, next
);
2620 void qemu_ram_free(ram_addr_t addr
)
2624 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2625 if (addr
== block
->offset
) {
2626 QLIST_REMOVE(block
, next
);
2627 if (block
->flags
& RAM_PREALLOC_MASK
) {
2629 } else if (mem_path
) {
2630 #if defined (__linux__) && !defined(TARGET_S390X)
2632 munmap(block
->host
, block
->length
);
2635 qemu_vfree(block
->host
);
2641 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2642 munmap(block
->host
, block
->length
);
2644 if (xen_enabled()) {
2645 xen_invalidate_map_cache_entry(block
->host
);
2647 qemu_vfree(block
->host
);
2659 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
2666 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2667 offset
= addr
- block
->offset
;
2668 if (offset
< block
->length
) {
2669 vaddr
= block
->host
+ offset
;
2670 if (block
->flags
& RAM_PREALLOC_MASK
) {
2674 munmap(vaddr
, length
);
2676 #if defined(__linux__) && !defined(TARGET_S390X)
2679 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
2682 flags
|= MAP_PRIVATE
;
2684 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2685 flags
, block
->fd
, offset
);
2687 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2688 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2695 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2696 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
2697 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2700 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2701 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2705 if (area
!= vaddr
) {
2706 fprintf(stderr
, "Could not remap addr: "
2707 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
2711 memory_try_enable_merging(vaddr
, length
);
2712 qemu_ram_setup_dump(vaddr
, length
);
2718 #endif /* !_WIN32 */
2720 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2721 With the exception of the softmmu code in this file, this should
2722 only be used for local memory (e.g. video ram) that the device owns,
2723 and knows it isn't going to access beyond the end of the block.
2725 It should not be used for general purpose DMA.
2726 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2728 void *qemu_get_ram_ptr(ram_addr_t addr
)
2732 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2733 if (addr
- block
->offset
< block
->length
) {
2734 /* Move this entry to to start of the list. */
2735 if (block
!= QLIST_FIRST(&ram_list
.blocks
)) {
2736 QLIST_REMOVE(block
, next
);
2737 QLIST_INSERT_HEAD(&ram_list
.blocks
, block
, next
);
2739 if (xen_enabled()) {
2740 /* We need to check if the requested address is in the RAM
2741 * because we don't want to map the entire memory in QEMU.
2742 * In that case just map until the end of the page.
2744 if (block
->offset
== 0) {
2745 return xen_map_cache(addr
, 0, 0);
2746 } else if (block
->host
== NULL
) {
2748 xen_map_cache(block
->offset
, block
->length
, 1);
2751 return block
->host
+ (addr
- block
->offset
);
2755 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
2761 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2762 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2764 static void *qemu_safe_ram_ptr(ram_addr_t addr
)
2768 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2769 if (addr
- block
->offset
< block
->length
) {
2770 if (xen_enabled()) {
2771 /* We need to check if the requested address is in the RAM
2772 * because we don't want to map the entire memory in QEMU.
2773 * In that case just map until the end of the page.
2775 if (block
->offset
== 0) {
2776 return xen_map_cache(addr
, 0, 0);
2777 } else if (block
->host
== NULL
) {
2779 xen_map_cache(block
->offset
, block
->length
, 1);
2782 return block
->host
+ (addr
- block
->offset
);
2786 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
2792 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2793 * but takes a size argument */
2794 static void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
)
2799 if (xen_enabled()) {
2800 return xen_map_cache(addr
, *size
, 1);
2804 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2805 if (addr
- block
->offset
< block
->length
) {
2806 if (addr
- block
->offset
+ *size
> block
->length
)
2807 *size
= block
->length
- addr
+ block
->offset
;
2808 return block
->host
+ (addr
- block
->offset
);
2812 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
2817 void qemu_put_ram_ptr(void *addr
)
2819 trace_qemu_put_ram_ptr(addr
);
2822 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
2825 uint8_t *host
= ptr
;
2827 if (xen_enabled()) {
2828 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
2832 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2833 /* This case append when the block is not mapped. */
2834 if (block
->host
== NULL
) {
2837 if (host
- block
->host
< block
->length
) {
2838 *ram_addr
= block
->offset
+ (host
- block
->host
);
2846 /* Some of the softmmu routines need to translate from a host pointer
2847 (typically a TLB entry) back to a ram offset. */
2848 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
2850 ram_addr_t ram_addr
;
2852 if (qemu_ram_addr_from_host(ptr
, &ram_addr
)) {
2853 fprintf(stderr
, "Bad ram pointer %p\n", ptr
);
2859 static uint64_t unassigned_mem_read(void *opaque
, hwaddr addr
,
2862 #ifdef DEBUG_UNASSIGNED
2863 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2865 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2866 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, size
);
2871 static void unassigned_mem_write(void *opaque
, hwaddr addr
,
2872 uint64_t val
, unsigned size
)
2874 #ifdef DEBUG_UNASSIGNED
2875 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%"PRIx64
"\n", addr
, val
);
2877 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2878 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, size
);
2882 static const MemoryRegionOps unassigned_mem_ops
= {
2883 .read
= unassigned_mem_read
,
2884 .write
= unassigned_mem_write
,
2885 .endianness
= DEVICE_NATIVE_ENDIAN
,
2888 static uint64_t error_mem_read(void *opaque
, hwaddr addr
,
2894 static void error_mem_write(void *opaque
, hwaddr addr
,
2895 uint64_t value
, unsigned size
)
2900 static const MemoryRegionOps error_mem_ops
= {
2901 .read
= error_mem_read
,
2902 .write
= error_mem_write
,
2903 .endianness
= DEVICE_NATIVE_ENDIAN
,
2906 static const MemoryRegionOps rom_mem_ops
= {
2907 .read
= error_mem_read
,
2908 .write
= unassigned_mem_write
,
2909 .endianness
= DEVICE_NATIVE_ENDIAN
,
2912 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
2913 uint64_t val
, unsigned size
)
2916 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
2917 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2918 #if !defined(CONFIG_USER_ONLY)
2919 tb_invalidate_phys_page_fast(ram_addr
, size
);
2920 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
2925 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
2928 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
2931 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
2936 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2937 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
2938 /* we remove the notdirty callback only if the code has been
2940 if (dirty_flags
== 0xff)
2941 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2944 static const MemoryRegionOps notdirty_mem_ops
= {
2945 .read
= error_mem_read
,
2946 .write
= notdirty_mem_write
,
2947 .endianness
= DEVICE_NATIVE_ENDIAN
,
2950 /* Generate a debug exception if a watchpoint has been hit. */
2951 static void check_watchpoint(int offset
, int len_mask
, int flags
)
2953 CPUArchState
*env
= cpu_single_env
;
2954 target_ulong pc
, cs_base
;
2955 TranslationBlock
*tb
;
2960 if (env
->watchpoint_hit
) {
2961 /* We re-entered the check after replacing the TB. Now raise
2962 * the debug interrupt so that is will trigger after the
2963 * current instruction. */
2964 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2967 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2968 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2969 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
2970 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
2971 wp
->flags
|= BP_WATCHPOINT_HIT
;
2972 if (!env
->watchpoint_hit
) {
2973 env
->watchpoint_hit
= wp
;
2974 tb
= tb_find_pc(env
->mem_io_pc
);
2976 cpu_abort(env
, "check_watchpoint: could not find TB for "
2977 "pc=%p", (void *)env
->mem_io_pc
);
2979 cpu_restore_state(tb
, env
, env
->mem_io_pc
);
2980 tb_phys_invalidate(tb
, -1);
2981 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2982 env
->exception_index
= EXCP_DEBUG
;
2985 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2986 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
2987 cpu_resume_from_signal(env
, NULL
);
2991 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2996 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2997 so these check for a hit then pass through to the normal out-of-line
2999 static uint64_t watch_mem_read(void *opaque
, hwaddr addr
,
3002 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
3004 case 1: return ldub_phys(addr
);
3005 case 2: return lduw_phys(addr
);
3006 case 4: return ldl_phys(addr
);
3011 static void watch_mem_write(void *opaque
, hwaddr addr
,
3012 uint64_t val
, unsigned size
)
3014 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
3017 stb_phys(addr
, val
);
3020 stw_phys(addr
, val
);
3023 stl_phys(addr
, val
);
3029 static const MemoryRegionOps watch_mem_ops
= {
3030 .read
= watch_mem_read
,
3031 .write
= watch_mem_write
,
3032 .endianness
= DEVICE_NATIVE_ENDIAN
,
3035 static uint64_t subpage_read(void *opaque
, hwaddr addr
,
3038 subpage_t
*mmio
= opaque
;
3039 unsigned int idx
= SUBPAGE_IDX(addr
);
3040 MemoryRegionSection
*section
;
3041 #if defined(DEBUG_SUBPAGE)
3042 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3043 mmio
, len
, addr
, idx
);
3046 section
= &phys_sections
[mmio
->sub_section
[idx
]];
3048 addr
-= section
->offset_within_address_space
;
3049 addr
+= section
->offset_within_region
;
3050 return io_mem_read(section
->mr
, addr
, len
);
3053 static void subpage_write(void *opaque
, hwaddr addr
,
3054 uint64_t value
, unsigned len
)
3056 subpage_t
*mmio
= opaque
;
3057 unsigned int idx
= SUBPAGE_IDX(addr
);
3058 MemoryRegionSection
*section
;
3059 #if defined(DEBUG_SUBPAGE)
3060 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3061 " idx %d value %"PRIx64
"\n",
3062 __func__
, mmio
, len
, addr
, idx
, value
);
3065 section
= &phys_sections
[mmio
->sub_section
[idx
]];
3067 addr
-= section
->offset_within_address_space
;
3068 addr
+= section
->offset_within_region
;
3069 io_mem_write(section
->mr
, addr
, value
, len
);
3072 static const MemoryRegionOps subpage_ops
= {
3073 .read
= subpage_read
,
3074 .write
= subpage_write
,
3075 .endianness
= DEVICE_NATIVE_ENDIAN
,
3078 static uint64_t subpage_ram_read(void *opaque
, hwaddr addr
,
3081 ram_addr_t raddr
= addr
;
3082 void *ptr
= qemu_get_ram_ptr(raddr
);
3084 case 1: return ldub_p(ptr
);
3085 case 2: return lduw_p(ptr
);
3086 case 4: return ldl_p(ptr
);
3091 static void subpage_ram_write(void *opaque
, hwaddr addr
,
3092 uint64_t value
, unsigned size
)
3094 ram_addr_t raddr
= addr
;
3095 void *ptr
= qemu_get_ram_ptr(raddr
);
3097 case 1: return stb_p(ptr
, value
);
3098 case 2: return stw_p(ptr
, value
);
3099 case 4: return stl_p(ptr
, value
);
3104 static const MemoryRegionOps subpage_ram_ops
= {
3105 .read
= subpage_ram_read
,
3106 .write
= subpage_ram_write
,
3107 .endianness
= DEVICE_NATIVE_ENDIAN
,
3110 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3115 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3117 idx
= SUBPAGE_IDX(start
);
3118 eidx
= SUBPAGE_IDX(end
);
3119 #if defined(DEBUG_SUBPAGE)
3120 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3121 mmio
, start
, end
, idx
, eidx
, memory
);
3123 if (memory_region_is_ram(phys_sections
[section
].mr
)) {
3124 MemoryRegionSection new_section
= phys_sections
[section
];
3125 new_section
.mr
= &io_mem_subpage_ram
;
3126 section
= phys_section_add(&new_section
);
3128 for (; idx
<= eidx
; idx
++) {
3129 mmio
->sub_section
[idx
] = section
;
3135 static subpage_t
*subpage_init(hwaddr base
)
3139 mmio
= g_malloc0(sizeof(subpage_t
));
3142 memory_region_init_io(&mmio
->iomem
, &subpage_ops
, mmio
,
3143 "subpage", TARGET_PAGE_SIZE
);
3144 mmio
->iomem
.subpage
= true;
3145 #if defined(DEBUG_SUBPAGE)
3146 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3147 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3149 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, phys_section_unassigned
);
3154 static uint16_t dummy_section(MemoryRegion
*mr
)
3156 MemoryRegionSection section
= {
3158 .offset_within_address_space
= 0,
3159 .offset_within_region
= 0,
3163 return phys_section_add(§ion
);
3166 MemoryRegion
*iotlb_to_region(hwaddr index
)
3168 return phys_sections
[index
& ~TARGET_PAGE_MASK
].mr
;
3171 static void io_mem_init(void)
3173 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
3174 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
3175 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
3176 "unassigned", UINT64_MAX
);
3177 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
3178 "notdirty", UINT64_MAX
);
3179 memory_region_init_io(&io_mem_subpage_ram
, &subpage_ram_ops
, NULL
,
3180 "subpage-ram", UINT64_MAX
);
3181 memory_region_init_io(&io_mem_watch
, &watch_mem_ops
, NULL
,
3182 "watch", UINT64_MAX
);
3185 static void mem_begin(MemoryListener
*listener
)
3187 AddressSpaceDispatch
*d
= container_of(listener
, AddressSpaceDispatch
, listener
);
3189 destroy_all_mappings(d
);
3190 d
->phys_map
.ptr
= PHYS_MAP_NODE_NIL
;
3193 static void core_begin(MemoryListener
*listener
)
3195 phys_sections_clear();
3196 phys_section_unassigned
= dummy_section(&io_mem_unassigned
);
3197 phys_section_notdirty
= dummy_section(&io_mem_notdirty
);
3198 phys_section_rom
= dummy_section(&io_mem_rom
);
3199 phys_section_watch
= dummy_section(&io_mem_watch
);
3202 static void tcg_commit(MemoryListener
*listener
)
3206 /* since each CPU stores ram addresses in its TLB cache, we must
3207 reset the modified entries */
3209 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
3214 static void core_log_global_start(MemoryListener
*listener
)
3216 cpu_physical_memory_set_dirty_tracking(1);
3219 static void core_log_global_stop(MemoryListener
*listener
)
3221 cpu_physical_memory_set_dirty_tracking(0);
3224 static void io_region_add(MemoryListener
*listener
,
3225 MemoryRegionSection
*section
)
3227 MemoryRegionIORange
*mrio
= g_new(MemoryRegionIORange
, 1);
3229 mrio
->mr
= section
->mr
;
3230 mrio
->offset
= section
->offset_within_region
;
3231 iorange_init(&mrio
->iorange
, &memory_region_iorange_ops
,
3232 section
->offset_within_address_space
, section
->size
);
3233 ioport_register(&mrio
->iorange
);
3236 static void io_region_del(MemoryListener
*listener
,
3237 MemoryRegionSection
*section
)
3239 isa_unassign_ioport(section
->offset_within_address_space
, section
->size
);
3242 static MemoryListener core_memory_listener
= {
3243 .begin
= core_begin
,
3244 .log_global_start
= core_log_global_start
,
3245 .log_global_stop
= core_log_global_stop
,
3249 static MemoryListener io_memory_listener
= {
3250 .region_add
= io_region_add
,
3251 .region_del
= io_region_del
,
3255 static MemoryListener tcg_memory_listener
= {
3256 .commit
= tcg_commit
,
3259 void address_space_init_dispatch(AddressSpace
*as
)
3261 AddressSpaceDispatch
*d
= g_new(AddressSpaceDispatch
, 1);
3263 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .is_leaf
= 0 };
3264 d
->listener
= (MemoryListener
) {
3266 .region_add
= mem_add
,
3267 .region_nop
= mem_add
,
3271 memory_listener_register(&d
->listener
, as
);
3274 void address_space_destroy_dispatch(AddressSpace
*as
)
3276 AddressSpaceDispatch
*d
= as
->dispatch
;
3278 memory_listener_unregister(&d
->listener
);
3279 destroy_l2_mapping(&d
->phys_map
, P_L2_LEVELS
- 1);
3281 as
->dispatch
= NULL
;
3284 static void memory_map_init(void)
3286 system_memory
= g_malloc(sizeof(*system_memory
));
3287 memory_region_init(system_memory
, "system", INT64_MAX
);
3288 address_space_init(&address_space_memory
, system_memory
);
3289 address_space_memory
.name
= "memory";
3291 system_io
= g_malloc(sizeof(*system_io
));
3292 memory_region_init(system_io
, "io", 65536);
3293 address_space_init(&address_space_io
, system_io
);
3294 address_space_io
.name
= "I/O";
3296 memory_listener_register(&core_memory_listener
, &address_space_memory
);
3297 memory_listener_register(&io_memory_listener
, &address_space_io
);
3298 memory_listener_register(&tcg_memory_listener
, &address_space_memory
);
3300 dma_context_init(&dma_context_memory
, &address_space_memory
,
3304 MemoryRegion
*get_system_memory(void)
3306 return system_memory
;
3309 MemoryRegion
*get_system_io(void)
3314 #endif /* !defined(CONFIG_USER_ONLY) */
3316 /* physical memory access (slow version, mainly for debug) */
3317 #if defined(CONFIG_USER_ONLY)
3318 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
3319 uint8_t *buf
, int len
, int is_write
)
3326 page
= addr
& TARGET_PAGE_MASK
;
3327 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3330 flags
= page_get_flags(page
);
3331 if (!(flags
& PAGE_VALID
))
3334 if (!(flags
& PAGE_WRITE
))
3336 /* XXX: this code should not depend on lock_user */
3337 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3340 unlock_user(p
, addr
, l
);
3342 if (!(flags
& PAGE_READ
))
3344 /* XXX: this code should not depend on lock_user */
3345 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3348 unlock_user(p
, addr
, 0);
3359 static void invalidate_and_set_dirty(hwaddr addr
,
3362 if (!cpu_physical_memory_is_dirty(addr
)) {
3363 /* invalidate code */
3364 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
3366 cpu_physical_memory_set_dirty_flags(addr
, (0xff & ~CODE_DIRTY_FLAG
));
3368 xen_modified_memory(addr
, length
);
3371 void address_space_rw(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
,
3372 int len
, bool is_write
)
3374 AddressSpaceDispatch
*d
= as
->dispatch
;
3379 MemoryRegionSection
*section
;
3382 page
= addr
& TARGET_PAGE_MASK
;
3383 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3386 section
= phys_page_find(d
, page
>> TARGET_PAGE_BITS
);
3389 if (!memory_region_is_ram(section
->mr
)) {
3391 addr1
= memory_region_section_addr(section
, addr
);
3392 /* XXX: could force cpu_single_env to NULL to avoid
3394 if (l
>= 4 && ((addr1
& 3) == 0)) {
3395 /* 32 bit write access */
3397 io_mem_write(section
->mr
, addr1
, val
, 4);
3399 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3400 /* 16 bit write access */
3402 io_mem_write(section
->mr
, addr1
, val
, 2);
3405 /* 8 bit write access */
3407 io_mem_write(section
->mr
, addr1
, val
, 1);
3410 } else if (!section
->readonly
) {
3412 addr1
= memory_region_get_ram_addr(section
->mr
)
3413 + memory_region_section_addr(section
, addr
);
3415 ptr
= qemu_get_ram_ptr(addr1
);
3416 memcpy(ptr
, buf
, l
);
3417 invalidate_and_set_dirty(addr1
, l
);
3418 qemu_put_ram_ptr(ptr
);
3421 if (!(memory_region_is_ram(section
->mr
) ||
3422 memory_region_is_romd(section
->mr
))) {
3425 addr1
= memory_region_section_addr(section
, addr
);
3426 if (l
>= 4 && ((addr1
& 3) == 0)) {
3427 /* 32 bit read access */
3428 val
= io_mem_read(section
->mr
, addr1
, 4);
3431 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3432 /* 16 bit read access */
3433 val
= io_mem_read(section
->mr
, addr1
, 2);
3437 /* 8 bit read access */
3438 val
= io_mem_read(section
->mr
, addr1
, 1);
3444 ptr
= qemu_get_ram_ptr(section
->mr
->ram_addr
3445 + memory_region_section_addr(section
,
3447 memcpy(buf
, ptr
, l
);
3448 qemu_put_ram_ptr(ptr
);
3457 void address_space_write(AddressSpace
*as
, hwaddr addr
,
3458 const uint8_t *buf
, int len
)
3460 address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
3464 * address_space_read: read from an address space.
3466 * @as: #AddressSpace to be accessed
3467 * @addr: address within that address space
3468 * @buf: buffer with the data transferred
3470 void address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
3472 address_space_rw(as
, addr
, buf
, len
, false);
3476 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
3477 int len
, int is_write
)
3479 return address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
3482 /* used for ROM loading : can write in RAM and ROM */
3483 void cpu_physical_memory_write_rom(hwaddr addr
,
3484 const uint8_t *buf
, int len
)
3486 AddressSpaceDispatch
*d
= address_space_memory
.dispatch
;
3490 MemoryRegionSection
*section
;
3493 page
= addr
& TARGET_PAGE_MASK
;
3494 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3497 section
= phys_page_find(d
, page
>> TARGET_PAGE_BITS
);
3499 if (!(memory_region_is_ram(section
->mr
) ||
3500 memory_region_is_romd(section
->mr
))) {
3503 unsigned long addr1
;
3504 addr1
= memory_region_get_ram_addr(section
->mr
)
3505 + memory_region_section_addr(section
, addr
);
3507 ptr
= qemu_get_ram_ptr(addr1
);
3508 memcpy(ptr
, buf
, l
);
3509 invalidate_and_set_dirty(addr1
, l
);
3510 qemu_put_ram_ptr(ptr
);
3524 static BounceBuffer bounce
;
3526 typedef struct MapClient
{
3528 void (*callback
)(void *opaque
);
3529 QLIST_ENTRY(MapClient
) link
;
3532 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
3533 = QLIST_HEAD_INITIALIZER(map_client_list
);
3535 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3537 MapClient
*client
= g_malloc(sizeof(*client
));
3539 client
->opaque
= opaque
;
3540 client
->callback
= callback
;
3541 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3545 static void cpu_unregister_map_client(void *_client
)
3547 MapClient
*client
= (MapClient
*)_client
;
3549 QLIST_REMOVE(client
, link
);
3553 static void cpu_notify_map_clients(void)
3557 while (!QLIST_EMPTY(&map_client_list
)) {
3558 client
= QLIST_FIRST(&map_client_list
);
3559 client
->callback(client
->opaque
);
3560 cpu_unregister_map_client(client
);
3564 /* Map a physical memory region into a host virtual address.
3565 * May map a subset of the requested range, given by and returned in *plen.
3566 * May return NULL if resources needed to perform the mapping are exhausted.
3567 * Use only for reads OR writes - not for read-modify-write operations.
3568 * Use cpu_register_map_client() to know when retrying the map operation is
3569 * likely to succeed.
3571 void *address_space_map(AddressSpace
*as
,
3576 AddressSpaceDispatch
*d
= as
->dispatch
;
3581 MemoryRegionSection
*section
;
3582 ram_addr_t raddr
= RAM_ADDR_MAX
;
3587 page
= addr
& TARGET_PAGE_MASK
;
3588 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3591 section
= phys_page_find(d
, page
>> TARGET_PAGE_BITS
);
3593 if (!(memory_region_is_ram(section
->mr
) && !section
->readonly
)) {
3594 if (todo
|| bounce
.buffer
) {
3597 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3601 address_space_read(as
, addr
, bounce
.buffer
, l
);
3605 return bounce
.buffer
;
3608 raddr
= memory_region_get_ram_addr(section
->mr
)
3609 + memory_region_section_addr(section
, addr
);
3617 ret
= qemu_ram_ptr_length(raddr
, &rlen
);
3622 /* Unmaps a memory region previously mapped by address_space_map().
3623 * Will also mark the memory as dirty if is_write == 1. access_len gives
3624 * the amount of memory that was actually read or written by the caller.
3626 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
3627 int is_write
, hwaddr access_len
)
3629 if (buffer
!= bounce
.buffer
) {
3631 ram_addr_t addr1
= qemu_ram_addr_from_host_nofail(buffer
);
3632 while (access_len
) {
3634 l
= TARGET_PAGE_SIZE
;
3637 invalidate_and_set_dirty(addr1
, l
);
3642 if (xen_enabled()) {
3643 xen_invalidate_map_cache_entry(buffer
);
3648 address_space_write(as
, bounce
.addr
, bounce
.buffer
, access_len
);
3650 qemu_vfree(bounce
.buffer
);
3651 bounce
.buffer
= NULL
;
3652 cpu_notify_map_clients();
3655 void *cpu_physical_memory_map(hwaddr addr
,
3659 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
3662 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
3663 int is_write
, hwaddr access_len
)
3665 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
3668 /* warning: addr must be aligned */
3669 static inline uint32_t ldl_phys_internal(hwaddr addr
,
3670 enum device_endian endian
)
3674 MemoryRegionSection
*section
;
3676 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
3678 if (!(memory_region_is_ram(section
->mr
) ||
3679 memory_region_is_romd(section
->mr
))) {
3681 addr
= memory_region_section_addr(section
, addr
);
3682 val
= io_mem_read(section
->mr
, addr
, 4);
3683 #if defined(TARGET_WORDS_BIGENDIAN)
3684 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3688 if (endian
== DEVICE_BIG_ENDIAN
) {
3694 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3696 + memory_region_section_addr(section
, addr
));
3698 case DEVICE_LITTLE_ENDIAN
:
3699 val
= ldl_le_p(ptr
);
3701 case DEVICE_BIG_ENDIAN
:
3702 val
= ldl_be_p(ptr
);
3712 uint32_t ldl_phys(hwaddr addr
)
3714 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3717 uint32_t ldl_le_phys(hwaddr addr
)
3719 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3722 uint32_t ldl_be_phys(hwaddr addr
)
3724 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3727 /* warning: addr must be aligned */
3728 static inline uint64_t ldq_phys_internal(hwaddr addr
,
3729 enum device_endian endian
)
3733 MemoryRegionSection
*section
;
3735 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
3737 if (!(memory_region_is_ram(section
->mr
) ||
3738 memory_region_is_romd(section
->mr
))) {
3740 addr
= memory_region_section_addr(section
, addr
);
3742 /* XXX This is broken when device endian != cpu endian.
3743 Fix and add "endian" variable check */
3744 #ifdef TARGET_WORDS_BIGENDIAN
3745 val
= io_mem_read(section
->mr
, addr
, 4) << 32;
3746 val
|= io_mem_read(section
->mr
, addr
+ 4, 4);
3748 val
= io_mem_read(section
->mr
, addr
, 4);
3749 val
|= io_mem_read(section
->mr
, addr
+ 4, 4) << 32;
3753 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3755 + memory_region_section_addr(section
, addr
));
3757 case DEVICE_LITTLE_ENDIAN
:
3758 val
= ldq_le_p(ptr
);
3760 case DEVICE_BIG_ENDIAN
:
3761 val
= ldq_be_p(ptr
);
3771 uint64_t ldq_phys(hwaddr addr
)
3773 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3776 uint64_t ldq_le_phys(hwaddr addr
)
3778 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3781 uint64_t ldq_be_phys(hwaddr addr
)
3783 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3787 uint32_t ldub_phys(hwaddr addr
)
3790 cpu_physical_memory_read(addr
, &val
, 1);
3794 /* warning: addr must be aligned */
3795 static inline uint32_t lduw_phys_internal(hwaddr addr
,
3796 enum device_endian endian
)
3800 MemoryRegionSection
*section
;
3802 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
3804 if (!(memory_region_is_ram(section
->mr
) ||
3805 memory_region_is_romd(section
->mr
))) {
3807 addr
= memory_region_section_addr(section
, addr
);
3808 val
= io_mem_read(section
->mr
, addr
, 2);
3809 #if defined(TARGET_WORDS_BIGENDIAN)
3810 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3814 if (endian
== DEVICE_BIG_ENDIAN
) {
3820 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3822 + memory_region_section_addr(section
, addr
));
3824 case DEVICE_LITTLE_ENDIAN
:
3825 val
= lduw_le_p(ptr
);
3827 case DEVICE_BIG_ENDIAN
:
3828 val
= lduw_be_p(ptr
);
3838 uint32_t lduw_phys(hwaddr addr
)
3840 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3843 uint32_t lduw_le_phys(hwaddr addr
)
3845 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3848 uint32_t lduw_be_phys(hwaddr addr
)
3850 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3853 /* warning: addr must be aligned. The ram page is not masked as dirty
3854 and the code inside is not invalidated. It is useful if the dirty
3855 bits are used to track modified PTEs */
3856 void stl_phys_notdirty(hwaddr addr
, uint32_t val
)
3859 MemoryRegionSection
*section
;
3861 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
3863 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3864 addr
= memory_region_section_addr(section
, addr
);
3865 if (memory_region_is_ram(section
->mr
)) {
3866 section
= &phys_sections
[phys_section_rom
];
3868 io_mem_write(section
->mr
, addr
, val
, 4);
3870 unsigned long addr1
= (memory_region_get_ram_addr(section
->mr
)
3872 + memory_region_section_addr(section
, addr
);
3873 ptr
= qemu_get_ram_ptr(addr1
);
3876 if (unlikely(in_migration
)) {
3877 if (!cpu_physical_memory_is_dirty(addr1
)) {
3878 /* invalidate code */
3879 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3881 cpu_physical_memory_set_dirty_flags(
3882 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3888 void stq_phys_notdirty(hwaddr addr
, uint64_t val
)
3891 MemoryRegionSection
*section
;
3893 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
3895 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3896 addr
= memory_region_section_addr(section
, addr
);
3897 if (memory_region_is_ram(section
->mr
)) {
3898 section
= &phys_sections
[phys_section_rom
];
3900 #ifdef TARGET_WORDS_BIGENDIAN
3901 io_mem_write(section
->mr
, addr
, val
>> 32, 4);
3902 io_mem_write(section
->mr
, addr
+ 4, (uint32_t)val
, 4);
3904 io_mem_write(section
->mr
, addr
, (uint32_t)val
, 4);
3905 io_mem_write(section
->mr
, addr
+ 4, val
>> 32, 4);
3908 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3910 + memory_region_section_addr(section
, addr
));
3915 /* warning: addr must be aligned */
3916 static inline void stl_phys_internal(hwaddr addr
, uint32_t val
,
3917 enum device_endian endian
)
3920 MemoryRegionSection
*section
;
3922 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
3924 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3925 addr
= memory_region_section_addr(section
, addr
);
3926 if (memory_region_is_ram(section
->mr
)) {
3927 section
= &phys_sections
[phys_section_rom
];
3929 #if defined(TARGET_WORDS_BIGENDIAN)
3930 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3934 if (endian
== DEVICE_BIG_ENDIAN
) {
3938 io_mem_write(section
->mr
, addr
, val
, 4);
3940 unsigned long addr1
;
3941 addr1
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
3942 + memory_region_section_addr(section
, addr
);
3944 ptr
= qemu_get_ram_ptr(addr1
);
3946 case DEVICE_LITTLE_ENDIAN
:
3949 case DEVICE_BIG_ENDIAN
:
3956 invalidate_and_set_dirty(addr1
, 4);
3960 void stl_phys(hwaddr addr
, uint32_t val
)
3962 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
3965 void stl_le_phys(hwaddr addr
, uint32_t val
)
3967 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
3970 void stl_be_phys(hwaddr addr
, uint32_t val
)
3972 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
3976 void stb_phys(hwaddr addr
, uint32_t val
)
3979 cpu_physical_memory_write(addr
, &v
, 1);
3982 /* warning: addr must be aligned */
3983 static inline void stw_phys_internal(hwaddr addr
, uint32_t val
,
3984 enum device_endian endian
)
3987 MemoryRegionSection
*section
;
3989 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
3991 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3992 addr
= memory_region_section_addr(section
, addr
);
3993 if (memory_region_is_ram(section
->mr
)) {
3994 section
= &phys_sections
[phys_section_rom
];
3996 #if defined(TARGET_WORDS_BIGENDIAN)
3997 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4001 if (endian
== DEVICE_BIG_ENDIAN
) {
4005 io_mem_write(section
->mr
, addr
, val
, 2);
4007 unsigned long addr1
;
4008 addr1
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
4009 + memory_region_section_addr(section
, addr
);
4011 ptr
= qemu_get_ram_ptr(addr1
);
4013 case DEVICE_LITTLE_ENDIAN
:
4016 case DEVICE_BIG_ENDIAN
:
4023 invalidate_and_set_dirty(addr1
, 2);
4027 void stw_phys(hwaddr addr
, uint32_t val
)
4029 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4032 void stw_le_phys(hwaddr addr
, uint32_t val
)
4034 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4037 void stw_be_phys(hwaddr addr
, uint32_t val
)
4039 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4043 void stq_phys(hwaddr addr
, uint64_t val
)
4046 cpu_physical_memory_write(addr
, &val
, 8);
4049 void stq_le_phys(hwaddr addr
, uint64_t val
)
4051 val
= cpu_to_le64(val
);
4052 cpu_physical_memory_write(addr
, &val
, 8);
4055 void stq_be_phys(hwaddr addr
, uint64_t val
)
4057 val
= cpu_to_be64(val
);
4058 cpu_physical_memory_write(addr
, &val
, 8);
4061 /* virtual memory access for debug (includes writing to ROM) */
4062 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
4063 uint8_t *buf
, int len
, int is_write
)
4070 page
= addr
& TARGET_PAGE_MASK
;
4071 phys_addr
= cpu_get_phys_page_debug(env
, page
);
4072 /* if no physical page mapped, return an error */
4073 if (phys_addr
== -1)
4075 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4078 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
4080 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
4082 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
4091 /* in deterministic execution mode, instructions doing device I/Os
4092 must be at the end of the TB */
4093 void cpu_io_recompile(CPUArchState
*env
, uintptr_t retaddr
)
4095 TranslationBlock
*tb
;
4097 target_ulong pc
, cs_base
;
4100 tb
= tb_find_pc(retaddr
);
4102 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4105 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4106 cpu_restore_state(tb
, env
, retaddr
);
4107 /* Calculate how many instructions had been executed before the fault
4109 n
= n
- env
->icount_decr
.u16
.low
;
4110 /* Generate a new TB ending on the I/O insn. */
4112 /* On MIPS and SH, delay slot instructions can only be restarted if
4113 they were already the first instruction in the TB. If this is not
4114 the first instruction in a TB then re-execute the preceding
4116 #if defined(TARGET_MIPS)
4117 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4118 env
->active_tc
.PC
-= 4;
4119 env
->icount_decr
.u16
.low
++;
4120 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4122 #elif defined(TARGET_SH4)
4123 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4126 env
->icount_decr
.u16
.low
++;
4127 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4130 /* This should never happen. */
4131 if (n
> CF_COUNT_MASK
)
4132 cpu_abort(env
, "TB too big during recompile");
4134 cflags
= n
| CF_LAST_IO
;
4136 cs_base
= tb
->cs_base
;
4138 tb_phys_invalidate(tb
, -1);
4139 /* FIXME: In theory this could raise an exception. In practice
4140 we have already translated the block once so it's probably ok. */
4141 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4142 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4143 the first in the TB) then we end up generating a whole new TB and
4144 repeating the fault, which is horribly inefficient.
4145 Better would be to execute just this insn uncached, or generate a
4147 cpu_resume_from_signal(env
, NULL
);
4150 #if !defined(CONFIG_USER_ONLY)
4152 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4154 int i
, target_code_size
, max_target_code_size
;
4155 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4156 TranslationBlock
*tb
;
4158 target_code_size
= 0;
4159 max_target_code_size
= 0;
4161 direct_jmp_count
= 0;
4162 direct_jmp2_count
= 0;
4163 for(i
= 0; i
< nb_tbs
; i
++) {
4165 target_code_size
+= tb
->size
;
4166 if (tb
->size
> max_target_code_size
)
4167 max_target_code_size
= tb
->size
;
4168 if (tb
->page_addr
[1] != -1)
4170 if (tb
->tb_next_offset
[0] != 0xffff) {
4172 if (tb
->tb_next_offset
[1] != 0xffff) {
4173 direct_jmp2_count
++;
4177 /* XXX: avoid using doubles ? */
4178 cpu_fprintf(f
, "Translation buffer state:\n");
4179 cpu_fprintf(f
, "gen code size %td/%zd\n",
4180 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4181 cpu_fprintf(f
, "TB count %d/%d\n",
4182 nb_tbs
, code_gen_max_blocks
);
4183 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4184 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4185 max_target_code_size
);
4186 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4187 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4188 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4189 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4191 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4192 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4194 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4196 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4197 cpu_fprintf(f
, "\nStatistics:\n");
4198 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4199 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4200 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4201 tcg_dump_info(f
, cpu_fprintf
);
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
4220 #ifndef CONFIG_USER_ONLY
4221 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
4223 MemoryRegionSection
*section
;
4225 section
= phys_page_find(address_space_memory
.dispatch
,
4226 phys_addr
>> TARGET_PAGE_BITS
);
4228 return !(memory_region_is_ram(section
->mr
) ||
4229 memory_region_is_romd(section
->mr
));