2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
29 #include "cache-utils.h"
37 #include "qemu-timer.h"
39 #include "exec-memory.h"
40 #if defined(CONFIG_USER_ONLY)
42 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
43 #include <sys/param.h>
44 #if __FreeBSD_version >= 700104
45 #define HAVE_KINFO_GETVMMAP
46 #define sigqueue sigqueue_freebsd /* avoid redefinition */
49 #include <machine/profile.h>
57 #else /* !CONFIG_USER_ONLY */
58 #include "xen-mapcache.h"
62 #define WANT_EXEC_OBSOLETE
63 #include "exec-obsolete.h"
65 //#define DEBUG_TB_INVALIDATE
68 //#define DEBUG_UNASSIGNED
70 /* make various TB consistency checks */
71 //#define DEBUG_TB_CHECK
72 //#define DEBUG_TLB_CHECK
74 //#define DEBUG_IOPORT
75 //#define DEBUG_SUBPAGE
77 #if !defined(CONFIG_USER_ONLY)
78 /* TB consistency checks only implemented for usermode emulation. */
82 #define SMC_BITMAP_USE_THRESHOLD 10
84 static TranslationBlock
*tbs
;
85 static int code_gen_max_blocks
;
86 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
91 #if defined(__arm__) || defined(__sparc_v9__)
92 /* The prologue must be reachable with a direct jump. ARM and Sparc64
93 have limited branch ranges (possibly also PPC) so place it in a
94 section close to code segment. */
95 #define code_gen_section \
96 __attribute__((__section__(".gen_code"))) \
97 __attribute__((aligned (32)))
99 /* Maximum alignment for Win32 is 16. */
100 #define code_gen_section \
101 __attribute__((aligned (16)))
103 #define code_gen_section \
104 __attribute__((aligned (32)))
107 uint8_t code_gen_prologue
[1024] code_gen_section
;
108 static uint8_t *code_gen_buffer
;
109 static unsigned long code_gen_buffer_size
;
110 /* threshold to flush the translated code buffer */
111 static unsigned long code_gen_buffer_max_size
;
112 static uint8_t *code_gen_ptr
;
114 #if !defined(CONFIG_USER_ONLY)
116 static int in_migration
;
118 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
120 static MemoryRegion
*system_memory
;
121 static MemoryRegion
*system_io
;
123 MemoryRegion io_mem_ram
, io_mem_rom
, io_mem_unassigned
, io_mem_notdirty
;
124 static MemoryRegion io_mem_subpage_ram
;
129 /* current CPU in the current thread. It is only valid inside
131 DEFINE_TLS(CPUState
*,cpu_single_env
);
132 /* 0 = Do not count executed instructions.
133 1 = Precise instruction counting.
134 2 = Adaptive rate instruction counting. */
137 typedef struct PageDesc
{
138 /* list of TBs intersecting this ram page */
139 TranslationBlock
*first_tb
;
140 /* in order to optimize self modifying code, we count the number
141 of lookups we do to a given page to use a bitmap */
142 unsigned int code_write_count
;
143 uint8_t *code_bitmap
;
144 #if defined(CONFIG_USER_ONLY)
149 /* In system mode we want L1_MAP to be based on ram offsets,
150 while in user mode we want it to be based on virtual addresses. */
151 #if !defined(CONFIG_USER_ONLY)
152 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
153 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
155 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
158 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
161 /* Size of the L2 (and L3, etc) page tables. */
163 #define L2_SIZE (1 << L2_BITS)
165 /* The bits remaining after N lower levels of page tables. */
166 #define P_L1_BITS_REM \
167 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168 #define V_L1_BITS_REM \
169 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
171 /* Size of the L1 page table. Avoid silly small sizes. */
172 #if P_L1_BITS_REM < 4
173 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
175 #define P_L1_BITS P_L1_BITS_REM
178 #if V_L1_BITS_REM < 4
179 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
181 #define V_L1_BITS V_L1_BITS_REM
184 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
185 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
187 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
188 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
190 unsigned long qemu_real_host_page_size
;
191 unsigned long qemu_host_page_size
;
192 unsigned long qemu_host_page_mask
;
194 /* This is a multi-level map on the virtual address space.
195 The bottom level has pointers to PageDesc. */
196 static void *l1_map
[V_L1_SIZE
];
198 #if !defined(CONFIG_USER_ONLY)
199 typedef struct PhysPageDesc
{
200 /* offset in host memory of the page + io_index in the low bits */
201 ram_addr_t phys_offset
;
202 ram_addr_t region_offset
;
205 /* This is a multi-level map on the physical address space.
206 The bottom level has pointers to PhysPageDesc. */
207 static void *l1_phys_map
[P_L1_SIZE
];
209 static void io_mem_init(void);
210 static void memory_map_init(void);
212 /* io memory support */
213 MemoryRegion
*io_mem_region
[IO_MEM_NB_ENTRIES
];
214 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
215 static MemoryRegion io_mem_watch
;
220 static const char *logfilename
= "qemu.log";
222 static const char *logfilename
= "/tmp/qemu.log";
226 static int log_append
= 0;
229 #if !defined(CONFIG_USER_ONLY)
230 static int tlb_flush_count
;
232 static int tb_flush_count
;
233 static int tb_phys_invalidate_count
;
236 static void map_exec(void *addr
, long size
)
239 VirtualProtect(addr
, size
,
240 PAGE_EXECUTE_READWRITE
, &old_protect
);
/* Make the address range [addr, addr + size) executable on POSIX
   hosts: widen the protection of every page overlapping the range
   to read/write/execute. */
static void map_exec(void *addr, long size)
{
    unsigned long first, last, pg;

    pg = getpagesize();

    /* Round the start of the range down to a page boundary... */
    first = (unsigned long)addr;
    first &= ~(pg - 1);

    /* ...and the end up, so whole pages are covered. */
    last = (unsigned long)addr + size;
    last += pg - 1;
    last &= ~(pg - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
261 static void page_init(void)
263 /* NOTE: we can always suppose that qemu_host_page_size >=
267 SYSTEM_INFO system_info
;
269 GetSystemInfo(&system_info
);
270 qemu_real_host_page_size
= system_info
.dwPageSize
;
273 qemu_real_host_page_size
= getpagesize();
275 if (qemu_host_page_size
== 0)
276 qemu_host_page_size
= qemu_real_host_page_size
;
277 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
278 qemu_host_page_size
= TARGET_PAGE_SIZE
;
279 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
281 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
283 #ifdef HAVE_KINFO_GETVMMAP
284 struct kinfo_vmentry
*freep
;
287 freep
= kinfo_getvmmap(getpid(), &cnt
);
290 for (i
= 0; i
< cnt
; i
++) {
291 unsigned long startaddr
, endaddr
;
293 startaddr
= freep
[i
].kve_start
;
294 endaddr
= freep
[i
].kve_end
;
295 if (h2g_valid(startaddr
)) {
296 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
298 if (h2g_valid(endaddr
)) {
299 endaddr
= h2g(endaddr
);
300 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
302 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
304 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
315 last_brk
= (unsigned long)sbrk(0);
317 f
= fopen("/compat/linux/proc/self/maps", "r");
322 unsigned long startaddr
, endaddr
;
325 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
327 if (n
== 2 && h2g_valid(startaddr
)) {
328 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
330 if (h2g_valid(endaddr
)) {
331 endaddr
= h2g(endaddr
);
335 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
347 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
353 #if defined(CONFIG_USER_ONLY)
354 /* We can't use g_malloc because it may recurse into a locked mutex. */
355 # define ALLOC(P, SIZE) \
357 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
358 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
361 # define ALLOC(P, SIZE) \
362 do { P = g_malloc0(SIZE); } while (0)
365 /* Level 1. Always allocated. */
366 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
369 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
376 ALLOC(p
, sizeof(void *) * L2_SIZE
);
380 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
388 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
394 return pd
+ (index
& (L2_SIZE
- 1));
397 static inline PageDesc
*page_find(tb_page_addr_t index
)
399 return page_find_alloc(index
, 0);
402 #if !defined(CONFIG_USER_ONLY)
403 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
409 /* Level 1. Always allocated. */
410 lp
= l1_phys_map
+ ((index
>> P_L1_SHIFT
) & (P_L1_SIZE
- 1));
413 for (i
= P_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
419 *lp
= p
= g_malloc0(sizeof(void *) * L2_SIZE
);
421 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
427 int first_index
= index
& ~(L2_SIZE
- 1);
433 *lp
= pd
= g_malloc(sizeof(PhysPageDesc
) * L2_SIZE
);
435 for (i
= 0; i
< L2_SIZE
; i
++) {
436 pd
[i
].phys_offset
= io_mem_unassigned
.ram_addr
;
437 pd
[i
].region_offset
= (first_index
+ i
) << TARGET_PAGE_BITS
;
441 return pd
+ (index
& (L2_SIZE
- 1));
444 static inline PhysPageDesc
phys_page_find(target_phys_addr_t index
)
446 PhysPageDesc
*p
= phys_page_find_alloc(index
, 0);
451 return (PhysPageDesc
) {
452 .phys_offset
= io_mem_unassigned
.ram_addr
,
453 .region_offset
= index
<< TARGET_PAGE_BITS
,
458 static void tlb_protect_code(ram_addr_t ram_addr
);
459 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
461 #define mmap_lock() do { } while(0)
462 #define mmap_unlock() do { } while(0)
465 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
467 #if defined(CONFIG_USER_ONLY)
468 /* Currently it is not recommended to allocate big chunks of data in
469 user mode. It will change when a dedicated libc will be used */
470 #define USE_STATIC_CODE_GEN_BUFFER
473 #ifdef USE_STATIC_CODE_GEN_BUFFER
474 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
475 __attribute__((aligned (CODE_GEN_ALIGN
)));
478 static void code_gen_alloc(unsigned long tb_size
)
480 #ifdef USE_STATIC_CODE_GEN_BUFFER
481 code_gen_buffer
= static_code_gen_buffer
;
482 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
483 map_exec(code_gen_buffer
, code_gen_buffer_size
);
485 code_gen_buffer_size
= tb_size
;
486 if (code_gen_buffer_size
== 0) {
487 #if defined(CONFIG_USER_ONLY)
488 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
490 /* XXX: needs adjustments */
491 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
494 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
495 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
496 /* The code gen buffer location may have constraints depending on
497 the host cpu and OS */
498 #if defined(__linux__)
503 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
504 #if defined(__x86_64__)
506 /* Cannot map more than that */
507 if (code_gen_buffer_size
> (800 * 1024 * 1024))
508 code_gen_buffer_size
= (800 * 1024 * 1024);
509 #elif defined(__sparc_v9__)
510 // Map the buffer below 2G, so we can use direct calls and branches
512 start
= (void *) 0x60000000UL
;
513 if (code_gen_buffer_size
> (512 * 1024 * 1024))
514 code_gen_buffer_size
= (512 * 1024 * 1024);
515 #elif defined(__arm__)
516 /* Keep the buffer no bigger than 16GB to branch between blocks */
517 if (code_gen_buffer_size
> 16 * 1024 * 1024)
518 code_gen_buffer_size
= 16 * 1024 * 1024;
519 #elif defined(__s390x__)
520 /* Map the buffer so that we can use direct calls and branches. */
521 /* We have a +- 4GB range on the branches; leave some slop. */
522 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
523 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
525 start
= (void *)0x90000000UL
;
527 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
528 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
530 if (code_gen_buffer
== MAP_FAILED
) {
531 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
535 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
536 || defined(__DragonFly__) || defined(__OpenBSD__) \
537 || defined(__NetBSD__)
541 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
542 #if defined(__x86_64__)
543 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
544 * 0x40000000 is free */
546 addr
= (void *)0x40000000;
547 /* Cannot map more than that */
548 if (code_gen_buffer_size
> (800 * 1024 * 1024))
549 code_gen_buffer_size
= (800 * 1024 * 1024);
550 #elif defined(__sparc_v9__)
551 // Map the buffer below 2G, so we can use direct calls and branches
553 addr
= (void *) 0x60000000UL
;
554 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
555 code_gen_buffer_size
= (512 * 1024 * 1024);
558 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
559 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
561 if (code_gen_buffer
== MAP_FAILED
) {
562 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
567 code_gen_buffer
= g_malloc(code_gen_buffer_size
);
568 map_exec(code_gen_buffer
, code_gen_buffer_size
);
570 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
571 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
572 code_gen_buffer_max_size
= code_gen_buffer_size
-
573 (TCG_MAX_OP_SIZE
* OPC_BUF_SIZE
);
574 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
575 tbs
= g_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
578 /* Must be called before using the QEMU cpus. 'tb_size' is the size
579 (in bytes) allocated to the translation buffer. Zero means default
581 void tcg_exec_init(unsigned long tb_size
)
584 code_gen_alloc(tb_size
);
585 code_gen_ptr
= code_gen_buffer
;
587 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
588 /* There's no guest base to take into account, so go ahead and
589 initialize the prologue now. */
590 tcg_prologue_init(&tcg_ctx
);
594 bool tcg_enabled(void)
596 return code_gen_buffer
!= NULL
;
599 void cpu_exec_init_all(void)
601 #if !defined(CONFIG_USER_ONLY)
607 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
609 static int cpu_common_post_load(void *opaque
, int version_id
)
611 CPUState
*env
= opaque
;
613 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
614 version_id is increased. */
615 env
->interrupt_request
&= ~0x01;
621 static const VMStateDescription vmstate_cpu_common
= {
622 .name
= "cpu_common",
624 .minimum_version_id
= 1,
625 .minimum_version_id_old
= 1,
626 .post_load
= cpu_common_post_load
,
627 .fields
= (VMStateField
[]) {
628 VMSTATE_UINT32(halted
, CPUState
),
629 VMSTATE_UINT32(interrupt_request
, CPUState
),
630 VMSTATE_END_OF_LIST()
635 CPUState
*qemu_get_cpu(int cpu
)
637 CPUState
*env
= first_cpu
;
640 if (env
->cpu_index
== cpu
)
648 void cpu_exec_init(CPUState
*env
)
653 #if defined(CONFIG_USER_ONLY)
656 env
->next_cpu
= NULL
;
659 while (*penv
!= NULL
) {
660 penv
= &(*penv
)->next_cpu
;
663 env
->cpu_index
= cpu_index
;
665 QTAILQ_INIT(&env
->breakpoints
);
666 QTAILQ_INIT(&env
->watchpoints
);
667 #ifndef CONFIG_USER_ONLY
668 env
->thread_id
= qemu_get_thread_id();
671 #if defined(CONFIG_USER_ONLY)
674 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
675 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
676 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
677 cpu_save
, cpu_load
, env
);
681 /* Allocate a new translation block. Flush the translation buffer if
682 too many translation blocks or too much generated code. */
683 static TranslationBlock
*tb_alloc(target_ulong pc
)
685 TranslationBlock
*tb
;
687 if (nb_tbs
>= code_gen_max_blocks
||
688 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
696 void tb_free(TranslationBlock
*tb
)
698 /* In practice this is mostly used for single use temporary TB
699 Ignore the hard cases and just back up if this TB happens to
700 be the last one generated. */
701 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
702 code_gen_ptr
= tb
->tc_ptr
;
707 static inline void invalidate_page_bitmap(PageDesc
*p
)
709 if (p
->code_bitmap
) {
710 g_free(p
->code_bitmap
);
711 p
->code_bitmap
= NULL
;
713 p
->code_write_count
= 0;
716 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
718 static void page_flush_tb_1 (int level
, void **lp
)
727 for (i
= 0; i
< L2_SIZE
; ++i
) {
728 pd
[i
].first_tb
= NULL
;
729 invalidate_page_bitmap(pd
+ i
);
733 for (i
= 0; i
< L2_SIZE
; ++i
) {
734 page_flush_tb_1 (level
- 1, pp
+ i
);
739 static void page_flush_tb(void)
742 for (i
= 0; i
< V_L1_SIZE
; i
++) {
743 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
747 /* flush all the translation blocks */
748 /* XXX: tb_flush is currently not thread safe */
749 void tb_flush(CPUState
*env1
)
752 #if defined(DEBUG_FLUSH)
753 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
754 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
756 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
758 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
759 cpu_abort(env1
, "Internal error: code buffer overflow\n");
763 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
764 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
767 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
770 code_gen_ptr
= code_gen_buffer
;
771 /* XXX: flush processor icache at this point if cache flush is
776 #ifdef DEBUG_TB_CHECK
778 static void tb_invalidate_check(target_ulong address
)
780 TranslationBlock
*tb
;
782 address
&= TARGET_PAGE_MASK
;
783 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
784 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
785 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
786 address
>= tb
->pc
+ tb
->size
)) {
787 printf("ERROR invalidate: address=" TARGET_FMT_lx
788 " PC=%08lx size=%04x\n",
789 address
, (long)tb
->pc
, tb
->size
);
795 /* verify that all the pages have correct rights for code */
796 static void tb_page_check(void)
798 TranslationBlock
*tb
;
799 int i
, flags1
, flags2
;
801 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
802 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
803 flags1
= page_get_flags(tb
->pc
);
804 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
805 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
806 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
807 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
815 /* invalidate one TB */
816 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
819 TranslationBlock
*tb1
;
823 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
826 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
830 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
832 TranslationBlock
*tb1
;
838 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
840 *ptb
= tb1
->page_next
[n1
];
843 ptb
= &tb1
->page_next
[n1
];
847 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
849 TranslationBlock
*tb1
, **ptb
;
852 ptb
= &tb
->jmp_next
[n
];
855 /* find tb(n) in circular list */
859 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
860 if (n1
== n
&& tb1
== tb
)
863 ptb
= &tb1
->jmp_first
;
865 ptb
= &tb1
->jmp_next
[n1
];
868 /* now we can suppress tb(n) from the list */
869 *ptb
= tb
->jmp_next
[n
];
871 tb
->jmp_next
[n
] = NULL
;
875 /* reset the jump entry 'n' of a TB so that it is not chained to
877 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
879 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
882 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
887 tb_page_addr_t phys_pc
;
888 TranslationBlock
*tb1
, *tb2
;
890 /* remove the TB from the hash list */
891 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
892 h
= tb_phys_hash_func(phys_pc
);
893 tb_remove(&tb_phys_hash
[h
], tb
,
894 offsetof(TranslationBlock
, phys_hash_next
));
896 /* remove the TB from the page list */
897 if (tb
->page_addr
[0] != page_addr
) {
898 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
899 tb_page_remove(&p
->first_tb
, tb
);
900 invalidate_page_bitmap(p
);
902 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
903 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
904 tb_page_remove(&p
->first_tb
, tb
);
905 invalidate_page_bitmap(p
);
908 tb_invalidated_flag
= 1;
910 /* remove the TB from the hash list */
911 h
= tb_jmp_cache_hash_func(tb
->pc
);
912 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
913 if (env
->tb_jmp_cache
[h
] == tb
)
914 env
->tb_jmp_cache
[h
] = NULL
;
917 /* suppress this TB from the two jump lists */
918 tb_jmp_remove(tb
, 0);
919 tb_jmp_remove(tb
, 1);
921 /* suppress any remaining jumps to this TB */
927 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
928 tb2
= tb1
->jmp_next
[n1
];
929 tb_reset_jump(tb1
, n1
);
930 tb1
->jmp_next
[n1
] = NULL
;
933 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
935 tb_phys_invalidate_count
++;
938 static inline void set_bits(uint8_t *tab
, int start
, int len
)
944 mask
= 0xff << (start
& 7);
945 if ((start
& ~7) == (end
& ~7)) {
947 mask
&= ~(0xff << (end
& 7));
952 start
= (start
+ 8) & ~7;
954 while (start
< end1
) {
959 mask
= ~(0xff << (end
& 7));
965 static void build_page_bitmap(PageDesc
*p
)
967 int n
, tb_start
, tb_end
;
968 TranslationBlock
*tb
;
970 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
975 tb
= (TranslationBlock
*)((long)tb
& ~3);
976 /* NOTE: this is subtle as a TB may span two physical pages */
978 /* NOTE: tb_end may be after the end of the page, but
979 it is not a problem */
980 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
981 tb_end
= tb_start
+ tb
->size
;
982 if (tb_end
> TARGET_PAGE_SIZE
)
983 tb_end
= TARGET_PAGE_SIZE
;
986 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
988 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
989 tb
= tb
->page_next
[n
];
993 TranslationBlock
*tb_gen_code(CPUState
*env
,
994 target_ulong pc
, target_ulong cs_base
,
995 int flags
, int cflags
)
997 TranslationBlock
*tb
;
999 tb_page_addr_t phys_pc
, phys_page2
;
1000 target_ulong virt_page2
;
1003 phys_pc
= get_page_addr_code(env
, pc
);
1006 /* flush must be done */
1008 /* cannot fail at this point */
1010 /* Don't forget to invalidate previous TB info. */
1011 tb_invalidated_flag
= 1;
1013 tc_ptr
= code_gen_ptr
;
1014 tb
->tc_ptr
= tc_ptr
;
1015 tb
->cs_base
= cs_base
;
1017 tb
->cflags
= cflags
;
1018 cpu_gen_code(env
, tb
, &code_gen_size
);
1019 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1021 /* check next page if needed */
1022 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1024 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1025 phys_page2
= get_page_addr_code(env
, virt_page2
);
1027 tb_link_page(tb
, phys_pc
, phys_page2
);
1031 /* invalidate all TBs which intersect with the target physical page
1032 starting in range [start;end[. NOTE: start and end must refer to
1033 the same physical page. 'is_cpu_write_access' should be true if called
1034 from a real cpu write access: the virtual CPU will exit the current
1035 TB if code is modified inside this TB. */
1036 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1037 int is_cpu_write_access
)
1039 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1040 CPUState
*env
= cpu_single_env
;
1041 tb_page_addr_t tb_start
, tb_end
;
1044 #ifdef TARGET_HAS_PRECISE_SMC
1045 int current_tb_not_found
= is_cpu_write_access
;
1046 TranslationBlock
*current_tb
= NULL
;
1047 int current_tb_modified
= 0;
1048 target_ulong current_pc
= 0;
1049 target_ulong current_cs_base
= 0;
1050 int current_flags
= 0;
1051 #endif /* TARGET_HAS_PRECISE_SMC */
1053 p
= page_find(start
>> TARGET_PAGE_BITS
);
1056 if (!p
->code_bitmap
&&
1057 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1058 is_cpu_write_access
) {
1059 /* build code bitmap */
1060 build_page_bitmap(p
);
1063 /* we remove all the TBs in the range [start, end[ */
1064 /* XXX: see if in some cases it could be faster to invalidate all the code */
1066 while (tb
!= NULL
) {
1068 tb
= (TranslationBlock
*)((long)tb
& ~3);
1069 tb_next
= tb
->page_next
[n
];
1070 /* NOTE: this is subtle as a TB may span two physical pages */
1072 /* NOTE: tb_end may be after the end of the page, but
1073 it is not a problem */
1074 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1075 tb_end
= tb_start
+ tb
->size
;
1077 tb_start
= tb
->page_addr
[1];
1078 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1080 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1081 #ifdef TARGET_HAS_PRECISE_SMC
1082 if (current_tb_not_found
) {
1083 current_tb_not_found
= 0;
1085 if (env
->mem_io_pc
) {
1086 /* now we have a real cpu fault */
1087 current_tb
= tb_find_pc(env
->mem_io_pc
);
1090 if (current_tb
== tb
&&
1091 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1092 /* If we are modifying the current TB, we must stop
1093 its execution. We could be more precise by checking
1094 that the modification is after the current PC, but it
1095 would require a specialized function to partially
1096 restore the CPU state */
1098 current_tb_modified
= 1;
1099 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1100 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1103 #endif /* TARGET_HAS_PRECISE_SMC */
1104 /* we need to do that to handle the case where a signal
1105 occurs while doing tb_phys_invalidate() */
1108 saved_tb
= env
->current_tb
;
1109 env
->current_tb
= NULL
;
1111 tb_phys_invalidate(tb
, -1);
1113 env
->current_tb
= saved_tb
;
1114 if (env
->interrupt_request
&& env
->current_tb
)
1115 cpu_interrupt(env
, env
->interrupt_request
);
1120 #if !defined(CONFIG_USER_ONLY)
1121 /* if no code remaining, no need to continue to use slow writes */
1123 invalidate_page_bitmap(p
);
1124 if (is_cpu_write_access
) {
1125 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1129 #ifdef TARGET_HAS_PRECISE_SMC
1130 if (current_tb_modified
) {
1131 /* we generate a block containing just the instruction
1132 modifying the memory. It will ensure that it cannot modify
1134 env
->current_tb
= NULL
;
1135 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1136 cpu_resume_from_signal(env
, NULL
);
1141 /* len must be <= 8 and start must be a multiple of len */
1142 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1148 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1149 cpu_single_env
->mem_io_vaddr
, len
,
1150 cpu_single_env
->eip
,
1151 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1154 p
= page_find(start
>> TARGET_PAGE_BITS
);
1157 if (p
->code_bitmap
) {
1158 offset
= start
& ~TARGET_PAGE_MASK
;
1159 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1160 if (b
& ((1 << len
) - 1))
1164 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1168 #if !defined(CONFIG_SOFTMMU)
1169 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1170 unsigned long pc
, void *puc
)
1172 TranslationBlock
*tb
;
1175 #ifdef TARGET_HAS_PRECISE_SMC
1176 TranslationBlock
*current_tb
= NULL
;
1177 CPUState
*env
= cpu_single_env
;
1178 int current_tb_modified
= 0;
1179 target_ulong current_pc
= 0;
1180 target_ulong current_cs_base
= 0;
1181 int current_flags
= 0;
1184 addr
&= TARGET_PAGE_MASK
;
1185 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1189 #ifdef TARGET_HAS_PRECISE_SMC
1190 if (tb
&& pc
!= 0) {
1191 current_tb
= tb_find_pc(pc
);
1194 while (tb
!= NULL
) {
1196 tb
= (TranslationBlock
*)((long)tb
& ~3);
1197 #ifdef TARGET_HAS_PRECISE_SMC
1198 if (current_tb
== tb
&&
1199 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1200 /* If we are modifying the current TB, we must stop
1201 its execution. We could be more precise by checking
1202 that the modification is after the current PC, but it
1203 would require a specialized function to partially
1204 restore the CPU state */
1206 current_tb_modified
= 1;
1207 cpu_restore_state(current_tb
, env
, pc
);
1208 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1211 #endif /* TARGET_HAS_PRECISE_SMC */
1212 tb_phys_invalidate(tb
, addr
);
1213 tb
= tb
->page_next
[n
];
1216 #ifdef TARGET_HAS_PRECISE_SMC
1217 if (current_tb_modified
) {
1218 /* we generate a block containing just the instruction
1219 modifying the memory. It will ensure that it cannot modify
1221 env
->current_tb
= NULL
;
1222 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1223 cpu_resume_from_signal(env
, puc
);
1229 /* add the tb in the target page and protect it if necessary */
1230 static inline void tb_alloc_page(TranslationBlock
*tb
,
1231 unsigned int n
, tb_page_addr_t page_addr
)
1234 #ifndef CONFIG_USER_ONLY
1235 bool page_already_protected
;
1238 tb
->page_addr
[n
] = page_addr
;
1239 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1240 tb
->page_next
[n
] = p
->first_tb
;
1241 #ifndef CONFIG_USER_ONLY
1242 page_already_protected
= p
->first_tb
!= NULL
;
1244 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1245 invalidate_page_bitmap(p
);
1247 #if defined(TARGET_HAS_SMC) || 1
1249 #if defined(CONFIG_USER_ONLY)
1250 if (p
->flags
& PAGE_WRITE
) {
1255 /* force the host page as non writable (writes will have a
1256 page fault + mprotect overhead) */
1257 page_addr
&= qemu_host_page_mask
;
1259 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1260 addr
+= TARGET_PAGE_SIZE
) {
1262 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1266 p2
->flags
&= ~PAGE_WRITE
;
1268 mprotect(g2h(page_addr
), qemu_host_page_size
,
1269 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1270 #ifdef DEBUG_TB_INVALIDATE
1271 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1276 /* if some code is already present, then the pages are already
1277 protected. So we handle the case where only the first TB is
1278 allocated in a physical page */
1279 if (!page_already_protected
) {
1280 tlb_protect_code(page_addr
);
1284 #endif /* TARGET_HAS_SMC */
1287 /* add a new TB and link it to the physical page tables. phys_page2 is
1288 (-1) to indicate that only one page contains the TB. */
1289 void tb_link_page(TranslationBlock
*tb
,
1290 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1293 TranslationBlock
**ptb
;
1295 /* Grab the mmap lock to stop another thread invalidating this TB
1296 before we are done. */
1298 /* add in the physical hash table */
1299 h
= tb_phys_hash_func(phys_pc
);
1300 ptb
= &tb_phys_hash
[h
];
1301 tb
->phys_hash_next
= *ptb
;
1304 /* add in the page list */
1305 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1306 if (phys_page2
!= -1)
1307 tb_alloc_page(tb
, 1, phys_page2
);
1309 tb
->page_addr
[1] = -1;
1311 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1312 tb
->jmp_next
[0] = NULL
;
1313 tb
->jmp_next
[1] = NULL
;
1315 /* init original jump addresses */
1316 if (tb
->tb_next_offset
[0] != 0xffff)
1317 tb_reset_jump(tb
, 0);
1318 if (tb
->tb_next_offset
[1] != 0xffff)
1319 tb_reset_jump(tb
, 1);
1321 #ifdef DEBUG_TB_CHECK
1327 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1328 tb[1].tc_ptr. Return NULL if not found */
1329 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1331 int m_min
, m_max
, m
;
1333 TranslationBlock
*tb
;
1337 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1338 tc_ptr
>= (unsigned long)code_gen_ptr
)
1340 /* binary search (cf Knuth) */
1343 while (m_min
<= m_max
) {
1344 m
= (m_min
+ m_max
) >> 1;
1346 v
= (unsigned long)tb
->tc_ptr
;
1349 else if (tc_ptr
< v
) {
1358 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1360 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1362 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1365 tb1
= tb
->jmp_next
[n
];
1367 /* find head of list */
1370 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1373 tb1
= tb1
->jmp_next
[n1
];
1375 /* we are now sure now that tb jumps to tb1 */
1378 /* remove tb from the jmp_first list */
1379 ptb
= &tb_next
->jmp_first
;
1383 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1384 if (n1
== n
&& tb1
== tb
)
1386 ptb
= &tb1
->jmp_next
[n1
];
1388 *ptb
= tb
->jmp_next
[n
];
1389 tb
->jmp_next
[n
] = NULL
;
1391 /* suppress the jump to next tb in generated code */
1392 tb_reset_jump(tb
, n
);
1394 /* suppress jumps in the tb on which we could have jumped */
1395 tb_reset_jump_recursive(tb_next
);
1399 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1401 tb_reset_jump_recursive2(tb
, 0);
1402 tb_reset_jump_recursive2(tb
, 1);
1405 #if defined(TARGET_HAS_ICE)
1406 #if defined(CONFIG_USER_ONLY)
1407 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1409 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1412 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1414 target_phys_addr_t addr
;
1416 ram_addr_t ram_addr
;
1419 addr
= cpu_get_phys_page_debug(env
, pc
);
1420 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1422 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1423 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1426 #endif /* TARGET_HAS_ICE */
1428 #if defined(CONFIG_USER_ONLY)
1429 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1434 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1435 int flags
, CPUWatchpoint
**watchpoint
)
1440 /* Add a watchpoint. */
1441 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1442 int flags
, CPUWatchpoint
**watchpoint
)
1444 target_ulong len_mask
= ~(len
- 1);
1447 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1448 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1449 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1450 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1453 wp
= g_malloc(sizeof(*wp
));
1456 wp
->len_mask
= len_mask
;
1459 /* keep all GDB-injected watchpoints in front */
1461 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1463 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1465 tlb_flush_page(env
, addr
);
1472 /* Remove a specific watchpoint. */
1473 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1476 target_ulong len_mask
= ~(len
- 1);
1479 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1480 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1481 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1482 cpu_watchpoint_remove_by_ref(env
, wp
);
1489 /* Remove a specific watchpoint by reference. */
1490 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1492 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1494 tlb_flush_page(env
, watchpoint
->vaddr
);
1499 /* Remove all matching watchpoints. */
1500 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1502 CPUWatchpoint
*wp
, *next
;
1504 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1505 if (wp
->flags
& mask
)
1506 cpu_watchpoint_remove_by_ref(env
, wp
);
1511 /* Add a breakpoint. */
1512 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1513 CPUBreakpoint
**breakpoint
)
1515 #if defined(TARGET_HAS_ICE)
1518 bp
= g_malloc(sizeof(*bp
));
1523 /* keep all GDB-injected breakpoints in front */
1525 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1527 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1529 breakpoint_invalidate(env
, pc
);
1539 /* Remove a specific breakpoint. */
1540 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1542 #if defined(TARGET_HAS_ICE)
1545 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1546 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1547 cpu_breakpoint_remove_by_ref(env
, bp
);
1557 /* Remove a specific breakpoint by reference. */
1558 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1560 #if defined(TARGET_HAS_ICE)
1561 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1563 breakpoint_invalidate(env
, breakpoint
->pc
);
1569 /* Remove all matching breakpoints. */
1570 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1572 #if defined(TARGET_HAS_ICE)
1573 CPUBreakpoint
*bp
, *next
;
1575 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1576 if (bp
->flags
& mask
)
1577 cpu_breakpoint_remove_by_ref(env
, bp
);
1582 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1583 CPU loop after each instruction */
1584 void cpu_single_step(CPUState
*env
, int enabled
)
1586 #if defined(TARGET_HAS_ICE)
1587 if (env
->singlestep_enabled
!= enabled
) {
1588 env
->singlestep_enabled
= enabled
;
1590 kvm_update_guest_debug(env
, 0);
1592 /* must flush all the translated code to avoid inconsistencies */
1593 /* XXX: only flush what is necessary */
1600 /* enable or disable low levels log */
1601 void cpu_set_log(int log_flags
)
1603 loglevel
= log_flags
;
1604 if (loglevel
&& !logfile
) {
1605 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1607 perror(logfilename
);
1610 #if !defined(CONFIG_SOFTMMU)
1611 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1613 static char logfile_buf
[4096];
1614 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1616 #elif defined(_WIN32)
1617 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1618 setvbuf(logfile
, NULL
, _IONBF
, 0);
1620 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1624 if (!loglevel
&& logfile
) {
1630 void cpu_set_log_filename(const char *filename
)
1632 logfilename
= strdup(filename
);
1637 cpu_set_log(loglevel
);
1640 static void cpu_unlink_tb(CPUState
*env
)
1642 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1643 problem and hope the cpu will stop of its own accord. For userspace
1644 emulation this often isn't actually as bad as it sounds. Often
1645 signals are used primarily to interrupt blocking syscalls. */
1646 TranslationBlock
*tb
;
1647 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1649 spin_lock(&interrupt_lock
);
1650 tb
= env
->current_tb
;
1651 /* if the cpu is currently executing code, we must unlink it and
1652 all the potentially executing TB */
1654 env
->current_tb
= NULL
;
1655 tb_reset_jump_recursive(tb
);
1657 spin_unlock(&interrupt_lock
);
1660 #ifndef CONFIG_USER_ONLY
1661 /* mask must never be zero, except for A20 change call */
1662 static void tcg_handle_interrupt(CPUState
*env
, int mask
)
1666 old_mask
= env
->interrupt_request
;
1667 env
->interrupt_request
|= mask
;
1670 * If called from iothread context, wake the target cpu in
1673 if (!qemu_cpu_is_self(env
)) {
1679 env
->icount_decr
.u16
.high
= 0xffff;
1681 && (mask
& ~old_mask
) != 0) {
1682 cpu_abort(env
, "Raised interrupt while not in I/O function");
1689 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1691 #else /* CONFIG_USER_ONLY */
1693 void cpu_interrupt(CPUState
*env
, int mask
)
1695 env
->interrupt_request
|= mask
;
1698 #endif /* CONFIG_USER_ONLY */
1700 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1702 env
->interrupt_request
&= ~mask
;
1705 void cpu_exit(CPUState
*env
)
1707 env
->exit_request
= 1;
1711 const CPULogItem cpu_log_items
[] = {
1712 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1713 "show generated host assembly code for each compiled TB" },
1714 { CPU_LOG_TB_IN_ASM
, "in_asm",
1715 "show target assembly code for each compiled TB" },
1716 { CPU_LOG_TB_OP
, "op",
1717 "show micro ops for each compiled TB" },
1718 { CPU_LOG_TB_OP_OPT
, "op_opt",
1721 "before eflags optimization and "
1723 "after liveness analysis" },
1724 { CPU_LOG_INT
, "int",
1725 "show interrupts/exceptions in short format" },
1726 { CPU_LOG_EXEC
, "exec",
1727 "show trace before each executed TB (lots of logs)" },
1728 { CPU_LOG_TB_CPU
, "cpu",
1729 "show CPU state before block translation" },
1731 { CPU_LOG_PCALL
, "pcall",
1732 "show protected mode far calls/returns/exceptions" },
1733 { CPU_LOG_RESET
, "cpu_reset",
1734 "show CPU state before CPU resets" },
1737 { CPU_LOG_IOPORT
, "ioport",
1738 "show all i/o ports accesses" },
/* Compare the first 'n' characters of 's1' (not necessarily
 * NUL-terminated) against the NUL-terminated string 's2'.
 * Returns non-zero only when 's2' is exactly 'n' characters long and
 * matches 's1' over that length.
 *
 * Note: strlen() returns size_t; comparing it against a signed 'n'
 * promotes 'n' to unsigned, so a negative 'n' would wrap.  Guard the
 * sign explicitly and cast to make the comparison well-defined.
 */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (n < 0 || strlen(s2) != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, (size_t)n) == 0;
}
1750 /* takes a comma separated list of log masks. Return 0 if error. */
1751 int cpu_str_to_log_mask(const char *str
)
1753 const CPULogItem
*item
;
1760 p1
= strchr(p
, ',');
1763 if(cmp1(p
,p1
-p
,"all")) {
1764 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1768 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1769 if (cmp1(p
, p1
- p
, item
->name
))
1783 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1790 fprintf(stderr
, "qemu: fatal: ");
1791 vfprintf(stderr
, fmt
, ap
);
1792 fprintf(stderr
, "\n");
1794 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1796 cpu_dump_state(env
, stderr
, fprintf
, 0);
1798 if (qemu_log_enabled()) {
1799 qemu_log("qemu: fatal: ");
1800 qemu_log_vprintf(fmt
, ap2
);
1803 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1805 log_cpu_state(env
, 0);
1812 #if defined(CONFIG_USER_ONLY)
1814 struct sigaction act
;
1815 sigfillset(&act
.sa_mask
);
1816 act
.sa_handler
= SIG_DFL
;
1817 sigaction(SIGABRT
, &act
, NULL
);
1823 CPUState
*cpu_copy(CPUState
*env
)
1825 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1826 CPUState
*next_cpu
= new_env
->next_cpu
;
1827 int cpu_index
= new_env
->cpu_index
;
1828 #if defined(TARGET_HAS_ICE)
1833 memcpy(new_env
, env
, sizeof(CPUState
));
1835 /* Preserve chaining and index. */
1836 new_env
->next_cpu
= next_cpu
;
1837 new_env
->cpu_index
= cpu_index
;
1839 /* Clone all break/watchpoints.
1840 Note: Once we support ptrace with hw-debug register access, make sure
1841 BP_CPU break/watchpoints are handled correctly on clone. */
1842 QTAILQ_INIT(&env
->breakpoints
);
1843 QTAILQ_INIT(&env
->watchpoints
);
1844 #if defined(TARGET_HAS_ICE)
1845 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1846 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1848 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1849 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1857 #if !defined(CONFIG_USER_ONLY)
1859 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1863 /* Discard jump cache entries for any tb which might potentially
1864 overlap the flushed page. */
1865 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1866 memset (&env
->tb_jmp_cache
[i
], 0,
1867 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1869 i
= tb_jmp_cache_hash_page(addr
);
1870 memset (&env
->tb_jmp_cache
[i
], 0,
1871 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1874 static CPUTLBEntry s_cputlb_empty_entry
= {
1881 /* NOTE: if flush_global is true, also flush global entries (not
1883 void tlb_flush(CPUState
*env
, int flush_global
)
1887 #if defined(DEBUG_TLB)
1888 printf("tlb_flush:\n");
1890 /* must reset current TB so that interrupts cannot modify the
1891 links while we are modifying them */
1892 env
->current_tb
= NULL
;
1894 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1896 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1897 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1901 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1903 env
->tlb_flush_addr
= -1;
1904 env
->tlb_flush_mask
= 0;
1908 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1910 if (addr
== (tlb_entry
->addr_read
&
1911 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1912 addr
== (tlb_entry
->addr_write
&
1913 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1914 addr
== (tlb_entry
->addr_code
&
1915 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1916 *tlb_entry
= s_cputlb_empty_entry
;
1920 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1925 #if defined(DEBUG_TLB)
1926 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1928 /* Check if we need to flush due to large pages. */
1929 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1930 #if defined(DEBUG_TLB)
1931 printf("tlb_flush_page: forced full flush ("
1932 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1933 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1938 /* must reset current TB so that interrupts cannot modify the
1939 links while we are modifying them */
1940 env
->current_tb
= NULL
;
1942 addr
&= TARGET_PAGE_MASK
;
1943 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1944 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1945 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1947 tlb_flush_jmp_cache(env
, addr
);
1950 /* update the TLBs so that writes to code in the virtual page 'addr'
1952 static void tlb_protect_code(ram_addr_t ram_addr
)
1954 cpu_physical_memory_reset_dirty(ram_addr
,
1955 ram_addr
+ TARGET_PAGE_SIZE
,
1959 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1960 tested for self modifying code */
1961 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1964 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
1967 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1968 unsigned long start
, unsigned long length
)
1971 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
1972 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1973 if ((addr
- start
) < length
) {
1974 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1979 /* Note: start and end must be within the same ram block. */
1980 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1984 unsigned long length
, start1
;
1987 start
&= TARGET_PAGE_MASK
;
1988 end
= TARGET_PAGE_ALIGN(end
);
1990 length
= end
- start
;
1993 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
1995 /* we modify the TLB cache so that the dirty bit will be set again
1996 when accessing the range */
1997 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
1998 /* Check that we don't span multiple blocks - this breaks the
1999 address comparisons below. */
2000 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
2001 != (end
- 1) - start
) {
2005 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2007 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2008 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2009 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2015 int cpu_physical_memory_set_dirty_tracking(int enable
)
2018 in_migration
= enable
;
2022 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2024 ram_addr_t ram_addr
;
2027 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
2028 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2029 + tlb_entry
->addend
);
2030 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2031 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2032 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2037 /* update the TLB according to the current state of the dirty bits */
2038 void cpu_tlb_update_dirty(CPUState
*env
)
2042 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2043 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2044 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2048 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2050 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2051 tlb_entry
->addr_write
= vaddr
;
2054 /* update the TLB corresponding to virtual page vaddr
2055 so that it is no longer dirty */
2056 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2061 vaddr
&= TARGET_PAGE_MASK
;
2062 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2063 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2064 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2067 /* Our TLB does not support large pages, so remember the area covered by
2068 large pages and trigger a full TLB flush if these are invalidated. */
2069 static void tlb_add_large_page(CPUState
*env
, target_ulong vaddr
,
2072 target_ulong mask
= ~(size
- 1);
2074 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
2075 env
->tlb_flush_addr
= vaddr
& mask
;
2076 env
->tlb_flush_mask
= mask
;
2079 /* Extend the existing region to include the new page.
2080 This is a compromise between unnecessary flushes and the cost
2081 of maintaining a full variable size TLB. */
2082 mask
&= env
->tlb_flush_mask
;
2083 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
2086 env
->tlb_flush_addr
&= mask
;
2087 env
->tlb_flush_mask
= mask
;
2090 static bool is_ram_rom(ram_addr_t pd
)
2092 pd
&= ~TARGET_PAGE_MASK
;
2093 return pd
== io_mem_ram
.ram_addr
|| pd
== io_mem_rom
.ram_addr
;
2096 static bool is_romd(ram_addr_t pd
)
2100 pd
&= ~TARGET_PAGE_MASK
;
2101 mr
= io_mem_region
[pd
];
2102 return mr
->rom_device
&& mr
->readable
;
2105 static bool is_ram_rom_romd(ram_addr_t pd
)
2107 return is_ram_rom(pd
) || is_romd(pd
);
2110 /* Add a new TLB entry. At most one entry for a given virtual address
2111 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2112 supplied size is only used by tlb_flush_page. */
2113 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2114 target_phys_addr_t paddr
, int prot
,
2115 int mmu_idx
, target_ulong size
)
2120 target_ulong address
;
2121 target_ulong code_address
;
2122 unsigned long addend
;
2125 target_phys_addr_t iotlb
;
2127 assert(size
>= TARGET_PAGE_SIZE
);
2128 if (size
!= TARGET_PAGE_SIZE
) {
2129 tlb_add_large_page(env
, vaddr
, size
);
2131 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2133 #if defined(DEBUG_TLB)
2134 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2135 " prot=%x idx=%d pd=0x%08lx\n",
2136 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2140 if (!is_ram_rom_romd(pd
)) {
2141 /* IO memory case (romd handled later) */
2142 address
|= TLB_MMIO
;
2144 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2145 if (is_ram_rom(pd
)) {
2147 iotlb
= pd
& TARGET_PAGE_MASK
;
2148 if ((pd
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
)
2149 iotlb
|= io_mem_notdirty
.ram_addr
;
2151 iotlb
|= io_mem_rom
.ram_addr
;
2153 /* IO handlers are currently passed a physical address.
2154 It would be nice to pass an offset from the base address
2155 of that region. This would avoid having to special case RAM,
2156 and avoid full address decoding in every device.
2157 We can't use the high bits of pd for this because
2158 IO_MEM_ROMD uses these as a ram address. */
2159 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2160 iotlb
+= p
.region_offset
;
2163 code_address
= address
;
2164 /* Make accesses to pages with watchpoints go via the
2165 watchpoint trap routines. */
2166 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2167 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2168 /* Avoid trapping reads of pages with a write breakpoint. */
2169 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2170 iotlb
= io_mem_watch
.ram_addr
+ paddr
;
2171 address
|= TLB_MMIO
;
2177 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2178 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2179 te
= &env
->tlb_table
[mmu_idx
][index
];
2180 te
->addend
= addend
- vaddr
;
2181 if (prot
& PAGE_READ
) {
2182 te
->addr_read
= address
;
2187 if (prot
& PAGE_EXEC
) {
2188 te
->addr_code
= code_address
;
2192 if (prot
& PAGE_WRITE
) {
2193 if ((pd
& ~TARGET_PAGE_MASK
) == io_mem_rom
.ram_addr
|| is_romd(pd
)) {
2194 /* Write access calls the I/O callback. */
2195 te
->addr_write
= address
| TLB_MMIO
;
2196 } else if ((pd
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
&&
2197 !cpu_physical_memory_is_dirty(pd
)) {
2198 te
->addr_write
= address
| TLB_NOTDIRTY
;
2200 te
->addr_write
= address
;
2203 te
->addr_write
= -1;
2209 void tlb_flush(CPUState
*env
, int flush_global
)
2213 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2218 * Walks guest process memory "regions" one by one
2219 * and calls callback function 'fn' for each region.
2222 struct walk_memory_regions_data
2224 walk_memory_regions_fn fn
;
2226 unsigned long start
;
2230 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2231 abi_ulong end
, int new_prot
)
2233 if (data
->start
!= -1ul) {
2234 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2240 data
->start
= (new_prot
? end
: -1ul);
2241 data
->prot
= new_prot
;
2246 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2247 abi_ulong base
, int level
, void **lp
)
2253 return walk_memory_regions_end(data
, base
, 0);
2258 for (i
= 0; i
< L2_SIZE
; ++i
) {
2259 int prot
= pd
[i
].flags
;
2261 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2262 if (prot
!= data
->prot
) {
2263 rc
= walk_memory_regions_end(data
, pa
, prot
);
2271 for (i
= 0; i
< L2_SIZE
; ++i
) {
2272 pa
= base
| ((abi_ulong
)i
<<
2273 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2274 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2284 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2286 struct walk_memory_regions_data data
;
2294 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2295 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2296 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2302 return walk_memory_regions_end(&data
, 0, 0);
2305 static int dump_region(void *priv
, abi_ulong start
,
2306 abi_ulong end
, unsigned long prot
)
2308 FILE *f
= (FILE *)priv
;
2310 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2311 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2312 start
, end
, end
- start
,
2313 ((prot
& PAGE_READ
) ? 'r' : '-'),
2314 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2315 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2320 /* dump memory mappings */
2321 void page_dump(FILE *f
)
2323 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2324 "start", "end", "size", "prot");
2325 walk_memory_regions(f
, dump_region
);
2328 int page_get_flags(target_ulong address
)
2332 p
= page_find(address
>> TARGET_PAGE_BITS
);
2338 /* Modify the flags of a page and invalidate the code if necessary.
2339 The flag PAGE_WRITE_ORG is positioned automatically depending
2340 on PAGE_WRITE. The mmap_lock should already be held. */
2341 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2343 target_ulong addr
, len
;
2345 /* This function should never be called with addresses outside the
2346 guest address space. If this assert fires, it probably indicates
2347 a missing call to h2g_valid. */
2348 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2349 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2351 assert(start
< end
);
2353 start
= start
& TARGET_PAGE_MASK
;
2354 end
= TARGET_PAGE_ALIGN(end
);
2356 if (flags
& PAGE_WRITE
) {
2357 flags
|= PAGE_WRITE_ORG
;
2360 for (addr
= start
, len
= end
- start
;
2362 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2363 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2365 /* If the write protection bit is set, then we invalidate
2367 if (!(p
->flags
& PAGE_WRITE
) &&
2368 (flags
& PAGE_WRITE
) &&
2370 tb_invalidate_phys_page(addr
, 0, NULL
);
2376 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2382 /* This function should never be called with addresses outside the
2383 guest address space. If this assert fires, it probably indicates
2384 a missing call to h2g_valid. */
2385 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2386 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2392 if (start
+ len
- 1 < start
) {
2393 /* We've wrapped around. */
2397 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2398 start
= start
& TARGET_PAGE_MASK
;
2400 for (addr
= start
, len
= end
- start
;
2402 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2403 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2406 if( !(p
->flags
& PAGE_VALID
) )
2409 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2411 if (flags
& PAGE_WRITE
) {
2412 if (!(p
->flags
& PAGE_WRITE_ORG
))
2414 /* unprotect the page if it was put read-only because it
2415 contains translated code */
2416 if (!(p
->flags
& PAGE_WRITE
)) {
2417 if (!page_unprotect(addr
, 0, NULL
))
2426 /* called from signal handler: invalidate the code and unprotect the
2427 page. Return TRUE if the fault was successfully handled. */
2428 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2432 target_ulong host_start
, host_end
, addr
;
2434 /* Technically this isn't safe inside a signal handler. However we
2435 know this only ever happens in a synchronous SEGV handler, so in
2436 practice it seems to be ok. */
2439 p
= page_find(address
>> TARGET_PAGE_BITS
);
2445 /* if the page was really writable, then we change its
2446 protection back to writable */
2447 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
2448 host_start
= address
& qemu_host_page_mask
;
2449 host_end
= host_start
+ qemu_host_page_size
;
2452 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2453 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2454 p
->flags
|= PAGE_WRITE
;
2457 /* and since the content will be modified, we must invalidate
2458 the corresponding translated code. */
2459 tb_invalidate_phys_page(addr
, pc
, puc
);
2460 #ifdef DEBUG_TB_CHECK
2461 tb_invalidate_check(addr
);
2464 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2474 static inline void tlb_set_dirty(CPUState
*env
,
2475 unsigned long addr
, target_ulong vaddr
)
2478 #endif /* defined(CONFIG_USER_ONLY) */
2480 #if !defined(CONFIG_USER_ONLY)
2482 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2483 typedef struct subpage_t
{
2485 target_phys_addr_t base
;
2486 ram_addr_t sub_io_index
[TARGET_PAGE_SIZE
];
2487 ram_addr_t region_offset
[TARGET_PAGE_SIZE
];
2490 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2491 ram_addr_t memory
, ram_addr_t region_offset
);
2492 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2493 ram_addr_t orig_memory
,
2494 ram_addr_t region_offset
);
2495 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2498 if (addr > start_addr) \
2501 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2502 if (start_addr2 > 0) \
2506 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2507 end_addr2 = TARGET_PAGE_SIZE - 1; \
2509 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2510 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2515 /* register physical memory.
2516 For RAM, 'size' must be a multiple of the target page size.
2517 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2518 io memory page. The address used when calling the IO function is
2519 the offset from the start of the region, plus region_offset. Both
2520 start_addr and region_offset are rounded down to a page boundary
2521 before calculating this offset. This should not be a problem unless
2522 the low bits of start_addr and region_offset differ. */
2523 void cpu_register_physical_memory_log(MemoryRegionSection
*section
,
2524 bool readable
, bool readonly
)
2526 target_phys_addr_t start_addr
= section
->offset_within_address_space
;
2527 ram_addr_t size
= section
->size
;
2528 ram_addr_t phys_offset
= section
->mr
->ram_addr
;
2529 ram_addr_t region_offset
= section
->offset_within_region
;
2530 target_phys_addr_t addr
, end_addr
;
2533 ram_addr_t orig_size
= size
;
2536 if (memory_region_is_ram(section
->mr
)) {
2537 phys_offset
+= region_offset
;
2542 phys_offset
|= io_mem_rom
.ram_addr
;
2547 if (phys_offset
== io_mem_unassigned
.ram_addr
) {
2548 region_offset
= start_addr
;
2550 region_offset
&= TARGET_PAGE_MASK
;
2551 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2552 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2556 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 0);
2557 if (p
&& p
->phys_offset
!= io_mem_unassigned
.ram_addr
) {
2558 ram_addr_t orig_memory
= p
->phys_offset
;
2559 target_phys_addr_t start_addr2
, end_addr2
;
2560 int need_subpage
= 0;
2561 MemoryRegion
*mr
= io_mem_region
[orig_memory
& ~TARGET_PAGE_MASK
];
2563 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2566 if (!(mr
->subpage
)) {
2567 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2568 &p
->phys_offset
, orig_memory
,
2571 subpage
= container_of(mr
, subpage_t
, iomem
);
2573 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2575 p
->region_offset
= 0;
2577 p
->phys_offset
= phys_offset
;
2578 p
->region_offset
= region_offset
;
2579 if (is_ram_rom_romd(phys_offset
))
2580 phys_offset
+= TARGET_PAGE_SIZE
;
2583 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2584 p
->phys_offset
= phys_offset
;
2585 p
->region_offset
= region_offset
;
2586 if (is_ram_rom_romd(phys_offset
)) {
2587 phys_offset
+= TARGET_PAGE_SIZE
;
2589 target_phys_addr_t start_addr2
, end_addr2
;
2590 int need_subpage
= 0;
2592 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2593 end_addr2
, need_subpage
);
2596 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2598 io_mem_unassigned
.ram_addr
,
2599 addr
& TARGET_PAGE_MASK
);
2600 subpage_register(subpage
, start_addr2
, end_addr2
,
2601 phys_offset
, region_offset
);
2602 p
->region_offset
= 0;
2606 region_offset
+= TARGET_PAGE_SIZE
;
2607 addr
+= TARGET_PAGE_SIZE
;
2608 } while (addr
!= end_addr
);
2610 /* since each CPU stores ram addresses in its TLB cache, we must
2611 reset the modified entries */
2613 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2618 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2621 kvm_coalesce_mmio_region(addr
, size
);
2624 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2627 kvm_uncoalesce_mmio_region(addr
, size
);
/* Flush any buffered coalesced MMIO writes (no-op without KVM). */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2636 #if defined(__linux__) && !defined(TARGET_S390X)
2638 #include <sys/vfs.h>
2640 #define HUGETLBFS_MAGIC 0x958458f6
2642 static long gethugepagesize(const char *path
)
2648 ret
= statfs(path
, &fs
);
2649 } while (ret
!= 0 && errno
== EINTR
);
2656 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2657 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2662 static void *file_ram_alloc(RAMBlock
*block
,
2672 unsigned long hpagesize
;
2674 hpagesize
= gethugepagesize(path
);
2679 if (memory
< hpagesize
) {
2683 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2684 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2688 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2692 fd
= mkstemp(filename
);
2694 perror("unable to create backing store for hugepages");
2701 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2704 * ftruncate is not supported by hugetlbfs in older
2705 * hosts, so don't bother bailing out on errors.
2706 * If anything goes wrong with it under other filesystems,
2709 if (ftruncate(fd
, memory
))
2710 perror("ftruncate");
2713 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2714 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2715 * to sidestep this quirk.
2717 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2718 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2720 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2722 if (area
== MAP_FAILED
) {
2723 perror("file_ram_alloc: can't mmap RAM pages");
2732 static ram_addr_t
find_ram_offset(ram_addr_t size
)
2734 RAMBlock
*block
, *next_block
;
2735 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
2737 if (QLIST_EMPTY(&ram_list
.blocks
))
2740 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2741 ram_addr_t end
, next
= RAM_ADDR_MAX
;
2743 end
= block
->offset
+ block
->length
;
2745 QLIST_FOREACH(next_block
, &ram_list
.blocks
, next
) {
2746 if (next_block
->offset
>= end
) {
2747 next
= MIN(next
, next_block
->offset
);
2750 if (next
- end
>= size
&& next
- end
< mingap
) {
2752 mingap
= next
- end
;
2756 if (offset
== RAM_ADDR_MAX
) {
2757 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
2765 static ram_addr_t
last_ram_offset(void)
2768 ram_addr_t last
= 0;
2770 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2771 last
= MAX(last
, block
->offset
+ block
->length
);
2776 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
2778 RAMBlock
*new_block
, *block
;
2781 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2782 if (block
->offset
== addr
) {
2788 assert(!new_block
->idstr
[0]);
2790 if (dev
&& dev
->parent_bus
&& dev
->parent_bus
->info
->get_dev_path
) {
2791 char *id
= dev
->parent_bus
->info
->get_dev_path(dev
);
2793 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
2797 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
2799 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2800 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
2801 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
2808 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
2811 RAMBlock
*new_block
;
2813 size
= TARGET_PAGE_ALIGN(size
);
2814 new_block
= g_malloc0(sizeof(*new_block
));
2817 new_block
->offset
= find_ram_offset(size
);
2819 new_block
->host
= host
;
2820 new_block
->flags
|= RAM_PREALLOC_MASK
;
2823 #if defined (__linux__) && !defined(TARGET_S390X)
2824 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2825 if (!new_block
->host
) {
2826 new_block
->host
= qemu_vmalloc(size
);
2827 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2830 fprintf(stderr
, "-mem-path option unsupported\n");
2834 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2835 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2836 an system defined value, which is at least 256GB. Larger systems
2837 have larger values. We put the guest between the end of data
2838 segment (system break) and this value. We use 32GB as a base to
2839 have enough room for the system break to grow. */
2840 new_block
->host
= mmap((void*)0x800000000, size
,
2841 PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2842 MAP_SHARED
| MAP_ANONYMOUS
| MAP_FIXED
, -1, 0);
2843 if (new_block
->host
== MAP_FAILED
) {
2844 fprintf(stderr
, "Allocating RAM failed\n");
2848 if (xen_enabled()) {
2849 xen_ram_alloc(new_block
->offset
, size
, mr
);
2851 new_block
->host
= qemu_vmalloc(size
);
2854 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2857 new_block
->length
= size
;
2859 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2861 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
2862 last_ram_offset() >> TARGET_PAGE_BITS
);
2863 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2864 0xff, size
>> TARGET_PAGE_BITS
);
2867 kvm_setup_guest_memory(new_block
->host
, size
);
2869 return new_block
->offset
;
2872 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
2874 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
2877 void qemu_ram_free_from_ptr(ram_addr_t addr
)
2881 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2882 if (addr
== block
->offset
) {
2883 QLIST_REMOVE(block
, next
);
2890 void qemu_ram_free(ram_addr_t addr
)
2894 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2895 if (addr
== block
->offset
) {
2896 QLIST_REMOVE(block
, next
);
2897 if (block
->flags
& RAM_PREALLOC_MASK
) {
2899 } else if (mem_path
) {
2900 #if defined (__linux__) && !defined(TARGET_S390X)
2902 munmap(block
->host
, block
->length
);
2905 qemu_vfree(block
->host
);
2911 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2912 munmap(block
->host
, block
->length
);
2914 if (xen_enabled()) {
2915 xen_invalidate_map_cache_entry(block
->host
);
2917 qemu_vfree(block
->host
);
2929 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
2936 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2937 offset
= addr
- block
->offset
;
2938 if (offset
< block
->length
) {
2939 vaddr
= block
->host
+ offset
;
2940 if (block
->flags
& RAM_PREALLOC_MASK
) {
2944 munmap(vaddr
, length
);
2946 #if defined(__linux__) && !defined(TARGET_S390X)
2949 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
2952 flags
|= MAP_PRIVATE
;
2954 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2955 flags
, block
->fd
, offset
);
2957 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2958 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2965 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2966 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
2967 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2970 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2971 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2975 if (area
!= vaddr
) {
2976 fprintf(stderr
, "Could not remap addr: "
2977 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
2981 qemu_madvise(vaddr
, length
, QEMU_MADV_MERGEABLE
);
2987 #endif /* !_WIN32 */
2989 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2990 With the exception of the softmmu code in this file, this should
2991 only be used for local memory (e.g. video ram) that the device owns,
2992 and knows it isn't going to access beyond the end of the block.
2994 It should not be used for general purpose DMA.
2995 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2997 void *qemu_get_ram_ptr(ram_addr_t addr
)
3001 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3002 if (addr
- block
->offset
< block
->length
) {
3003 /* Move this entry to to start of the list. */
3004 if (block
!= QLIST_FIRST(&ram_list
.blocks
)) {
3005 QLIST_REMOVE(block
, next
);
3006 QLIST_INSERT_HEAD(&ram_list
.blocks
, block
, next
);
3008 if (xen_enabled()) {
3009 /* We need to check if the requested address is in the RAM
3010 * because we don't want to map the entire memory in QEMU.
3011 * In that case just map until the end of the page.
3013 if (block
->offset
== 0) {
3014 return xen_map_cache(addr
, 0, 0);
3015 } else if (block
->host
== NULL
) {
3017 xen_map_cache(block
->offset
, block
->length
, 1);
3020 return block
->host
+ (addr
- block
->offset
);
3024 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3030 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3031 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3033 void *qemu_safe_ram_ptr(ram_addr_t addr
)
3037 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3038 if (addr
- block
->offset
< block
->length
) {
3039 if (xen_enabled()) {
3040 /* We need to check if the requested address is in the RAM
3041 * because we don't want to map the entire memory in QEMU.
3042 * In that case just map until the end of the page.
3044 if (block
->offset
== 0) {
3045 return xen_map_cache(addr
, 0, 0);
3046 } else if (block
->host
== NULL
) {
3048 xen_map_cache(block
->offset
, block
->length
, 1);
3051 return block
->host
+ (addr
- block
->offset
);
3055 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3061 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3062 * but takes a size argument */
3063 void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
)
3068 if (xen_enabled()) {
3069 return xen_map_cache(addr
, *size
, 1);
3073 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3074 if (addr
- block
->offset
< block
->length
) {
3075 if (addr
- block
->offset
+ *size
> block
->length
)
3076 *size
= block
->length
- addr
+ block
->offset
;
3077 return block
->host
+ (addr
- block
->offset
);
3081 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3086 void qemu_put_ram_ptr(void *addr
)
3088 trace_qemu_put_ram_ptr(addr
);
3091 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
3094 uint8_t *host
= ptr
;
3096 if (xen_enabled()) {
3097 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
3101 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3102 /* This case append when the block is not mapped. */
3103 if (block
->host
== NULL
) {
3106 if (host
- block
->host
< block
->length
) {
3107 *ram_addr
= block
->offset
+ (host
- block
->host
);
3115 /* Some of the softmmu routines need to translate from a host pointer
3116 (typically a TLB entry) back to a ram offset. */
3117 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
3119 ram_addr_t ram_addr
;
3121 if (qemu_ram_addr_from_host(ptr
, &ram_addr
)) {
3122 fprintf(stderr
, "Bad ram pointer %p\n", ptr
);
3128 static uint64_t unassigned_mem_read(void *opaque
, target_phys_addr_t addr
,
3131 #ifdef DEBUG_UNASSIGNED
3132 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3134 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3135 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, size
);
3140 static void unassigned_mem_write(void *opaque
, target_phys_addr_t addr
,
3141 uint64_t val
, unsigned size
)
3143 #ifdef DEBUG_UNASSIGNED
3144 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%"PRIx64
"\n", addr
, val
);
3146 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3147 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, size
);
3151 static const MemoryRegionOps unassigned_mem_ops
= {
3152 .read
= unassigned_mem_read
,
3153 .write
= unassigned_mem_write
,
3154 .endianness
= DEVICE_NATIVE_ENDIAN
,
3157 static uint64_t error_mem_read(void *opaque
, target_phys_addr_t addr
,
3163 static void error_mem_write(void *opaque
, target_phys_addr_t addr
,
3164 uint64_t value
, unsigned size
)
3169 static const MemoryRegionOps error_mem_ops
= {
3170 .read
= error_mem_read
,
3171 .write
= error_mem_write
,
3172 .endianness
= DEVICE_NATIVE_ENDIAN
,
3175 static const MemoryRegionOps rom_mem_ops
= {
3176 .read
= error_mem_read
,
3177 .write
= unassigned_mem_write
,
3178 .endianness
= DEVICE_NATIVE_ENDIAN
,
3181 static void notdirty_mem_write(void *opaque
, target_phys_addr_t ram_addr
,
3182 uint64_t val
, unsigned size
)
3185 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3186 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3187 #if !defined(CONFIG_USER_ONLY)
3188 tb_invalidate_phys_page_fast(ram_addr
, size
);
3189 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3194 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
3197 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
3200 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
3205 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3206 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3207 /* we remove the notdirty callback only if the code has been
3209 if (dirty_flags
== 0xff)
3210 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3213 static const MemoryRegionOps notdirty_mem_ops
= {
3214 .read
= error_mem_read
,
3215 .write
= notdirty_mem_write
,
3216 .endianness
= DEVICE_NATIVE_ENDIAN
,
3219 /* Generate a debug exception if a watchpoint has been hit. */
3220 static void check_watchpoint(int offset
, int len_mask
, int flags
)
3222 CPUState
*env
= cpu_single_env
;
3223 target_ulong pc
, cs_base
;
3224 TranslationBlock
*tb
;
3229 if (env
->watchpoint_hit
) {
3230 /* We re-entered the check after replacing the TB. Now raise
3231 * the debug interrupt so that is will trigger after the
3232 * current instruction. */
3233 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
3236 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
3237 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
3238 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
3239 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
3240 wp
->flags
|= BP_WATCHPOINT_HIT
;
3241 if (!env
->watchpoint_hit
) {
3242 env
->watchpoint_hit
= wp
;
3243 tb
= tb_find_pc(env
->mem_io_pc
);
3245 cpu_abort(env
, "check_watchpoint: could not find TB for "
3246 "pc=%p", (void *)env
->mem_io_pc
);
3248 cpu_restore_state(tb
, env
, env
->mem_io_pc
);
3249 tb_phys_invalidate(tb
, -1);
3250 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
3251 env
->exception_index
= EXCP_DEBUG
;
3253 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
3254 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
3256 cpu_resume_from_signal(env
, NULL
);
3259 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
3264 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3265 so these check for a hit then pass through to the normal out-of-line
3267 static uint64_t watch_mem_read(void *opaque
, target_phys_addr_t addr
,
3270 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
3272 case 1: return ldub_phys(addr
);
3273 case 2: return lduw_phys(addr
);
3274 case 4: return ldl_phys(addr
);
3279 static void watch_mem_write(void *opaque
, target_phys_addr_t addr
,
3280 uint64_t val
, unsigned size
)
3282 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
3284 case 1: stb_phys(addr
, val
);
3285 case 2: stw_phys(addr
, val
);
3286 case 4: stl_phys(addr
, val
);
3291 static const MemoryRegionOps watch_mem_ops
= {
3292 .read
= watch_mem_read
,
3293 .write
= watch_mem_write
,
3294 .endianness
= DEVICE_NATIVE_ENDIAN
,
3297 static uint64_t subpage_read(void *opaque
, target_phys_addr_t addr
,
3300 subpage_t
*mmio
= opaque
;
3301 unsigned int idx
= SUBPAGE_IDX(addr
);
3302 #if defined(DEBUG_SUBPAGE)
3303 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3304 mmio
, len
, addr
, idx
);
3307 addr
+= mmio
->region_offset
[idx
];
3308 idx
= mmio
->sub_io_index
[idx
];
3309 return io_mem_read(idx
, addr
, len
);
3312 static void subpage_write(void *opaque
, target_phys_addr_t addr
,
3313 uint64_t value
, unsigned len
)
3315 subpage_t
*mmio
= opaque
;
3316 unsigned int idx
= SUBPAGE_IDX(addr
);
3317 #if defined(DEBUG_SUBPAGE)
3318 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3319 " idx %d value %"PRIx64
"\n",
3320 __func__
, mmio
, len
, addr
, idx
, value
);
3323 addr
+= mmio
->region_offset
[idx
];
3324 idx
= mmio
->sub_io_index
[idx
];
3325 io_mem_write(idx
, addr
, value
, len
);
3328 static const MemoryRegionOps subpage_ops
= {
3329 .read
= subpage_read
,
3330 .write
= subpage_write
,
3331 .endianness
= DEVICE_NATIVE_ENDIAN
,
3334 static uint64_t subpage_ram_read(void *opaque
, target_phys_addr_t addr
,
3337 ram_addr_t raddr
= addr
;
3338 void *ptr
= qemu_get_ram_ptr(raddr
);
3340 case 1: return ldub_p(ptr
);
3341 case 2: return lduw_p(ptr
);
3342 case 4: return ldl_p(ptr
);
3347 static void subpage_ram_write(void *opaque
, target_phys_addr_t addr
,
3348 uint64_t value
, unsigned size
)
3350 ram_addr_t raddr
= addr
;
3351 void *ptr
= qemu_get_ram_ptr(raddr
);
3353 case 1: return stb_p(ptr
, value
);
3354 case 2: return stw_p(ptr
, value
);
3355 case 4: return stl_p(ptr
, value
);
3360 static const MemoryRegionOps subpage_ram_ops
= {
3361 .read
= subpage_ram_read
,
3362 .write
= subpage_ram_write
,
3363 .endianness
= DEVICE_NATIVE_ENDIAN
,
3366 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3367 ram_addr_t memory
, ram_addr_t region_offset
)
3371 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3373 idx
= SUBPAGE_IDX(start
);
3374 eidx
= SUBPAGE_IDX(end
);
3375 #if defined(DEBUG_SUBPAGE)
3376 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3377 mmio
, start
, end
, idx
, eidx
, memory
);
3379 if ((memory
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
3380 memory
= io_mem_subpage_ram
.ram_addr
;
3382 memory
&= IO_MEM_NB_ENTRIES
- 1;
3383 for (; idx
<= eidx
; idx
++) {
3384 mmio
->sub_io_index
[idx
] = memory
;
3385 mmio
->region_offset
[idx
] = region_offset
;
3391 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
3392 ram_addr_t orig_memory
,
3393 ram_addr_t region_offset
)
3398 mmio
= g_malloc0(sizeof(subpage_t
));
3401 memory_region_init_io(&mmio
->iomem
, &subpage_ops
, mmio
,
3402 "subpage", TARGET_PAGE_SIZE
);
3403 mmio
->iomem
.subpage
= true;
3404 subpage_memory
= mmio
->iomem
.ram_addr
;
3405 #if defined(DEBUG_SUBPAGE)
3406 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3407 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3409 *phys
= subpage_memory
;
3410 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, orig_memory
, region_offset
);
3415 static int get_free_io_mem_idx(void)
3419 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
3420 if (!io_mem_used
[i
]) {
3424 fprintf(stderr
, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES
);
3428 /* mem_read and mem_write are arrays of functions containing the
3429 function to access byte (index 0), word (index 1) and dword (index
3430 2). Functions can be omitted with a NULL function pointer.
3431 If io_index is non zero, the corresponding io zone is
3432 modified. If it is zero, a new io zone is allocated. The return
3433 value can be used with cpu_register_physical_memory(). (-1) is
3434 returned if error. */
3435 static int cpu_register_io_memory_fixed(int io_index
, MemoryRegion
*mr
)
3437 if (io_index
<= 0) {
3438 io_index
= get_free_io_mem_idx();
3442 if (io_index
>= IO_MEM_NB_ENTRIES
)
3446 io_mem_region
[io_index
] = mr
;
3451 int cpu_register_io_memory(MemoryRegion
*mr
)
3453 return cpu_register_io_memory_fixed(0, mr
);
3456 void cpu_unregister_io_memory(int io_index
)
3458 io_mem_region
[io_index
] = NULL
;
3459 io_mem_used
[io_index
] = 0;
3462 static void io_mem_init(void)
3466 /* Must be first: */
3467 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
3468 assert(io_mem_ram
.ram_addr
== 0);
3469 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
3470 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
3471 "unassigned", UINT64_MAX
);
3472 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
3473 "notdirty", UINT64_MAX
);
3474 memory_region_init_io(&io_mem_subpage_ram
, &subpage_ram_ops
, NULL
,
3475 "subpage-ram", UINT64_MAX
);
3479 memory_region_init_io(&io_mem_watch
, &watch_mem_ops
, NULL
,
3480 "watch", UINT64_MAX
);
3483 static void memory_map_init(void)
3485 system_memory
= g_malloc(sizeof(*system_memory
));
3486 memory_region_init(system_memory
, "system", INT64_MAX
);
3487 set_system_memory_map(system_memory
);
3489 system_io
= g_malloc(sizeof(*system_io
));
3490 memory_region_init(system_io
, "io", 65536);
3491 set_system_io_map(system_io
);
3494 MemoryRegion
*get_system_memory(void)
3496 return system_memory
;
3499 MemoryRegion
*get_system_io(void)
3504 #endif /* !defined(CONFIG_USER_ONLY) */
3506 /* physical memory access (slow version, mainly for debug) */
3507 #if defined(CONFIG_USER_ONLY)
3508 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3509 uint8_t *buf
, int len
, int is_write
)
3516 page
= addr
& TARGET_PAGE_MASK
;
3517 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3520 flags
= page_get_flags(page
);
3521 if (!(flags
& PAGE_VALID
))
3524 if (!(flags
& PAGE_WRITE
))
3526 /* XXX: this code should not depend on lock_user */
3527 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3530 unlock_user(p
, addr
, l
);
3532 if (!(flags
& PAGE_READ
))
3534 /* XXX: this code should not depend on lock_user */
3535 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3538 unlock_user(p
, addr
, 0);
3548 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
3549 int len
, int is_write
)
3554 target_phys_addr_t page
;
3559 page
= addr
& TARGET_PAGE_MASK
;
3560 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3563 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3567 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
3568 target_phys_addr_t addr1
;
3569 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
3570 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3571 /* XXX: could force cpu_single_env to NULL to avoid
3573 if (l
>= 4 && ((addr1
& 3) == 0)) {
3574 /* 32 bit write access */
3576 io_mem_write(io_index
, addr1
, val
, 4);
3578 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3579 /* 16 bit write access */
3581 io_mem_write(io_index
, addr1
, val
, 2);
3584 /* 8 bit write access */
3586 io_mem_write(io_index
, addr1
, val
, 1);
3591 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3593 ptr
= qemu_get_ram_ptr(addr1
);
3594 memcpy(ptr
, buf
, l
);
3595 if (!cpu_physical_memory_is_dirty(addr1
)) {
3596 /* invalidate code */
3597 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3599 cpu_physical_memory_set_dirty_flags(
3600 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3602 /* qemu doesn't execute guest code directly, but kvm does
3603 therefore flush instruction caches */
3605 flush_icache_range((unsigned long)ptr
,
3606 ((unsigned long)ptr
)+l
);
3607 qemu_put_ram_ptr(ptr
);
3610 if (!is_ram_rom_romd(pd
)) {
3611 target_phys_addr_t addr1
;
3613 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
3614 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3615 if (l
>= 4 && ((addr1
& 3) == 0)) {
3616 /* 32 bit read access */
3617 val
= io_mem_read(io_index
, addr1
, 4);
3620 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3621 /* 16 bit read access */
3622 val
= io_mem_read(io_index
, addr1
, 2);
3626 /* 8 bit read access */
3627 val
= io_mem_read(io_index
, addr1
, 1);
3633 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
3634 memcpy(buf
, ptr
+ (addr
& ~TARGET_PAGE_MASK
), l
);
3635 qemu_put_ram_ptr(ptr
);
3644 /* used for ROM loading : can write in RAM and ROM */
3645 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3646 const uint8_t *buf
, int len
)
3650 target_phys_addr_t page
;
3655 page
= addr
& TARGET_PAGE_MASK
;
3656 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3659 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3662 if (!is_ram_rom_romd(pd
)) {
3665 unsigned long addr1
;
3666 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3668 ptr
= qemu_get_ram_ptr(addr1
);
3669 memcpy(ptr
, buf
, l
);
3670 qemu_put_ram_ptr(ptr
);
3680 target_phys_addr_t addr
;
3681 target_phys_addr_t len
;
3684 static BounceBuffer bounce
;
3686 typedef struct MapClient
{
3688 void (*callback
)(void *opaque
);
3689 QLIST_ENTRY(MapClient
) link
;
3692 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
3693 = QLIST_HEAD_INITIALIZER(map_client_list
);
3695 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3697 MapClient
*client
= g_malloc(sizeof(*client
));
3699 client
->opaque
= opaque
;
3700 client
->callback
= callback
;
3701 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3705 void cpu_unregister_map_client(void *_client
)
3707 MapClient
*client
= (MapClient
*)_client
;
3709 QLIST_REMOVE(client
, link
);
3713 static void cpu_notify_map_clients(void)
3717 while (!QLIST_EMPTY(&map_client_list
)) {
3718 client
= QLIST_FIRST(&map_client_list
);
3719 client
->callback(client
->opaque
);
3720 cpu_unregister_map_client(client
);
3724 /* Map a physical memory region into a host virtual address.
3725 * May map a subset of the requested range, given by and returned in *plen.
3726 * May return NULL if resources needed to perform the mapping are exhausted.
3727 * Use only for reads OR writes - not for read-modify-write operations.
3728 * Use cpu_register_map_client() to know when retrying the map operation is
3729 * likely to succeed.
3731 void *cpu_physical_memory_map(target_phys_addr_t addr
,
3732 target_phys_addr_t
*plen
,
3735 target_phys_addr_t len
= *plen
;
3736 target_phys_addr_t todo
= 0;
3738 target_phys_addr_t page
;
3741 ram_addr_t raddr
= RAM_ADDR_MAX
;
3746 page
= addr
& TARGET_PAGE_MASK
;
3747 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3750 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3753 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
3754 if (todo
|| bounce
.buffer
) {
3757 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3761 cpu_physical_memory_read(addr
, bounce
.buffer
, l
);
3765 return bounce
.buffer
;
3768 raddr
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3776 ret
= qemu_ram_ptr_length(raddr
, &rlen
);
3781 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3782 * Will also mark the memory as dirty if is_write == 1. access_len gives
3783 * the amount of memory that was actually read or written by the caller.
3785 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
3786 int is_write
, target_phys_addr_t access_len
)
3788 if (buffer
!= bounce
.buffer
) {
3790 ram_addr_t addr1
= qemu_ram_addr_from_host_nofail(buffer
);
3791 while (access_len
) {
3793 l
= TARGET_PAGE_SIZE
;
3796 if (!cpu_physical_memory_is_dirty(addr1
)) {
3797 /* invalidate code */
3798 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3800 cpu_physical_memory_set_dirty_flags(
3801 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3807 if (xen_enabled()) {
3808 xen_invalidate_map_cache_entry(buffer
);
3813 cpu_physical_memory_write(bounce
.addr
, bounce
.buffer
, access_len
);
3815 qemu_vfree(bounce
.buffer
);
3816 bounce
.buffer
= NULL
;
3817 cpu_notify_map_clients();
3820 /* warning: addr must be aligned */
3821 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr
,
3822 enum device_endian endian
)
3830 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3833 if (!is_ram_rom_romd(pd
)) {
3835 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
3836 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3837 val
= io_mem_read(io_index
, addr
, 4);
3838 #if defined(TARGET_WORDS_BIGENDIAN)
3839 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3843 if (endian
== DEVICE_BIG_ENDIAN
) {
3849 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3850 (addr
& ~TARGET_PAGE_MASK
);
3852 case DEVICE_LITTLE_ENDIAN
:
3853 val
= ldl_le_p(ptr
);
3855 case DEVICE_BIG_ENDIAN
:
3856 val
= ldl_be_p(ptr
);
3866 uint32_t ldl_phys(target_phys_addr_t addr
)
3868 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3871 uint32_t ldl_le_phys(target_phys_addr_t addr
)
3873 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3876 uint32_t ldl_be_phys(target_phys_addr_t addr
)
3878 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3881 /* warning: addr must be aligned */
3882 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr
,
3883 enum device_endian endian
)
3891 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3894 if (!is_ram_rom_romd(pd
)) {
3896 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
3897 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3899 /* XXX This is broken when device endian != cpu endian.
3900 Fix and add "endian" variable check */
3901 #ifdef TARGET_WORDS_BIGENDIAN
3902 val
= io_mem_read(io_index
, addr
, 4) << 32;
3903 val
|= io_mem_read(io_index
, addr
+ 4, 4);
3905 val
= io_mem_read(io_index
, addr
, 4);
3906 val
|= io_mem_read(io_index
, addr
+ 4, 4) << 32;
3910 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3911 (addr
& ~TARGET_PAGE_MASK
);
3913 case DEVICE_LITTLE_ENDIAN
:
3914 val
= ldq_le_p(ptr
);
3916 case DEVICE_BIG_ENDIAN
:
3917 val
= ldq_be_p(ptr
);
3927 uint64_t ldq_phys(target_phys_addr_t addr
)
3929 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3932 uint64_t ldq_le_phys(target_phys_addr_t addr
)
3934 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3937 uint64_t ldq_be_phys(target_phys_addr_t addr
)
3939 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3943 uint32_t ldub_phys(target_phys_addr_t addr
)
3946 cpu_physical_memory_read(addr
, &val
, 1);
3950 /* warning: addr must be aligned */
3951 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr
,
3952 enum device_endian endian
)
3960 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3963 if (!is_ram_rom_romd(pd
)) {
3965 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
3966 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3967 val
= io_mem_read(io_index
, addr
, 2);
3968 #if defined(TARGET_WORDS_BIGENDIAN)
3969 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3973 if (endian
== DEVICE_BIG_ENDIAN
) {
3979 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3980 (addr
& ~TARGET_PAGE_MASK
);
3982 case DEVICE_LITTLE_ENDIAN
:
3983 val
= lduw_le_p(ptr
);
3985 case DEVICE_BIG_ENDIAN
:
3986 val
= lduw_be_p(ptr
);
3996 uint32_t lduw_phys(target_phys_addr_t addr
)
3998 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4001 uint32_t lduw_le_phys(target_phys_addr_t addr
)
4003 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4006 uint32_t lduw_be_phys(target_phys_addr_t addr
)
4008 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4011 /* warning: addr must be aligned. The ram page is not masked as dirty
4012 and the code inside is not invalidated. It is useful if the dirty
4013 bits are used to track modified PTEs */
4014 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
4021 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4024 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4025 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4026 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4027 io_mem_write(io_index
, addr
, val
, 4);
4029 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4030 ptr
= qemu_get_ram_ptr(addr1
);
4033 if (unlikely(in_migration
)) {
4034 if (!cpu_physical_memory_is_dirty(addr1
)) {
4035 /* invalidate code */
4036 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4038 cpu_physical_memory_set_dirty_flags(
4039 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4045 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
4052 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4055 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4056 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4057 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4058 #ifdef TARGET_WORDS_BIGENDIAN
4059 io_mem_write(io_index
, addr
, val
>> 32, 4);
4060 io_mem_write(io_index
, addr
+ 4, (uint32_t)val
, 4);
4062 io_mem_write(io_index
, addr
, (uint32_t)val
, 4);
4063 io_mem_write(io_index
, addr
+ 4, val
>> 32, 4);
4066 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4067 (addr
& ~TARGET_PAGE_MASK
);
4072 /* warning: addr must be aligned */
4073 static inline void stl_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4074 enum device_endian endian
)
4081 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4084 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4085 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4086 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4087 #if defined(TARGET_WORDS_BIGENDIAN)
4088 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4092 if (endian
== DEVICE_BIG_ENDIAN
) {
4096 io_mem_write(io_index
, addr
, val
, 4);
4098 unsigned long addr1
;
4099 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4101 ptr
= qemu_get_ram_ptr(addr1
);
4103 case DEVICE_LITTLE_ENDIAN
:
4106 case DEVICE_BIG_ENDIAN
:
4113 if (!cpu_physical_memory_is_dirty(addr1
)) {
4114 /* invalidate code */
4115 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4117 cpu_physical_memory_set_dirty_flags(addr1
,
4118 (0xff & ~CODE_DIRTY_FLAG
));
4123 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4125 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4128 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
)
4130 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4133 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
)
4135 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4139 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4142 cpu_physical_memory_write(addr
, &v
, 1);
4145 /* warning: addr must be aligned */
4146 static inline void stw_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4147 enum device_endian endian
)
4154 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4157 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4158 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4159 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4160 #if defined(TARGET_WORDS_BIGENDIAN)
4161 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4165 if (endian
== DEVICE_BIG_ENDIAN
) {
4169 io_mem_write(io_index
, addr
, val
, 2);
4171 unsigned long addr1
;
4172 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4174 ptr
= qemu_get_ram_ptr(addr1
);
4176 case DEVICE_LITTLE_ENDIAN
:
4179 case DEVICE_BIG_ENDIAN
:
4186 if (!cpu_physical_memory_is_dirty(addr1
)) {
4187 /* invalidate code */
4188 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4190 cpu_physical_memory_set_dirty_flags(addr1
,
4191 (0xff & ~CODE_DIRTY_FLAG
));
4196 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4198 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4201 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
)
4203 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4206 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
)
4208 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4212 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
4215 cpu_physical_memory_write(addr
, &val
, 8);
4218 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
)
4220 val
= cpu_to_le64(val
);
4221 cpu_physical_memory_write(addr
, &val
, 8);
4224 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
)
4226 val
= cpu_to_be64(val
);
4227 cpu_physical_memory_write(addr
, &val
, 8);
4230 /* virtual memory access for debug (includes writing to ROM) */
4231 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
4232 uint8_t *buf
, int len
, int is_write
)
4235 target_phys_addr_t phys_addr
;
4239 page
= addr
& TARGET_PAGE_MASK
;
4240 phys_addr
= cpu_get_phys_page_debug(env
, page
);
4241 /* if no physical page mapped, return an error */
4242 if (phys_addr
== -1)
4244 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4247 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
4249 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
4251 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
4260 /* in deterministic execution mode, instructions doing device I/Os
4261 must be at the end of the TB */
4262 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
4264 TranslationBlock
*tb
;
4266 target_ulong pc
, cs_base
;
4269 tb
= tb_find_pc((unsigned long)retaddr
);
4271 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4274 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4275 cpu_restore_state(tb
, env
, (unsigned long)retaddr
);
4276 /* Calculate how many instructions had been executed before the fault
4278 n
= n
- env
->icount_decr
.u16
.low
;
4279 /* Generate a new TB ending on the I/O insn. */
4281 /* On MIPS and SH, delay slot instructions can only be restarted if
4282 they were already the first instruction in the TB. If this is not
4283 the first instruction in a TB then re-execute the preceding
4285 #if defined(TARGET_MIPS)
4286 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4287 env
->active_tc
.PC
-= 4;
4288 env
->icount_decr
.u16
.low
++;
4289 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4291 #elif defined(TARGET_SH4)
4292 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4295 env
->icount_decr
.u16
.low
++;
4296 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4299 /* This should never happen. */
4300 if (n
> CF_COUNT_MASK
)
4301 cpu_abort(env
, "TB too big during recompile");
4303 cflags
= n
| CF_LAST_IO
;
4305 cs_base
= tb
->cs_base
;
4307 tb_phys_invalidate(tb
, -1);
4308 /* FIXME: In theory this could raise an exception. In practice
4309 we have already translated the block once so it's probably ok. */
4310 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4311 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4312 the first in the TB) then we end up generating a whole new TB and
4313 repeating the fault, which is horribly inefficient.
4314 Better would be to execute just this insn uncached, or generate a
4316 cpu_resume_from_signal(env
, NULL
);
4319 #if !defined(CONFIG_USER_ONLY)
4321 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4323 int i
, target_code_size
, max_target_code_size
;
4324 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4325 TranslationBlock
*tb
;
4327 target_code_size
= 0;
4328 max_target_code_size
= 0;
4330 direct_jmp_count
= 0;
4331 direct_jmp2_count
= 0;
4332 for(i
= 0; i
< nb_tbs
; i
++) {
4334 target_code_size
+= tb
->size
;
4335 if (tb
->size
> max_target_code_size
)
4336 max_target_code_size
= tb
->size
;
4337 if (tb
->page_addr
[1] != -1)
4339 if (tb
->tb_next_offset
[0] != 0xffff) {
4341 if (tb
->tb_next_offset
[1] != 0xffff) {
4342 direct_jmp2_count
++;
4346 /* XXX: avoid using doubles ? */
4347 cpu_fprintf(f
, "Translation buffer state:\n");
4348 cpu_fprintf(f
, "gen code size %td/%ld\n",
4349 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4350 cpu_fprintf(f
, "TB count %d/%d\n",
4351 nb_tbs
, code_gen_max_blocks
);
4352 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4353 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4354 max_target_code_size
);
4355 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4356 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4357 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4358 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4360 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4361 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4363 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4365 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4366 cpu_fprintf(f
, "\nStatistics:\n");
4367 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4368 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4369 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4370 #ifdef CONFIG_PROFILER
4371 tcg_dump_info(f
, cpu_fprintf
);
4375 /* NOTE: this function can trigger an exception */
4376 /* NOTE2: the returned address is not exactly the physical address: it
4377 is the offset relative to phys_ram_base */
4378 tb_page_addr_t
get_page_addr_code(CPUState
*env1
, target_ulong addr
)
4380 int mmu_idx
, page_index
, pd
;
4383 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
4384 mmu_idx
= cpu_mmu_index(env1
);
4385 if (unlikely(env1
->tlb_table
[mmu_idx
][page_index
].addr_code
!=
4386 (addr
& TARGET_PAGE_MASK
))) {
4389 pd
= env1
->tlb_table
[mmu_idx
][page_index
].addr_code
& ~TARGET_PAGE_MASK
;
4390 if (pd
!= io_mem_ram
.ram_addr
&& pd
!= io_mem_rom
.ram_addr
4392 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4393 cpu_unassigned_access(env1
, addr
, 0, 1, 0, 4);
4395 cpu_abort(env1
, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx
"\n", addr
);
4398 p
= (void *)((uintptr_t)addr
+ env1
->tlb_table
[mmu_idx
][page_index
].addend
);
4399 return qemu_ram_addr_from_host_nofail(p
);
4402 #define MMUSUFFIX _cmmu
4404 #define GETPC() NULL
4405 #define env cpu_single_env
4406 #define SOFTMMU_CODE_ACCESS
4409 #include "softmmu_template.h"
4412 #include "softmmu_template.h"
4415 #include "softmmu_template.h"
4418 #include "softmmu_template.h"