/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

/* current CPU in the current thread. It is only valid inside cpu_exec() */
DEFINE_TLS(CPUState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#define P_L1_BITS  P_L1_BITS_REM
#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#define V_L1_BITS  V_L1_BITS_REM

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
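/*
 * Worked example (added; not in the original source; assumes L2_BITS is 10,
 * as in upstream exec.c, with 4 KB target pages and a 32-bit
 * L1_MAP_ADDR_SPACE_BITS): the 20 page-index bits above TARGET_PAGE_BITS
 * split into V_L1_BITS = 10 bits that index l1_map and one 10-bit bottom
 * level, so V_L1_SIZE = 1024 and V_L1_SHIFT = 32 - 12 - 10 = 10.  Wider
 * address spaces simply add more L2-sized intermediate levels.
 */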
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);
/* io memory support */
CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;

static const char *logfilename = "qemu.log";
static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;

        freep = kinfo_getvmmap(getpid(), &cnt);
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
                unsigned long startaddr, endaddr;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            ALLOC(p, sizeof(void *) * L2_SIZE);

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));

        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(tb_page_addr_t index)
    return page_find_alloc(index, 0);
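/*
 * Usage note (added; not in the original source): callers index these helpers
 * by target page number, e.g. page_find(addr >> TARGET_PAGE_BITS).  With
 * alloc == 0 the walk gives up as soon as a level is missing, so a plain
 * lookup never populates the map as a side effect.
 */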
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));

        int first_index = index & ~(L2_SIZE - 1);

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;

    return pd + (index & (L2_SIZE - 1));
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));

static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        start = (void *)0x90000000UL;
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        code_gen_buffer = g_malloc(code_gen_buffer_size);
        map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
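/*
 * Added note (not in the original source): code_gen_buffer_max_size is the
 * flush threshold, sized so that at least one worst-case translation block
 * (TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes) still fits between the threshold
 * and the real end of the buffer.  tb_alloc() can therefore use a simple
 * "pointer past threshold" test instead of checking the exact block size.
 */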
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);

bool tcg_enabled(void)
    return code_gen_buffer != NULL;
void cpu_exec_init_all(void)
#if !defined(CONFIG_USER_ONLY)

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
CPUState *qemu_get_cpu(int cpu)
    CPUState *env = first_cpu;

        if (env->cpu_index == cpu)

void cpu_exec_init(CPUState *env)
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
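/*
 * Added note (not in the original source): translated code is bump-allocated
 * out of code_gen_buffer, so there is no general free list.  tb_free() can
 * only reclaim space when the TB being freed is the most recently generated
 * one; anything else is simply abandoned until the next tb_flush().
 */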
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1 (int level, void **lp)
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);

static void page_flush_tb(void)
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
    TranslationBlock *tb1;
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
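/*
 * Added note (not in the original source): the jump lists here rely on
 * pointer tagging.  tb->jmp_next[n] and tb->jmp_first store a
 * TranslationBlock pointer with the jump slot number in the low bits, which
 * is why readers mask with ~3 before dereferencing; the value
 * ((long)tb | 2) marks the end of the circular list of TBs that jump into
 * 'tb'.
 */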
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
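/*
 * Worked example (added; not in the original source): set_bits(tab, 3, 7)
 * marks bits 3..9, i.e. it ORs 0xf8 into tab[0] and 0x03 into tab[1],
 * handling the partial leading byte, whole middle bytes, and the partial
 * trailing byte separately.
 */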
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
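/*
 * Added note (not in the original source): the bitmap allocated above has
 * one bit per byte of the guest page (TARGET_PAGE_SIZE / 8 bytes); a set bit
 * means that byte is covered by translated code.
 * tb_invalidate_phys_page_fast() consults it so that small guest writes
 * which miss all TBs can skip the full invalidation walk.
 */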
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;

    phys_pc = get_page_addr_code(env, pc);
        /* flush must be done */
    /* cannot fail at this point */
    /* Don't forget to invalidate previous TB info.  */
    tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    tb_link_page(tb, phys_pc, phys_page2);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
#endif /* TARGET_HAS_SMC */
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
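/*
 * Added note (not in the original source): the binary search works because
 * tbs[] is filled in allocation order and code_gen_ptr only moves forward
 * between flushes, so tc_ptr values increase across the array.  tb_find_pc()
 * is what maps a host PC taken from a signal or MMIO fault back to the guest
 * TB that generated it.
 */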
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */
        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    wp = g_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
static void cpu_unlink_tb(CPUState *env)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    spin_unlock(&interrupt_lock);
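/*
 * Added note (not in the original source): "unlinking" does not stop the CPU
 * immediately.  tb_reset_jump_recursive() rewrites the direct-jump patches so
 * that chained TBs fall back to the main execution loop, which is where the
 * pending interrupt or exit request is finally noticed.
 */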
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /* If called from iothread context, wake the target cpu in
     * case its halted. */
    if (!qemu_cpu_is_self(env)) {
            env->icount_decr.u16.high = 0xffff;
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
    env->interrupt_request |= mask;

#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;

void cpu_exit(CPUState *env)
    env->exit_request = 1;
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

static CPUTLBEntry s_cputlb_empty_entry = {
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;

void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);

int cpu_physical_memory_set_dirty_tracking(int enable)
    in_migration = enable;
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
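/*
 * Worked example (added; not in the original source): if a 2 MB page at
 * 0x00200000 is already recorded and another 2 MB page at 0x00600000 is
 * added, the while loop keeps widening 'mask' until both addresses agree
 * under it, leaving tlb_flush_addr = 0 with an 8 MB-aligned mask; any
 * tlb_flush_page() hit inside that range then falls back to a full flush.
 */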
static bool is_ram_rom(ram_addr_t pd)
    pd &= ~TARGET_PAGE_MASK;
    return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;

static bool is_ram_rom_romd(ram_addr_t pd)
    return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
    if (!is_ram_rom_romd(pd)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if (is_ram_rom(pd)) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
            iotlb |= io_mem_notdirty.ram_addr;
            iotlb |= io_mem_rom.ram_addr;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p.region_offset;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
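/*
 * Added note (not in the original source): each TLB entry ends up with two
 * translations - 'addend' turns a guest virtual address into a host pointer
 * for fast RAM accesses, while iotlb[] holds what the slow path needs (an
 * I/O index plus region offset, or a RAM address tagged as notdirty/rom)
 * whenever the address bits carry TLB_MMIO or TLB_NOTDIRTY.
 */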
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data
    walk_memory_regions_fn fn;
    unsigned long start;

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
        return walk_memory_regions_end(data, base, 0);
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
    struct walk_memory_regions_data data;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

    return walk_memory_regions_end(&data, 0, 0);
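/*
 * Usage sketch (added; not in the original source): page_dump() below is the
 * in-tree caller - it passes a FILE* as 'priv' and a callback that prints one
 * line per maximal run of pages sharing the same protection flags, which is
 * exactly what walk_memory_regions_end() coalesces.
 */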
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                   " " TARGET_ABI_FMT_lx " %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

/* dump memory mappings */
void page_dump(FILE *f)
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));

    if (start + len - 1 < start) {
        /* We've wrapped around. */
    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !(p->flags & PAGE_VALID) )
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readable, bool readonly)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    ram_addr_t phys_offset = section->mr->ram_addr;
    ram_addr_t region_offset = section->offset_within_region;
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    if (memory_region_is_ram(section->mr)) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    if (!readable) {
        phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
    }

    if (readonly) {
        phys_offset |= io_mem_rom.ram_addr;
    }

    if (phys_offset == io_mem_unassigned.ram_addr) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
        if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                p->region_offset = region_offset;
                if (is_ram_rom_romd(phys_offset))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if (is_ram_rom_romd(phys_offset)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset,
                                           io_mem_unassigned.ram_addr,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_coalesce_mmio_region(addr, size);
    }
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_uncoalesce_mmio_region(addr, size);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
    int flags;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of data
               segment (system break) and this value.  We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
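
/* Illustrative sketch (not part of the original file): typical pairing of
 * qemu_ram_alloc() with the host-pointer helpers above, for device-local
 * memory.  The MemoryRegion pointer and the size are hypothetical. */
static void example_alloc_and_touch_ram(MemoryRegion *mr)
{
    ram_addr_t base = qemu_ram_alloc(2 * TARGET_PAGE_SIZE, mr);
    uint8_t *host = qemu_get_ram_ptr(base);

    host[0] = 0x42;             /* direct host-side access to guest RAM */
    qemu_put_ram_ptr(host);     /* drop the (Xen) map-cache reference */
}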
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    RAMBlock *block;

    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (addr - block->offset + *size > block->length)
                *size = block->length - addr + block->offset;
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();
}
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
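
/* Illustrative sketch (not part of the original file): round-tripping between
 * a host pointer and a ram_addr_t with the helpers above.  'base' is a
 * hypothetical offset previously returned by qemu_ram_alloc(). */
static void example_host_ptr_round_trip(ram_addr_t base)
{
    void *host = qemu_get_ram_ptr(base);
    ram_addr_t back;

    /* qemu_ram_addr_from_host() returns 0 on success, non-zero otherwise */
    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == base);
    }
}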
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
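
/* Illustrative sketch (not part of the original file): a device model would
 * describe its MMIO window with the same MemoryRegionOps pattern used above.
 * The callbacks and the register behaviour are hypothetical. */
static uint64_t example_dev_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    return 0;   /* pretend every register reads back as zero */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint64_t val, unsigned size)
{
    /* a real device would latch 'val' into its state here */
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};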
3177 static void notdirty_mem_write(void *opaque
, target_phys_addr_t ram_addr
,
3178 uint64_t val
, unsigned size
)
3181 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3182 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3183 #if !defined(CONFIG_USER_ONLY)
3184 tb_invalidate_phys_page_fast(ram_addr
, size
);
3185 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3190 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
3193 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
3196 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
3201 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3202 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
    /* we remove the notdirty callback only if the code has been
       flushed */
3205 if (dirty_flags
== 0xff)
3206 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3209 static const MemoryRegionOps notdirty_mem_ops
= {
3210 .read
= error_mem_read
,
3211 .write
= notdirty_mem_write
,
3212 .endianness
= DEVICE_NATIVE_ENDIAN
,
3215 /* Generate a debug exception if a watchpoint has been hit. */
3216 static void check_watchpoint(int offset
, int len_mask
, int flags
)
3218 CPUState
*env
= cpu_single_env
;
3219 target_ulong pc
, cs_base
;
3220 TranslationBlock
*tb
;
3225 if (env
->watchpoint_hit
) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
3229 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
3232 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
3233 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
3234 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
3235 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
3236 wp
->flags
|= BP_WATCHPOINT_HIT
;
3237 if (!env
->watchpoint_hit
) {
3238 env
->watchpoint_hit
= wp
;
3239 tb
= tb_find_pc(env
->mem_io_pc
);
3241 cpu_abort(env
, "check_watchpoint: could not find TB for "
3242 "pc=%p", (void *)env
->mem_io_pc
);
3244 cpu_restore_state(tb
, env
, env
->mem_io_pc
);
3245 tb_phys_invalidate(tb
, -1);
3246 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
3247 env
->exception_index
= EXCP_DEBUG
;
3249 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
3250 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
3252 cpu_resume_from_signal(env
, NULL
);
3255 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
3260 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3261 so these check for a hit then pass through to the normal out-of-line
3263 static uint64_t watch_mem_read(void *opaque
, target_phys_addr_t addr
,
3266 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
3268 case 1: return ldub_phys(addr
);
3269 case 2: return lduw_phys(addr
);
3270 case 4: return ldl_phys(addr
);
3275 static void watch_mem_write(void *opaque
, target_phys_addr_t addr
,
3276 uint64_t val
, unsigned size
)
3278 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
3280 case 1: stb_phys(addr
, val
);
3281 case 2: stw_phys(addr
, val
);
3282 case 4: stl_phys(addr
, val
);
3287 static const MemoryRegionOps watch_mem_ops
= {
3288 .read
= watch_mem_read
,
3289 .write
= watch_mem_write
,
3290 .endianness
= DEVICE_NATIVE_ENDIAN
,
3293 static uint64_t subpage_read(void *opaque
, target_phys_addr_t addr
,
3296 subpage_t
*mmio
= opaque
;
3297 unsigned int idx
= SUBPAGE_IDX(addr
);
3298 #if defined(DEBUG_SUBPAGE)
3299 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3300 mmio
, len
, addr
, idx
);
3303 addr
+= mmio
->region_offset
[idx
];
3304 idx
= mmio
->sub_io_index
[idx
];
3305 return io_mem_read(idx
, addr
, len
);
3308 static void subpage_write(void *opaque
, target_phys_addr_t addr
,
3309 uint64_t value
, unsigned len
)
3311 subpage_t
*mmio
= opaque
;
3312 unsigned int idx
= SUBPAGE_IDX(addr
);
3313 #if defined(DEBUG_SUBPAGE)
3314 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3315 " idx %d value %"PRIx64
"\n",
3316 __func__
, mmio
, len
, addr
, idx
, value
);
3319 addr
+= mmio
->region_offset
[idx
];
3320 idx
= mmio
->sub_io_index
[idx
];
3321 io_mem_write(idx
, addr
, value
, len
);
3324 static const MemoryRegionOps subpage_ops
= {
3325 .read
= subpage_read
,
3326 .write
= subpage_write
,
3327 .endianness
= DEVICE_NATIVE_ENDIAN
,
3330 static uint64_t subpage_ram_read(void *opaque
, target_phys_addr_t addr
,
3333 ram_addr_t raddr
= addr
;
3334 void *ptr
= qemu_get_ram_ptr(raddr
);
3336 case 1: return ldub_p(ptr
);
3337 case 2: return lduw_p(ptr
);
3338 case 4: return ldl_p(ptr
);
3343 static void subpage_ram_write(void *opaque
, target_phys_addr_t addr
,
3344 uint64_t value
, unsigned size
)
3346 ram_addr_t raddr
= addr
;
3347 void *ptr
= qemu_get_ram_ptr(raddr
);
3349 case 1: return stb_p(ptr
, value
);
3350 case 2: return stw_p(ptr
, value
);
3351 case 4: return stl_p(ptr
, value
);
3356 static const MemoryRegionOps subpage_ram_ops
= {
3357 .read
= subpage_ram_read
,
3358 .write
= subpage_ram_write
,
3359 .endianness
= DEVICE_NATIVE_ENDIAN
,
3362 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3363 ram_addr_t memory
, ram_addr_t region_offset
)
3367 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3369 idx
= SUBPAGE_IDX(start
);
3370 eidx
= SUBPAGE_IDX(end
);
3371 #if defined(DEBUG_SUBPAGE)
3372 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3373 mmio
, start
, end
, idx
, eidx
, memory
);
3375 if ((memory
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
3376 memory
= io_mem_subpage_ram
.ram_addr
;
3378 memory
= (memory
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3379 for (; idx
<= eidx
; idx
++) {
3380 mmio
->sub_io_index
[idx
] = memory
;
3381 mmio
->region_offset
[idx
] = region_offset
;
3387 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
3388 ram_addr_t orig_memory
,
3389 ram_addr_t region_offset
)
3394 mmio
= g_malloc0(sizeof(subpage_t
));
3397 memory_region_init_io(&mmio
->iomem
, &subpage_ops
, mmio
,
3398 "subpage", TARGET_PAGE_SIZE
);
3399 subpage_memory
= mmio
->iomem
.ram_addr
;
3400 #if defined(DEBUG_SUBPAGE)
3401 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3402 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3404 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
3405 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, orig_memory
, region_offset
);
3410 static int get_free_io_mem_idx(void)
3414 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
3415 if (!io_mem_used
[i
]) {
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3423 /* mem_read and mem_write are arrays of functions containing the
3424 function to access byte (index 0), word (index 1) and dword (index
3425 2). Functions can be omitted with a NULL function pointer.
3426 If io_index is non zero, the corresponding io zone is
3427 modified. If it is zero, a new io zone is allocated. The return
3428 value can be used with cpu_register_physical_memory(). (-1) is
3429 returned if error. */
3430 static int cpu_register_io_memory_fixed(int io_index
,
3431 CPUReadMemoryFunc
* const *mem_read
,
3432 CPUWriteMemoryFunc
* const *mem_write
,
3437 if (io_index
<= 0) {
3438 io_index
= get_free_io_mem_idx();
3442 io_index
>>= IO_MEM_SHIFT
;
3443 if (io_index
>= IO_MEM_NB_ENTRIES
)
3447 for (i
= 0; i
< 3; ++i
) {
3448 assert(mem_read
[i
]);
3449 _io_mem_read
[io_index
][i
] = mem_read
[i
];
3451 for (i
= 0; i
< 3; ++i
) {
3452 assert(mem_write
[i
]);
3453 _io_mem_write
[io_index
][i
] = mem_write
[i
];
3455 io_mem_opaque
[io_index
] = opaque
;
3457 return (io_index
<< IO_MEM_SHIFT
);
3460 int cpu_register_io_memory(CPUReadMemoryFunc
* const *mem_read
,
3461 CPUWriteMemoryFunc
* const *mem_write
,
3464 return cpu_register_io_memory_fixed(0, mem_read
, mem_write
, opaque
);
3467 void cpu_unregister_io_memory(int io_table_address
)
3470 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
3472 for (i
=0;i
< 3; i
++) {
3473 _io_mem_read
[io_index
][i
] = NULL
;
3474 _io_mem_write
[io_index
][i
] = NULL
;
3476 io_mem_opaque
[io_index
] = NULL
;
3477 io_mem_used
[io_index
] = 0;
3480 static void io_mem_init(void)
3484 /* Must be first: */
3485 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
3486 assert(io_mem_ram
.ram_addr
== 0);
3487 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
3488 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
3489 "unassigned", UINT64_MAX
);
3490 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
3491 "notdirty", UINT64_MAX
);
3492 memory_region_init_io(&io_mem_subpage_ram
, &subpage_ram_ops
, NULL
,
3493 "subpage-ram", UINT64_MAX
);
3497 memory_region_init_io(&io_mem_watch
, &watch_mem_ops
, NULL
,
3498 "watch", UINT64_MAX
);
3501 static void memory_map_init(void)
3503 system_memory
= g_malloc(sizeof(*system_memory
));
3504 memory_region_init(system_memory
, "system", INT64_MAX
);
3505 set_system_memory_map(system_memory
);
3507 system_io
= g_malloc(sizeof(*system_io
));
3508 memory_region_init(system_io
, "io", 65536);
3509 set_system_io_map(system_io
);
3512 MemoryRegion
*get_system_memory(void)
3514 return system_memory
;
3517 MemoryRegion
*get_system_io(void)
3522 #endif /* !defined(CONFIG_USER_ONLY) */
3524 /* physical memory access (slow version, mainly for debug) */
3525 #if defined(CONFIG_USER_ONLY)
3526 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3527 uint8_t *buf
, int len
, int is_write
)
3534 page
= addr
& TARGET_PAGE_MASK
;
3535 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3538 flags
= page_get_flags(page
);
3539 if (!(flags
& PAGE_VALID
))
3542 if (!(flags
& PAGE_WRITE
))
3544 /* XXX: this code should not depend on lock_user */
3545 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3548 unlock_user(p
, addr
, l
);
3550 if (!(flags
& PAGE_READ
))
3552 /* XXX: this code should not depend on lock_user */
3553 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3556 unlock_user(p
, addr
, 0);
3566 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
3567 int len
, int is_write
)
3572 target_phys_addr_t page
;
3577 page
= addr
& TARGET_PAGE_MASK
;
3578 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3581 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3585 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
3586 target_phys_addr_t addr1
;
3587 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3588 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3589 /* XXX: could force cpu_single_env to NULL to avoid
3591 if (l
>= 4 && ((addr1
& 3) == 0)) {
3592 /* 32 bit write access */
3594 io_mem_write(io_index
, addr1
, val
, 4);
3596 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3597 /* 16 bit write access */
3599 io_mem_write(io_index
, addr1
, val
, 2);
3602 /* 8 bit write access */
3604 io_mem_write(io_index
, addr1
, val
, 1);
3609 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3611 ptr
= qemu_get_ram_ptr(addr1
);
3612 memcpy(ptr
, buf
, l
);
3613 if (!cpu_physical_memory_is_dirty(addr1
)) {
3614 /* invalidate code */
3615 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3617 cpu_physical_memory_set_dirty_flags(
3618 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3620 qemu_put_ram_ptr(ptr
);
3623 if (!is_ram_rom_romd(pd
)) {
3624 target_phys_addr_t addr1
;
3626 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3627 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3628 if (l
>= 4 && ((addr1
& 3) == 0)) {
3629 /* 32 bit read access */
3630 val
= io_mem_read(io_index
, addr1
, 4);
3633 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3634 /* 16 bit read access */
3635 val
= io_mem_read(io_index
, addr1
, 2);
3639 /* 8 bit read access */
3640 val
= io_mem_read(io_index
, addr1
, 1);
3646 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
3647 memcpy(buf
, ptr
+ (addr
& ~TARGET_PAGE_MASK
), l
);
3648 qemu_put_ram_ptr(ptr
);
3657 /* used for ROM loading : can write in RAM and ROM */
3658 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3659 const uint8_t *buf
, int len
)
3663 target_phys_addr_t page
;
3668 page
= addr
& TARGET_PAGE_MASK
;
3669 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3672 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3675 if (!is_ram_rom_romd(pd
)) {
3678 unsigned long addr1
;
3679 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3681 ptr
= qemu_get_ram_ptr(addr1
);
3682 memcpy(ptr
, buf
, l
);
3683 qemu_put_ram_ptr(ptr
);
3693 target_phys_addr_t addr
;
3694 target_phys_addr_t len
;
3697 static BounceBuffer bounce
;
3699 typedef struct MapClient
{
3701 void (*callback
)(void *opaque
);
3702 QLIST_ENTRY(MapClient
) link
;
3705 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
3706 = QLIST_HEAD_INITIALIZER(map_client_list
);
3708 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3710 MapClient
*client
= g_malloc(sizeof(*client
));
3712 client
->opaque
= opaque
;
3713 client
->callback
= callback
;
3714 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3718 void cpu_unregister_map_client(void *_client
)
3720 MapClient
*client
= (MapClient
*)_client
;
3722 QLIST_REMOVE(client
, link
);
3726 static void cpu_notify_map_clients(void)
3730 while (!QLIST_EMPTY(&map_client_list
)) {
3731 client
= QLIST_FIRST(&map_client_list
);
3732 client
->callback(client
->opaque
);
3733 cpu_unregister_map_client(client
);
3737 /* Map a physical memory region into a host virtual address.
3738 * May map a subset of the requested range, given by and returned in *plen.
3739 * May return NULL if resources needed to perform the mapping are exhausted.
3740 * Use only for reads OR writes - not for read-modify-write operations.
3741 * Use cpu_register_map_client() to know when retrying the map operation is
3742 * likely to succeed.
3744 void *cpu_physical_memory_map(target_phys_addr_t addr
,
3745 target_phys_addr_t
*plen
,
3748 target_phys_addr_t len
= *plen
;
3749 target_phys_addr_t todo
= 0;
3751 target_phys_addr_t page
;
3754 ram_addr_t raddr
= RAM_ADDR_MAX
;
3759 page
= addr
& TARGET_PAGE_MASK
;
3760 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3763 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3766 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
3767 if (todo
|| bounce
.buffer
) {
3770 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3774 cpu_physical_memory_read(addr
, bounce
.buffer
, l
);
3778 return bounce
.buffer
;
3781 raddr
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3789 ret
= qemu_ram_ptr_length(raddr
, &rlen
);
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
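
/* Illustrative sketch (not part of the original file): the map/unmap pattern
 * described above, for a read-only access.  'gpa' and 'want' are hypothetical
 * guest-physical address and length values; the mapping may cover less than
 * requested, and the bounce buffer is handled transparently by unmap(). */
static void example_map_and_read(target_phys_addr_t gpa, target_phys_addr_t want)
{
    target_phys_addr_t plen = want;
    void *host = cpu_physical_memory_map(gpa, &plen, 0 /* is_write */);

    if (host) {
        /* ... read up to 'plen' bytes here; it may be less than 'want' ... */
        cpu_physical_memory_unmap(host, plen, 0 /* is_write */, plen);
    }
}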
3833 /* warning: addr must be aligned */
3834 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr
,
3835 enum device_endian endian
)
3843 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3846 if (!is_ram_rom_romd(pd
)) {
3848 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3849 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3850 val
= io_mem_read(io_index
, addr
, 4);
3851 #if defined(TARGET_WORDS_BIGENDIAN)
3852 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3856 if (endian
== DEVICE_BIG_ENDIAN
) {
3862 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3863 (addr
& ~TARGET_PAGE_MASK
);
3865 case DEVICE_LITTLE_ENDIAN
:
3866 val
= ldl_le_p(ptr
);
3868 case DEVICE_BIG_ENDIAN
:
3869 val
= ldl_be_p(ptr
);
3879 uint32_t ldl_phys(target_phys_addr_t addr
)
3881 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3884 uint32_t ldl_le_phys(target_phys_addr_t addr
)
3886 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3889 uint32_t ldl_be_phys(target_phys_addr_t addr
)
3891 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3894 /* warning: addr must be aligned */
3895 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr
,
3896 enum device_endian endian
)
3904 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3907 if (!is_ram_rom_romd(pd
)) {
3909 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3910 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3912 /* XXX This is broken when device endian != cpu endian.
3913 Fix and add "endian" variable check */
3914 #ifdef TARGET_WORDS_BIGENDIAN
3915 val
= io_mem_read(io_index
, addr
, 4) << 32;
3916 val
|= io_mem_read(io_index
, addr
+ 4, 4);
3918 val
= io_mem_read(io_index
, addr
, 4);
3919 val
|= io_mem_read(io_index
, addr
+ 4, 4) << 32;
3923 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3924 (addr
& ~TARGET_PAGE_MASK
);
3926 case DEVICE_LITTLE_ENDIAN
:
3927 val
= ldq_le_p(ptr
);
3929 case DEVICE_BIG_ENDIAN
:
3930 val
= ldq_be_p(ptr
);
3940 uint64_t ldq_phys(target_phys_addr_t addr
)
3942 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3945 uint64_t ldq_le_phys(target_phys_addr_t addr
)
3947 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3950 uint64_t ldq_be_phys(target_phys_addr_t addr
)
3952 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3956 uint32_t ldub_phys(target_phys_addr_t addr
)
3959 cpu_physical_memory_read(addr
, &val
, 1);
3963 /* warning: addr must be aligned */
3964 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr
,
3965 enum device_endian endian
)
3973 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3976 if (!is_ram_rom_romd(pd
)) {
3978 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3979 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3980 val
= io_mem_read(io_index
, addr
, 2);
3981 #if defined(TARGET_WORDS_BIGENDIAN)
3982 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3986 if (endian
== DEVICE_BIG_ENDIAN
) {
3992 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3993 (addr
& ~TARGET_PAGE_MASK
);
3995 case DEVICE_LITTLE_ENDIAN
:
3996 val
= lduw_le_p(ptr
);
3998 case DEVICE_BIG_ENDIAN
:
3999 val
= lduw_be_p(ptr
);
4009 uint32_t lduw_phys(target_phys_addr_t addr
)
4011 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4014 uint32_t lduw_le_phys(target_phys_addr_t addr
)
4016 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4019 uint32_t lduw_be_phys(target_phys_addr_t addr
)
4021 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4024 /* warning: addr must be aligned. The ram page is not masked as dirty
4025 and the code inside is not invalidated. It is useful if the dirty
4026 bits are used to track modified PTEs */
4027 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
4034 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4037 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4038 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4039 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4040 io_mem_write(io_index
, addr
, val
, 4);
4042 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4043 ptr
= qemu_get_ram_ptr(addr1
);
4046 if (unlikely(in_migration
)) {
4047 if (!cpu_physical_memory_is_dirty(addr1
)) {
4048 /* invalidate code */
4049 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4051 cpu_physical_memory_set_dirty_flags(
4052 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4058 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
4065 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4068 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4069 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4070 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4071 #ifdef TARGET_WORDS_BIGENDIAN
4072 io_mem_write(io_index
, addr
, val
>> 32, 4);
4073 io_mem_write(io_index
, addr
+ 4, (uint32_t)val
, 4);
4075 io_mem_write(io_index
, addr
, (uint32_t)val
, 4);
4076 io_mem_write(io_index
, addr
+ 4, val
>> 32, 4);
4079 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4080 (addr
& ~TARGET_PAGE_MASK
);
4085 /* warning: addr must be aligned */
4086 static inline void stl_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4087 enum device_endian endian
)
4094 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4097 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4098 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4099 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4100 #if defined(TARGET_WORDS_BIGENDIAN)
4101 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4105 if (endian
== DEVICE_BIG_ENDIAN
) {
4109 io_mem_write(io_index
, addr
, val
, 4);
4111 unsigned long addr1
;
4112 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4114 ptr
= qemu_get_ram_ptr(addr1
);
4116 case DEVICE_LITTLE_ENDIAN
:
4119 case DEVICE_BIG_ENDIAN
:
4126 if (!cpu_physical_memory_is_dirty(addr1
)) {
4127 /* invalidate code */
4128 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4130 cpu_physical_memory_set_dirty_flags(addr1
,
4131 (0xff & ~CODE_DIRTY_FLAG
));
4136 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4138 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4141 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
)
4143 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4146 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
)
4148 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4152 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4155 cpu_physical_memory_write(addr
, &v
, 1);
4158 /* warning: addr must be aligned */
4159 static inline void stw_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4160 enum device_endian endian
)
4167 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4170 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4171 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4172 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4173 #if defined(TARGET_WORDS_BIGENDIAN)
4174 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4178 if (endian
== DEVICE_BIG_ENDIAN
) {
4182 io_mem_write(io_index
, addr
, val
, 2);
4184 unsigned long addr1
;
4185 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4187 ptr
= qemu_get_ram_ptr(addr1
);
4189 case DEVICE_LITTLE_ENDIAN
:
4192 case DEVICE_BIG_ENDIAN
:
4199 if (!cpu_physical_memory_is_dirty(addr1
)) {
4200 /* invalidate code */
4201 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4203 cpu_physical_memory_set_dirty_flags(addr1
,
4204 (0xff & ~CODE_DIRTY_FLAG
));
4209 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4211 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4214 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
)
4216 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4219 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
)
4221 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
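
/* Illustrative sketch (not part of the original file): using the
 * endianness-explicit physical-memory accessors defined above to poke a
 * little-endian device register.  EXAMPLE_DEV_BASE is a hypothetical
 * guest-physical address. */
#define EXAMPLE_DEV_BASE 0x10000000
static void example_poke_device_register(void)
{
    uint32_t status;

    stl_le_phys(EXAMPLE_DEV_BASE + 0x0, 0x1);       /* 32-bit LE store */
    status = ldl_le_phys(EXAMPLE_DEV_BASE + 0x4);   /* 32-bit LE load */
    (void)status;
}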
4243 /* virtual memory access for debug (includes writing to ROM) */
4244 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
4245 uint8_t *buf
, int len
, int is_write
)
4248 target_phys_addr_t phys_addr
;
4252 page
= addr
& TARGET_PAGE_MASK
;
4253 phys_addr
= cpu_get_phys_page_debug(env
, page
);
4254 /* if no physical page mapped, return an error */
4255 if (phys_addr
== -1)
4257 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4260 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
4262 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
4264 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
4273 /* in deterministic execution mode, instructions doing device I/Os
4274 must be at the end of the TB */
4275 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
4277 TranslationBlock
*tb
;
4279 target_ulong pc
, cs_base
;
4282 tb
= tb_find_pc((unsigned long)retaddr
);
4284 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4287 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4288 cpu_restore_state(tb
, env
, (unsigned long)retaddr
);
4289 /* Calculate how many instructions had been executed before the fault
4291 n
= n
- env
->icount_decr
.u16
.low
;
4292 /* Generate a new TB ending on the I/O insn. */
4294 /* On MIPS and SH, delay slot instructions can only be restarted if
4295 they were already the first instruction in the TB. If this is not
4296 the first instruction in a TB then re-execute the preceding
4298 #if defined(TARGET_MIPS)
4299 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4300 env
->active_tc
.PC
-= 4;
4301 env
->icount_decr
.u16
.low
++;
4302 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4304 #elif defined(TARGET_SH4)
4305 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4308 env
->icount_decr
.u16
.low
++;
4309 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4312 /* This should never happen. */
4313 if (n
> CF_COUNT_MASK
)
4314 cpu_abort(env
, "TB too big during recompile");
4316 cflags
= n
| CF_LAST_IO
;
4318 cs_base
= tb
->cs_base
;
4320 tb_phys_invalidate(tb
, -1);
4321 /* FIXME: In theory this could raise an exception. In practice
4322 we have already translated the block once so it's probably ok. */
4323 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4324 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4325 the first in the TB) then we end up generating a whole new TB and
4326 repeating the fault, which is horribly inefficient.
4327 Better would be to execute just this insn uncached, or generate a
4329 cpu_resume_from_signal(env
, NULL
);
4332 #if !defined(CONFIG_USER_ONLY)
4334 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4336 int i
, target_code_size
, max_target_code_size
;
4337 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4338 TranslationBlock
*tb
;
4340 target_code_size
= 0;
4341 max_target_code_size
= 0;
4343 direct_jmp_count
= 0;
4344 direct_jmp2_count
= 0;
4345 for(i
= 0; i
< nb_tbs
; i
++) {
4347 target_code_size
+= tb
->size
;
4348 if (tb
->size
> max_target_code_size
)
4349 max_target_code_size
= tb
->size
;
4350 if (tb
->page_addr
[1] != -1)
4352 if (tb
->tb_next_offset
[0] != 0xffff) {
4354 if (tb
->tb_next_offset
[1] != 0xffff) {
4355 direct_jmp2_count
++;
4359 /* XXX: avoid using doubles ? */
4360 cpu_fprintf(f
, "Translation buffer state:\n");
4361 cpu_fprintf(f
, "gen code size %td/%ld\n",
4362 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4363 cpu_fprintf(f
, "TB count %d/%d\n",
4364 nb_tbs
, code_gen_max_blocks
);
4365 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4366 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4367 max_target_code_size
);
4368 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4369 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4370 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4371 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4373 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4374 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4376 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4378 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4379 cpu_fprintf(f
, "\nStatistics:\n");
4380 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4381 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4382 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4383 tcg_dump_info(f
, cpu_fprintf
);
4386 /* NOTE: this function can trigger an exception */
4387 /* NOTE2: the returned address is not exactly the physical address: it
4388 is the offset relative to phys_ram_base */
4389 tb_page_addr_t
get_page_addr_code(CPUState
*env1
, target_ulong addr
)
4391 int mmu_idx
, page_index
, pd
;
4394 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
4395 mmu_idx
= cpu_mmu_index(env1
);
4396 if (unlikely(env1
->tlb_table
[mmu_idx
][page_index
].addr_code
!=
4397 (addr
& TARGET_PAGE_MASK
))) {
4400 pd
= env1
->tlb_table
[mmu_idx
][page_index
].addr_code
& ~TARGET_PAGE_MASK
;
4401 if (pd
!= io_mem_ram
.ram_addr
&& pd
!= io_mem_rom
.ram_addr
4402 && !(pd
& IO_MEM_ROMD
)) {
4403 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4404 cpu_unassigned_access(env1
, addr
, 0, 1, 0, 4);
4406 cpu_abort(env1
, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx
"\n", addr
);
4409 p
= (void *)((uintptr_t)addr
+ env1
->tlb_table
[mmu_idx
][page_index
].addend
);
4410 return qemu_ram_addr_from_host_nofail(p
);
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env