/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
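
/* Worked example (values are illustrative, not taken from this file): with
   TARGET_PAGE_BITS = 12, L2_BITS = 10 and a 36-bit physical address space,
   36 - 12 = 24 bits remain to be mapped, i.e. two full 10-bit levels plus a
   4-bit remainder, so P_L1_BITS_REM is 4 and the L1 table keeps those 4 bits
   (16 entries). The same arithmetic drives V_L1_BITS for the virtual map. */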
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
    struct kinfo_vmentry *freep;

    freep = kinfo_getvmmap(getpid(), &cnt);
    for (i = 0; i < cnt; i++) {
        unsigned long startaddr, endaddr;

        startaddr = freep[i].kve_start;
        endaddr = freep[i].kve_end;
        if (h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
            } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                endaddr = ~0ul;
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
            }
        }
    }
#else
    last_brk = (unsigned long)sbrk(0);

    f = fopen("/compat/linux/proc/self/maps", "r");
    if (f) {
        unsigned long startaddr, endaddr;

        n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
        if (n == 2 && h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
            }
            page_set_flags(startaddr, endaddr, PAGE_RESERVED);
        }
    }
#endif
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
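
/* Lookup layout used above: the top V_L1_BITS of the page index select the
   l1_map slot, each intermediate level consumes another L2_BITS, and the
   lowest L2_BITS index the PageDesc inside the leaf array. page_find() is
   simply the non-allocating variant (alloc == 0). */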
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    int first_index = index & ~(L2_SIZE - 1);
    *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

    for (i = 0; i < L2_SIZE; i++) {
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
        pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Keep the buffer no bigger than 16MB to branch between blocks */
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
    /* Map the buffer so that we can use direct calls and branches.  */
    /* We have a +- 4GB range on the branches; leave some slop.  */
    if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
        code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
    }
    start = (void *)0x90000000UL;
#endif
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    addr = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024)) {
        code_gen_buffer_size = (512 * 1024 * 1024);
    }
#endif
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
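
/* Sizing note: code_gen_buffer_max_size leaves room for one worst-case TB
   (TCG_MAX_OP_SIZE * OPC_BUF_SIZE) so a translation in progress cannot run
   past the end of the buffer, and code_gen_max_blocks is derived from the
   buffer size divided by an assumed average block size. */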
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
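
/* Note on the lists walked above: page_next[] and jmp_next[] store the index
   n in the low two bits of each TranslationBlock pointer (jmp_first uses the
   value 2 as an end marker), which is why every element is masked with ~3
   before it is dereferenced. */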
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
*tb_gen_code(CPUState
*env
,
982 target_ulong pc
, target_ulong cs_base
,
983 int flags
, int cflags
)
985 TranslationBlock
*tb
;
987 tb_page_addr_t phys_pc
, phys_page2
;
988 target_ulong virt_page2
;
991 phys_pc
= get_page_addr_code(env
, pc
);
994 /* flush must be done */
996 /* cannot fail at this point */
998 /* Don't forget to invalidate previous TB info. */
999 tb_invalidated_flag
= 1;
1001 tc_ptr
= code_gen_ptr
;
1002 tb
->tc_ptr
= tc_ptr
;
1003 tb
->cs_base
= cs_base
;
1005 tb
->cflags
= cflags
;
1006 cpu_gen_code(env
, tb
, &code_gen_size
);
1007 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1009 /* check next page if needed */
1010 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1012 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1013 phys_page2
= get_page_addr_code(env
, virt_page2
);
1015 tb_link_page(tb
, phys_pc
, phys_page2
);
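
/* A TB whose code crosses a page boundary gets both physical pages recorded
   (phys_pc and phys_page2), so invalidating either page also invalidates
   this TB. */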
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
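
/* Because len is at most 8 and start is len-aligned, the bits tested above
   always fall within a single byte of code_bitmap; anything larger or
   unaligned must go through tb_invalidate_phys_page_range() directly. */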
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
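
/* A tb_next_offset[] value of 0xffff means no direct jump was emitted for
   that slot; real slots are reset so the generated jump initially lands on
   the code right after it (the TB simply exits) until the block is chained
   to another TB. */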
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};

/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
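
/* Setting TLB_NOTDIRTY in addr_write above forces subsequent guest writes to
   that page through the slow I/O path (IO_MEM_NOTDIRTY), which can set the
   dirty flags again before the fast RAM path is restored. */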
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
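
/* Example of the widening above (illustrative numbers only): if a 4 MB page
   is registered at 0x00400000 and another at 0x00800000, the mask is widened
   from 0xffc00000 to 0xff000000 so a single (tlb_flush_addr, tlb_flush_mask)
   pair covers both; a later tlb_flush_page() hitting that region falls back
   to a full flush. */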
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
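
/* For RAM-backed pages the addend stored above is host_ptr - guest_vaddr, so
   a hit in this TLB entry turns a guest virtual address into a host pointer
   with a single addition; pages flagged TLB_MMIO instead go through the
   io_mem_read/io_mem_write handlers via the iotlb entry. */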
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code.  */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        return 1;
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
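/* Illustrative (hypothetical) use of the registration API above: a board
 * model that wants to back 64 KiB of guest-physical space with RAM could do
 * something along these lines,
 *
 *     ram_addr_t off = qemu_ram_alloc(NULL, "board.sram", 0x10000, mr);
 *     cpu_register_physical_memory(0xd0000000, 0x10000, off | IO_MEM_RAM);
 *
 * where cpu_register_physical_memory() is the conventional wrapper around
 * cpu_register_physical_memory_log() with region_offset = 0 and
 * log_dirty = false.  The name, address and MemoryRegion argument are made
 * up for the example.
 */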
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of data
               segment (system break) and this value.  We use 32GB as a base to
               have enough room for the system break to grow.  */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size,
                          MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL, mr);
}
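/* Illustrative (hypothetical) caller: a device model that wants 16 MiB of
 * guest RAM registered under its own name might do
 *
 *     ram_addr_t off = qemu_ram_alloc(dev, "mydev.vram", 16 * 1024 * 1024, mr);
 *
 * and later turn a guest-visible offset into a host pointer with
 * qemu_get_ram_ptr(off).  "mydev.vram" and the MemoryRegion argument are
 * placeholders, not names used elsewhere in this file.
 */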
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
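/* Illustration (not from the original source): a device that owns a RAM block,
 * say a framebuffer, can touch it directly through this interface, e.g.
 *
 *     uint8_t *fb = qemu_get_ram_ptr(fb_offset);   // fb_offset is hypothetical
 *     memset(fb, 0, fb_size);
 *
 * whereas DMA into arbitrary guest memory should go through
 * cpu_physical_memory_map()/cpu_physical_memory_rw(), as the comment above
 * says.
 */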
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
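/* Worked example (explanatory, not from the original source): a guest store to
 * a RAM page that still holds translated code reaches these handlers because
 * its TLB entry was created with TLB_NOTDIRTY.  The handler invalidates any
 * TBs on that page, performs the store through qemu_get_ram_ptr(), and sets
 * every dirty bit except CODE_DIRTY_FLAG; once the re-read flags equal 0xff
 * (i.e. no translated code remains on the page) tlb_set_dirty() switches the
 * page back to the direct RAM fast path.
 */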
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldub_p(ptr);
}

static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stb_p(ptr, value);
}

static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return lduw_p(ptr);
}

static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stw_p(ptr, value);
}

static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldl_p(ptr);
}

static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stl_p(ptr, value);
}

static CPUReadMemoryFunc * const subpage_ram_read[] = {
    &subpage_ram_readb,
    &subpage_ram_readw,
    &subpage_ram_readl,
};

static CPUWriteMemoryFunc * const subpage_ram_write[] = {
    &subpage_ram_writeb,
    &subpage_ram_writew,
    &subpage_ram_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_SUBPAGE_RAM;
    }
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
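/* Worked example (illustrative, addresses made up): with 4 KiB target pages,
 * registering a 0x100-byte device at guest-physical 0x10000c00 does not fill a
 * whole page, so the page is converted to a subpage.  SUBPAGE_IDX(0x10000c00)
 * is 0xc00, and subpage_register() points sub_io_index[0xc00..0xcff] at the
 * device's io-mem entry while the rest of the page keeps its previous handler.
 */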
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode.  There are devices out
 * there that operate in big endian too.  Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 */
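/* Worked example (illustrative): a 32-bit register value 0x12345678 read from
 * a big-endian device on a little-endian target goes through
 * swapendian_mem_readl() below and comes back as bswap32(0x12345678) ==
 * 0x78563412, so the guest driver sees the value in the byte order it expects.
 */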
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        g_free(io_mem_opaque[io_index]);
    }
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2).  Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified.  If it is zero, a new io zone is allocated.  The return
   value can be used with cpu_register_physical_memory().  (-1) is
   returned if error.  */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
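/* Illustrative (hypothetical) device registration using the API above: a
 * device supplies its byte/word/dword callbacks and gets back a value it can
 * pass to cpu_register_physical_memory().  The names below do not exist
 * elsewhere in the tree; they only sketch the calling convention.
 *
 *     static CPUReadMemoryFunc * const mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int iomem = cpu_register_io_memory(mydev_read, mydev_write, s,
 *                                        DEVICE_LITTLE_ENDIAN);
 *     cpu_register_physical_memory(base, 0x1000, iomem);
 */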
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
                                 subpage_ram_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
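/* Illustrative (hypothetical) DMA pattern built on the map/unmap pair: a
 * device wanting to scatter data directly into guest memory could do
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 1);   // 1 = write
 *     if (host) {
 *         memcpy(host, data, plen);
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 *
 * gpa, size and data are placeholders.  plen may come back smaller than
 * requested, and a NULL return means the bounce buffer is busy, so the caller
 * should fall back to cpu_physical_memory_rw() or register a map client.
 */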
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned.  The ram page is not masked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"