 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include <sys/types.h>

#include "qemu-common.h"
#include "cache-utils.h"

#if !defined(TARGET_IA64)
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#define P_L1_BITS  P_L1_BITS_REM

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#define V_L1_BITS  V_L1_BITS_REM

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
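/* Together these constants describe the top (L1) level of the physical
   (P_L1_*) and virtual (V_L1_*) page maps: *_L1_SHIFT is how far a page
   index is shifted to find its L1 slot, and the bits below that are
   consumed L2_BITS at a time by the lower levels. */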
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;

static const char *logfilename = "qemu.log";
static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;

        freep = kinfo_getvmmap(getpid(), &cnt);
        for (i = 0; i < cnt; i++) {
            unsigned long startaddr, endaddr;

            startaddr = freep[i].kve_start;
            endaddr = freep[i].kve_end;
            if (h2g_valid(startaddr)) {
                startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                if (h2g_valid(endaddr)) {
                    endaddr = h2g(endaddr);
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
            unsigned long startaddr, endaddr;

            n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
            if (n == 2 && h2g_valid(startaddr)) {
                startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                if (h2g_valid(endaddr)) {
                    endaddr = h2g(endaddr);
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));

        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(tb_page_addr_t index)
    return page_find_alloc(index, 0);

#if !defined(CONFIG_USER_ONLY)
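/* Same lookup over l1_phys_map: newly allocated leaf entries are
   initialized to IO_MEM_UNASSIGNED with a default region_offset derived
   from the page index. */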
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;

    return pd + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));

static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        start = (void *)0x90000000UL;

        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);

        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
CPUState *qemu_get_cpu(int cpu)
    CPUState *env = first_cpu;

        if (env->cpu_index == cpu)

void cpu_exec_init(CPUState *env)
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;

static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1 (int level, void **lp)
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);

static void page_flush_tb(void)
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
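/* TBs are linked into per-page lists and per-destination circular jump
   lists; the low two bits of each stored TranslationBlock pointer encode
   which slot (or, for the value 2, the list head) the link belongs to,
   which is why pointers are masked with ~3 before being dereferenced. */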
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
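/* Remove 'tb' from the physical hash table, from the per-page TB lists
   and from the jump lists of any TB chained to it, so that it can no
   longer be found or entered. */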
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
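/* Set 'len' bits starting at bit index 'start' in the byte array 'tab'. */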
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
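/* Build a bitmap of the bytes of this page that are covered by translated
   code, so that later writes can be checked against it without walking the
   whole TB list. */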
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
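/* Translate a new TB for (pc, cs_base, flags) and register it in the
   physical page tables via tb_link_page(). */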
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;

    phys_pc = get_page_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    tb_link_page(tb, phys_pc, phys_page2);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1
#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
#endif /* TARGET_HAS_SMC */
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {

static void tb_reset_jump_recursive(TranslationBlock *tb);
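/* Undo the chaining of jump slot 'n' of 'tb': take tb off the circular
   jmp_first list of the TB it currently jumps to, patch the generated code
   back to its unchained target, and recurse into that TB. */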
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    wp = qemu_malloc(sizeof(*wp));
    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
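/* Force the CPU out of chained TB execution: unchain the TB that is
   currently executing (if any) so that control returns to the main
   execution loop. */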
static void cpu_unlink_tb(CPUState *env)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    spin_unlock(&interrupt_lock);

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        env->icount_decr.u16.high = 0xffff;
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
    env->interrupt_request |= mask;

#endif /* CONFIG_USER_ONLY */
void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;

void cpu_exit(CPUState *env)
    env->exit_request = 1;

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t phys_offset,
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);

static int cpu_notify_migration_log(int enable)
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels varies based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp, target_phys_addr_t addr)
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, addr | i << TARGET_PAGE_BITS,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset, false);
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i);

static void phys_page_for_each(CPUPhysMemoryClient *client)
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i);

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
    QLIST_REMOVE(client, list);
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))

void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

static CPUTLBEntry s_cputlb_empty_entry = {

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
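/* Invalidate a single TLB entry if any of its read, write or code
   addresses matches 'addr'. */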
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;

void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
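/* If a cached write entry points into the newly-cleaned range, redirect it
   through the TLB_NOTDIRTY slow path so the dirty bits are set again on the
   next write. */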
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
int cpu_physical_memory_set_dirty_tracking(int enable)
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);

int cpu_physical_memory_get_dirty_tracking(void)
    return in_migration;

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);

int cpu_physical_log_start(target_phys_addr_t start_addr,
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);

int cpu_physical_log_stop(target_phys_addr_t start_addr,
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
            iotlb += p->region_offset;
    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;

void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data
    walk_memory_regions_fn fn;
    unsigned long start;

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;
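/* Recursively walk one level of the l1_map, reporting maximal runs of pages
   with identical protection flags to the caller's callback through
   walk_memory_regions_end(). */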
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
        return walk_memory_regions_end(data, base, 0);
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
    struct walk_memory_regions_data data;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

    return walk_memory_regions_end(&data, 0, 0);
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

/* dump memory mappings */
void page_dump(FILE *f)
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
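
/* Illustrative sketch (not compiled): page_dump() above is itself just a
 * walk_memory_regions() client, so a custom callback can reuse the same
 * walker.  Everything named count_exec_* below is hypothetical; it only
 * assumes the callback signature used by dump_region() (priv, start, end,
 * prot) and that the callback returns 0 to keep walking.
 */
#if 0
static int count_exec_region(void *priv, abi_ulong start, abi_ulong end,
                             unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        (*(int *)priv)++;          /* count executable guest mappings */
    }
    return 0;                      /* keep walking */
}

static int count_exec_regions(void)
{
    int count = 0;

    walk_memory_regions(&count, count_exec_region);
    return count;
}
#endif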
int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));

    if (start + len - 1 < start) {
        /* We've wrapped around. */

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!(p->flags & PAGE_VALID))
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
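
/* Illustrative sketch (not compiled): in user-mode emulation the syscall
 * layer keeps these per-page flags in sync with the guest's view of its
 * mappings.  The helpers below are hypothetical; they only assume the PAGE_*
 * flags used above, the host PROT_* constants from <sys/mman.h>, and that
 * page_check_range() returns 0 when the whole range has the requested
 * protection.
 */
#if 0
static void record_guest_mapping(target_ulong start, target_ulong size,
                                 int host_prot)
{
    int flags = PAGE_VALID;

    if (host_prot & PROT_READ)  flags |= PAGE_READ;
    if (host_prot & PROT_WRITE) flags |= PAGE_WRITE;
    if (host_prot & PROT_EXEC)  flags |= PAGE_EXEC;
    /* end is exclusive; page_set_flags() page-aligns it itself */
    page_set_flags(start, start + size, flags);
}

static int guest_range_writable(target_ulong addr, target_ulong size)
{
    return page_check_range(addr, size, PAGE_WRITE) == 0;
}
#endif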
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    p = page_find(address >> TARGET_PAGE_BITS);

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);

        mprotect((void *)g2h(host_start), qemu_host_page_size,
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr) \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
            if (start_addr2 > 0) \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
            end_addr2 = TARGET_PAGE_SIZE - 1; \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1) \
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
    target_phys_addr_t addr, end_addr;
    ram_addr_t orig_size = size;

    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,

                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                p->region_offset = 0;
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
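
/* Illustrative sketch (not compiled): board code normally pairs a RAM
 * allocation with a registration through the cpu_register_physical_memory()
 * wrapper rather than calling cpu_register_physical_memory_log() directly.
 * The device name, base address and size below are made up.
 */
#if 0
static void example_register_main_ram(DeviceState *dev, ram_addr_t ram_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(dev, "example.ram", ram_size);

    /* low bits of phys_offset == IO_MEM_RAM mark the pages as plain RAM */
    cpu_register_physical_memory(0x00000000, ram_size,
                                 ram_offset | IO_MEM_RAM);
}
#endif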
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_coalesce_mmio_region(addr, size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_uncoalesce_mmio_region(addr, size);

void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
static void *file_ram_alloc(RAMBlock *block,
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {

    fd = mkstemp(filename);
        perror("unable to create backing store for hugepages");

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);

        if (next - end >= size && next - end < mingap) {
            mingap = next - end;
static ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

    new_block->offset = find_ram_offset(size);
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            fprintf(stderr, "-mem-path option unsupported\n");
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            if (xen_mapcache_enabled()) {
                xen_ram_alloc(new_block->offset, size);
                new_block->host = qemu_vmalloc(size);
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);

    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
void qemu_ram_unmap(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
void qemu_ram_free(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                    munmap(block->host, block->length);
                    qemu_vfree(block->host);
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
                if (xen_mapcache_enabled()) {
                    qemu_invalidate_entry(block->host);
                    qemu_vfree(block->host);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                munmap(vaddr, length);
#if defined(__linux__) && !defined(TARGET_S390X)
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                    flags |= MAP_PRIVATE;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                flags |= MAP_SHARED | MAP_ANONYMOUS;
                area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
            if (area != vaddr) {
                fprintf(stderr, "Could not remap addr: %lx@%lx\n",
            qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            if (xen_mapcache_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 */
                if (block->offset == 0) {
                    return qemu_map_cache(addr, 0, 1);
                } else if (block->host == NULL) {
                    block->host = xen_map_block(block->offset, block->length);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
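
/* Illustrative sketch (not compiled): a device that owns a whole RAM block
 * (e.g. video RAM) can resolve it once and work on the host pointer,
 * releasing it with qemu_put_ram_ptr() so the Xen map-cache reference is
 * dropped when that backend is in use.  Names and sizes are made up.
 */
#if 0
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    memset(vram, 0, vram_size);
    qemu_put_ram_ptr(vram);
}
#endif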
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_mapcache_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 */
                if (block->offset == 0) {
                    return qemu_map_cache(addr, 0, 1);
                } else if (block->host == NULL) {
                    block->host = xen_map_block(block->offset, block->length);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
void qemu_put_ram_ptr(void *addr)
    trace_qemu_put_ram_ptr(addr);

    if (xen_mapcache_enabled()) {
        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr == block->host) {
        if (block && block->host) {
            xen_unmap_block(block->host, block->length);
            qemu_map_cache_unlock(addr);
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);

    if (xen_mapcache_enabled()) {
        *ram_addr = qemu_ram_addr_from_mapcache(ptr);

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
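
/* Illustrative sketch (not compiled): consumers of the dirty bitmap that the
 * notdirty handlers above maintain (display adapters, migration, the TB
 * invalidation code) poll and clear per-page flags along these lines.  The
 * helper name is hypothetical; cpu_physical_memory_get_dirty(),
 * cpu_physical_memory_reset_dirty() and VGA_DIRTY_FLAG are assumed to come
 * from the dirty-flag API of this QEMU generation.
 */
#if 0
static int example_page_needs_redraw(ram_addr_t page_addr)
{
    if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
        cpu_physical_memory_reset_dirty(page_addr,
                                        page_addr + TARGET_PAGE_SIZE,
                                        VGA_DIRTY_FLAG);
        return 1;
    }
    return 0;
}
#endif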
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                cpu_resume_from_signal(env, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;
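
/* Illustrative sketch (not compiled): a debug front end arms a data
 * watchpoint through the CPU API, and the TLB trick described below then
 * funnels guest accesses through io_mem_watch so check_watchpoint() runs.
 * The address and length are arbitrary, and cpu_watchpoint_insert() is
 * assumed to have the usual (env, addr, len, flags, &wp) signature of this
 * QEMU generation.
 */
#if 0
static void example_arm_write_watchpoint(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif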
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   handlers.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);

static CPUReadMemoryFunc * const watch_mem_read[3] = {

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
    return subpage_readlen(opaque, addr, 0);

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
    subpage_writelen(opaque, addr, value, 0);

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
    return subpage_readlen(opaque, addr, 1);

static void subpage_writew (void *opaque, target_phys_addr_t addr,
    subpage_writelen(opaque, addr, value, 1);

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
    return subpage_readlen(opaque, addr, 2);

static void subpage_writel (void *opaque, target_phys_addr_t addr,
    subpage_writelen(opaque, addr, value, 2);

static CPUReadMemoryFunc * const subpage_read[] = {

static CPUWriteMemoryFunc * const subpage_write[] = {
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)

    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);

    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
    mmio = qemu_mallocz(sizeof(subpage_t));

    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);

    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
static int get_free_io_mem_idx(void)
    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {

    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
} SwapEndianContainer;
static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));

static CPUReadMemoryFunc * const swapendian_readfn[3] = {
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));

static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
static void swapendian_init(int io_index)
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    io_mem_opaque[io_index] = c;

static void swapendian_del(int io_index)
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    io_mem_opaque[io_index] = opaque;

    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
    case DEVICE_NATIVE_ENDIAN:

    return (io_index << IO_MEM_SHIFT);

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
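
/* Illustrative sketch (not compiled): a device model registers its MMIO
 * callbacks and then maps the returned io index at a guest-physical address.
 * Everything named example_* is hypothetical; NULL slots in the tables fall
 * back to the unassigned_mem_* handlers, as documented above.
 */
#if 0
static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                       /* read device registers here */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    /* update device state here */
}

static CPUReadMemoryFunc * const example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,
};

static CPUWriteMemoryFunc * const example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_map_mmio(void *opaque)
{
    int io = cpu_register_io_memory(example_mmio_read, example_mmio_write,
                                    opaque, DEVICE_NATIVE_ENDIAN);

    cpu_register_physical_memory(0x10000000, 0x1000, io);
}
#endif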
void cpu_unregister_io_memory(int io_table_address)
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
static void io_mem_init(void)
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    /* 8 bit write access */
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                /* qemu doesn't execute guest code directly, but kvm does;
                   therefore flush instruction caches */
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr) + l);
                qemu_put_ram_ptr(ptr);
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
    target_phys_addr_t addr;
    target_phys_addr_t len;

static BounceBuffer bounce;

typedef struct MapClient {
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    uint8_t *ret = NULL;
    target_phys_addr_t page;
    unsigned long addr1;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
                cpu_physical_memory_read(addr, bounce.buffer, l);
            ptr = bounce.buffer;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        } else if (ret + done != ptr) {
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        if (xen_mapcache_enabled()) {
            uint8_t *buffer1 = buffer;
            uint8_t *end_buffer = buffer + len;

            while (buffer1 < end_buffer) {
                qemu_put_ram_ptr(buffer1);
                buffer1 += TARGET_PAGE_SIZE;
    cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
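
/* Illustrative sketch (not compiled): the canonical map/unmap DMA pattern.
 * A real caller loops until the whole range is covered and registers a map
 * client when NULL is returned; this only shows the happy path and assumes
 * the third argument of cpu_physical_memory_map() is is_write.
 */
#if 0
static void example_dma_write(target_phys_addr_t gpa, const uint8_t *data,
                              target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(gpa, &plen, 1);

    if (host) {
        memcpy(host, data, plen);               /* plen may be < size */
        cpu_physical_memory_unmap(host, plen, 1, plen);
    }
}
#endif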
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
uint32_t ldub_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, &val, 1);
/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
void stb_phys(target_phys_addr_t addr, uint32_t val)
    cpu_physical_memory_write(addr, &v, 1);
/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
void stq_phys(target_phys_addr_t addr, uint64_t val)
    cpu_physical_memory_write(addr, &val, 8);
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
    target_phys_addr_t phys_addr;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(phys_addr, buf, l);
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
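
/* Illustrative sketch (not compiled): the gdb stub and the monitor use this
 * helper to access guest virtual memory; the last argument selects write (1)
 * or read (0).  The wrapper name is hypothetical.
 */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif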
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
    TranslationBlock *tb;
    target_ulong pc, cs_base;

    tb = tb_find_pc((unsigned long)retaddr);
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    cs_base = tb->cs_base;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
        if (tb->tb_next_offset[0] != 0xffff) {
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    tcg_dump_info(f, cpu_fprintf);
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"