2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
36 #if defined(CONFIG_USER_ONLY)
39 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40 #include <sys/param.h>
41 #if __FreeBSD_version >= 700104
42 #define HAVE_KINFO_GETVMMAP
43 #define sigqueue sigqueue_freebsd /* avoid redefinition */
46 #include <machine/profile.h>
56 //#define DEBUG_TB_INVALIDATE
59 //#define DEBUG_UNASSIGNED
61 /* make various TB consistency checks */
62 //#define DEBUG_TB_CHECK
63 //#define DEBUG_TLB_CHECK
65 //#define DEBUG_IOPORT
66 //#define DEBUG_SUBPAGE
68 #if !defined(CONFIG_USER_ONLY)
69 /* TB consistency checks only implemented for usermode emulation. */
73 #define SMC_BITMAP_USE_THRESHOLD 10
75 static TranslationBlock
*tbs
;
76 static int code_gen_max_blocks
;
77 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
79 /* any access to the tbs or the page table must use this lock */
80 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
82 #if defined(__arm__) || defined(__sparc_v9__)
83 /* The prologue must be reachable with a direct jump. ARM and Sparc64
84 have limited branch ranges (possibly also PPC) so place it in a
85 section close to code segment. */
86 #define code_gen_section \
87 __attribute__((__section__(".gen_code"))) \
88 __attribute__((aligned (32)))
90 /* Maximum alignment for Win32 is 16. */
91 #define code_gen_section \
92 __attribute__((aligned (16)))
94 #define code_gen_section \
95 __attribute__((aligned (32)))
98 uint8_t code_gen_prologue
[1024] code_gen_section
;
99 static uint8_t *code_gen_buffer
;
100 static unsigned long code_gen_buffer_size
;
101 /* threshold to flush the translated code buffer */
102 static unsigned long code_gen_buffer_max_size
;
103 static uint8_t *code_gen_ptr
;
105 #if !defined(CONFIG_USER_ONLY)
107 static int in_migration
;
109 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
) };
113 /* current CPU in the current thread. It is only valid inside
115 CPUState
*cpu_single_env
;
116 /* 0 = Do not count executed instructions.
117 1 = Precise instruction counting.
118 2 = Adaptive rate instruction counting. */
120 /* Current instruction counter. While executing translated code this may
121 include some instructions that have not yet been executed. */
124 typedef struct PageDesc
{
125 /* list of TBs intersecting this ram page */
126 TranslationBlock
*first_tb
;
127 /* in order to optimize self modifying code, we count the number
128 of lookups we do to a given page to use a bitmap */
129 unsigned int code_write_count
;
130 uint8_t *code_bitmap
;
131 #if defined(CONFIG_USER_ONLY)
136 /* In system mode we want L1_MAP to be based on ram offsets,
137 while in user mode we want it to be based on virtual addresses. */
138 #if !defined(CONFIG_USER_ONLY)
139 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
140 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
142 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
145 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
148 /* Size of the L2 (and L3, etc) page tables. */
150 #define L2_SIZE (1 << L2_BITS)
152 /* The bits remaining after N lower levels of page tables. */
153 #define P_L1_BITS_REM \
154 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
155 #define V_L1_BITS_REM \
156 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
158 /* Size of the L1 page table. Avoid silly small sizes. */
159 #if P_L1_BITS_REM < 4
160 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
162 #define P_L1_BITS P_L1_BITS_REM
165 #if V_L1_BITS_REM < 4
166 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
168 #define V_L1_BITS V_L1_BITS_REM
171 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
172 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
174 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
175 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
177 unsigned long qemu_real_host_page_size
;
178 unsigned long qemu_host_page_bits
;
179 unsigned long qemu_host_page_size
;
180 unsigned long qemu_host_page_mask
;
182 /* This is a multi-level map on the virtual address space.
183 The bottom level has pointers to PageDesc. */
184 static void *l1_map
[V_L1_SIZE
];
186 #if !defined(CONFIG_USER_ONLY)
187 typedef struct PhysPageDesc
{
188 /* offset in host memory of the page + io_index in the low bits */
189 ram_addr_t phys_offset
;
190 ram_addr_t region_offset
;
193 /* This is a multi-level map on the physical address space.
194 The bottom level has pointers to PhysPageDesc. */
195 static void *l1_phys_map
[P_L1_SIZE
];
197 static void io_mem_init(void);
199 /* io memory support */
200 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
201 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
202 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
203 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
204 static int io_mem_watch
;
209 static const char *logfilename
= "qemu.log";
211 static const char *logfilename
= "/tmp/qemu.log";
215 static int log_append
= 0;
218 #if !defined(CONFIG_USER_ONLY)
219 static int tlb_flush_count
;
221 static int tb_flush_count
;
222 static int tb_phys_invalidate_count
;
225 static void map_exec(void *addr
, long size
)
228 VirtualProtect(addr
, size
,
229 PAGE_EXECUTE_READWRITE
, &old_protect
);
233 static void map_exec(void *addr
, long size
)
235 unsigned long start
, end
, page_size
;
237 page_size
= getpagesize();
238 start
= (unsigned long)addr
;
239 start
&= ~(page_size
- 1);
241 end
= (unsigned long)addr
+ size
;
242 end
+= page_size
- 1;
243 end
&= ~(page_size
- 1);
245 mprotect((void *)start
, end
- start
,
246 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
250 static void page_init(void)
252 /* NOTE: we can always suppose that qemu_host_page_size >=
256 SYSTEM_INFO system_info
;
258 GetSystemInfo(&system_info
);
259 qemu_real_host_page_size
= system_info
.dwPageSize
;
262 qemu_real_host_page_size
= getpagesize();
264 if (qemu_host_page_size
== 0)
265 qemu_host_page_size
= qemu_real_host_page_size
;
266 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
267 qemu_host_page_size
= TARGET_PAGE_SIZE
;
268 qemu_host_page_bits
= 0;
269 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
270 qemu_host_page_bits
++;
271 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
273 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
275 #ifdef HAVE_KINFO_GETVMMAP
276 struct kinfo_vmentry
*freep
;
279 freep
= kinfo_getvmmap(getpid(), &cnt
);
282 for (i
= 0; i
< cnt
; i
++) {
283 unsigned long startaddr
, endaddr
;
285 startaddr
= freep
[i
].kve_start
;
286 endaddr
= freep
[i
].kve_end
;
287 if (h2g_valid(startaddr
)) {
288 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
290 if (h2g_valid(endaddr
)) {
291 endaddr
= h2g(endaddr
);
292 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
294 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
296 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
307 last_brk
= (unsigned long)sbrk(0);
309 f
= fopen("/compat/linux/proc/self/maps", "r");
314 unsigned long startaddr
, endaddr
;
317 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
319 if (n
== 2 && h2g_valid(startaddr
)) {
320 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
322 if (h2g_valid(endaddr
)) {
323 endaddr
= h2g(endaddr
);
327 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
339 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
345 #if defined(CONFIG_USER_ONLY)
346 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
347 # define ALLOC(P, SIZE) \
349 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
350 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
353 # define ALLOC(P, SIZE) \
354 do { P = qemu_mallocz(SIZE); } while (0)
357 /* Level 1. Always allocated. */
358 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
361 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
368 ALLOC(p
, sizeof(void *) * L2_SIZE
);
372 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
380 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
386 return pd
+ (index
& (L2_SIZE
- 1));
389 static inline PageDesc
*page_find(tb_page_addr_t index
)
391 return page_find_alloc(index
, 0);
394 #if !defined(CONFIG_USER_ONLY)
395 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
401 /* Level 1. Always allocated. */
402 lp
= l1_phys_map
+ ((index
>> P_L1_SHIFT
) & (P_L1_SIZE
- 1));
405 for (i
= P_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
411 *lp
= p
= qemu_mallocz(sizeof(void *) * L2_SIZE
);
413 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
424 *lp
= pd
= qemu_malloc(sizeof(PhysPageDesc
) * L2_SIZE
);
426 for (i
= 0; i
< L2_SIZE
; i
++) {
427 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
428 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
432 return pd
+ (index
& (L2_SIZE
- 1));
435 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
437 return phys_page_find_alloc(index
, 0);
440 static void tlb_protect_code(ram_addr_t ram_addr
);
441 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
443 #define mmap_lock() do { } while(0)
444 #define mmap_unlock() do { } while(0)
447 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
449 #if defined(CONFIG_USER_ONLY)
450 /* Currently it is not recommended to allocate big chunks of data in
451 user mode. It will change when a dedicated libc will be used */
452 #define USE_STATIC_CODE_GEN_BUFFER
455 #ifdef USE_STATIC_CODE_GEN_BUFFER
456 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
457 __attribute__((aligned (CODE_GEN_ALIGN
)));
460 static void code_gen_alloc(unsigned long tb_size
)
462 #ifdef USE_STATIC_CODE_GEN_BUFFER
463 code_gen_buffer
= static_code_gen_buffer
;
464 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
465 map_exec(code_gen_buffer
, code_gen_buffer_size
);
467 code_gen_buffer_size
= tb_size
;
468 if (code_gen_buffer_size
== 0) {
469 #if defined(CONFIG_USER_ONLY)
470 /* in user mode, phys_ram_size is not meaningful */
471 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
473 /* XXX: needs adjustments */
474 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
477 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
478 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
479 /* The code gen buffer location may have constraints depending on
480 the host cpu and OS */
481 #if defined(__linux__)
486 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
487 #if defined(__x86_64__)
489 /* Cannot map more than that */
490 if (code_gen_buffer_size
> (800 * 1024 * 1024))
491 code_gen_buffer_size
= (800 * 1024 * 1024);
492 #elif defined(__sparc_v9__)
493 // Map the buffer below 2G, so we can use direct calls and branches
495 start
= (void *) 0x60000000UL
;
496 if (code_gen_buffer_size
> (512 * 1024 * 1024))
497 code_gen_buffer_size
= (512 * 1024 * 1024);
498 #elif defined(__arm__)
499 /* Map the buffer below 32M, so we can use direct calls and branches */
501 start
= (void *) 0x01000000UL
;
502 if (code_gen_buffer_size
> 16 * 1024 * 1024)
503 code_gen_buffer_size
= 16 * 1024 * 1024;
504 #elif defined(__s390x__)
505 /* Map the buffer so that we can use direct calls and branches. */
506 /* We have a +- 4GB range on the branches; leave some slop. */
507 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
508 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
510 start
= (void *)0x90000000UL
;
512 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
513 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
515 if (code_gen_buffer
== MAP_FAILED
) {
516 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
520 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
521 || defined(__DragonFly__) || defined(__OpenBSD__)
525 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
526 #if defined(__x86_64__)
527 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
528 * 0x40000000 is free */
530 addr
= (void *)0x40000000;
531 /* Cannot map more than that */
532 if (code_gen_buffer_size
> (800 * 1024 * 1024))
533 code_gen_buffer_size
= (800 * 1024 * 1024);
534 #elif defined(__sparc_v9__)
535 // Map the buffer below 2G, so we can use direct calls and branches
537 addr
= (void *) 0x60000000UL
;
538 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
539 code_gen_buffer_size
= (512 * 1024 * 1024);
542 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
543 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
545 if (code_gen_buffer
== MAP_FAILED
) {
546 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
551 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
552 map_exec(code_gen_buffer
, code_gen_buffer_size
);
554 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
555 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
556 code_gen_buffer_max_size
= code_gen_buffer_size
-
557 (TCG_MAX_OP_SIZE
* OPC_MAX_SIZE
);
558 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
559 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
562 /* Must be called before using the QEMU cpus. 'tb_size' is the size
563 (in bytes) allocated to the translation buffer. Zero means default
565 void cpu_exec_init_all(unsigned long tb_size
)
568 code_gen_alloc(tb_size
);
569 code_gen_ptr
= code_gen_buffer
;
571 #if !defined(CONFIG_USER_ONLY)
574 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
575 /* There's no guest base to take into account, so go ahead and
576 initialize the prologue now. */
577 tcg_prologue_init(&tcg_ctx
);
581 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
583 static int cpu_common_post_load(void *opaque
, int version_id
)
585 CPUState
*env
= opaque
;
587 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
588 version_id is increased. */
589 env
->interrupt_request
&= ~0x01;
595 static const VMStateDescription vmstate_cpu_common
= {
596 .name
= "cpu_common",
598 .minimum_version_id
= 1,
599 .minimum_version_id_old
= 1,
600 .post_load
= cpu_common_post_load
,
601 .fields
= (VMStateField
[]) {
602 VMSTATE_UINT32(halted
, CPUState
),
603 VMSTATE_UINT32(interrupt_request
, CPUState
),
604 VMSTATE_END_OF_LIST()
609 CPUState
*qemu_get_cpu(int cpu
)
611 CPUState
*env
= first_cpu
;
614 if (env
->cpu_index
== cpu
)
622 void cpu_exec_init(CPUState
*env
)
627 #if defined(CONFIG_USER_ONLY)
630 env
->next_cpu
= NULL
;
633 while (*penv
!= NULL
) {
634 penv
= &(*penv
)->next_cpu
;
637 env
->cpu_index
= cpu_index
;
639 QTAILQ_INIT(&env
->breakpoints
);
640 QTAILQ_INIT(&env
->watchpoints
);
641 #ifndef CONFIG_USER_ONLY
642 env
->thread_id
= qemu_get_thread_id();
645 #if defined(CONFIG_USER_ONLY)
648 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
649 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
650 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
651 cpu_save
, cpu_load
, env
);
655 /* Allocate a new translation block. Flush the translation buffer if
656 too many translation blocks or too much generated code. */
657 static TranslationBlock
*tb_alloc(target_ulong pc
)
659 TranslationBlock
*tb
;
661 if (nb_tbs
>= code_gen_max_blocks
||
662 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
670 void tb_free(TranslationBlock
*tb
)
672 /* In practice this is mostly used for single use temporary TB
673 Ignore the hard cases and just back up if this TB happens to
674 be the last one generated. */
675 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
676 code_gen_ptr
= tb
->tc_ptr
;
681 static inline void invalidate_page_bitmap(PageDesc
*p
)
683 if (p
->code_bitmap
) {
684 qemu_free(p
->code_bitmap
);
685 p
->code_bitmap
= NULL
;
687 p
->code_write_count
= 0;
690 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
692 static void page_flush_tb_1 (int level
, void **lp
)
701 for (i
= 0; i
< L2_SIZE
; ++i
) {
702 pd
[i
].first_tb
= NULL
;
703 invalidate_page_bitmap(pd
+ i
);
707 for (i
= 0; i
< L2_SIZE
; ++i
) {
708 page_flush_tb_1 (level
- 1, pp
+ i
);
713 static void page_flush_tb(void)
716 for (i
= 0; i
< V_L1_SIZE
; i
++) {
717 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
721 /* flush all the translation blocks */
722 /* XXX: tb_flush is currently not thread safe */
723 void tb_flush(CPUState
*env1
)
726 #if defined(DEBUG_FLUSH)
727 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
728 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
730 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
732 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
733 cpu_abort(env1
, "Internal error: code buffer overflow\n");
737 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
738 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
741 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
744 code_gen_ptr
= code_gen_buffer
;
745 /* XXX: flush processor icache at this point if cache flush is
750 #ifdef DEBUG_TB_CHECK
752 static void tb_invalidate_check(target_ulong address
)
754 TranslationBlock
*tb
;
756 address
&= TARGET_PAGE_MASK
;
757 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
758 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
759 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
760 address
>= tb
->pc
+ tb
->size
)) {
761 printf("ERROR invalidate: address=" TARGET_FMT_lx
762 " PC=%08lx size=%04x\n",
763 address
, (long)tb
->pc
, tb
->size
);
769 /* verify that all the pages have correct rights for code */
770 static void tb_page_check(void)
772 TranslationBlock
*tb
;
773 int i
, flags1
, flags2
;
775 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
776 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
777 flags1
= page_get_flags(tb
->pc
);
778 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
779 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
780 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
781 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
789 /* invalidate one TB */
790 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
793 TranslationBlock
*tb1
;
797 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
800 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
804 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
806 TranslationBlock
*tb1
;
812 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
814 *ptb
= tb1
->page_next
[n1
];
817 ptb
= &tb1
->page_next
[n1
];
821 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
823 TranslationBlock
*tb1
, **ptb
;
826 ptb
= &tb
->jmp_next
[n
];
829 /* find tb(n) in circular list */
833 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
834 if (n1
== n
&& tb1
== tb
)
837 ptb
= &tb1
->jmp_first
;
839 ptb
= &tb1
->jmp_next
[n1
];
842 /* now we can suppress tb(n) from the list */
843 *ptb
= tb
->jmp_next
[n
];
845 tb
->jmp_next
[n
] = NULL
;
849 /* reset the jump entry 'n' of a TB so that it is not chained to
851 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
853 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
856 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
861 tb_page_addr_t phys_pc
;
862 TranslationBlock
*tb1
, *tb2
;
864 /* remove the TB from the hash list */
865 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
866 h
= tb_phys_hash_func(phys_pc
);
867 tb_remove(&tb_phys_hash
[h
], tb
,
868 offsetof(TranslationBlock
, phys_hash_next
));
870 /* remove the TB from the page list */
871 if (tb
->page_addr
[0] != page_addr
) {
872 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
873 tb_page_remove(&p
->first_tb
, tb
);
874 invalidate_page_bitmap(p
);
876 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
877 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
878 tb_page_remove(&p
->first_tb
, tb
);
879 invalidate_page_bitmap(p
);
882 tb_invalidated_flag
= 1;
884 /* remove the TB from the hash list */
885 h
= tb_jmp_cache_hash_func(tb
->pc
);
886 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
887 if (env
->tb_jmp_cache
[h
] == tb
)
888 env
->tb_jmp_cache
[h
] = NULL
;
891 /* suppress this TB from the two jump lists */
892 tb_jmp_remove(tb
, 0);
893 tb_jmp_remove(tb
, 1);
895 /* suppress any remaining jumps to this TB */
901 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
902 tb2
= tb1
->jmp_next
[n1
];
903 tb_reset_jump(tb1
, n1
);
904 tb1
->jmp_next
[n1
] = NULL
;
907 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
909 tb_phys_invalidate_count
++;
912 static inline void set_bits(uint8_t *tab
, int start
, int len
)
918 mask
= 0xff << (start
& 7);
919 if ((start
& ~7) == (end
& ~7)) {
921 mask
&= ~(0xff << (end
& 7));
926 start
= (start
+ 8) & ~7;
928 while (start
< end1
) {
933 mask
= ~(0xff << (end
& 7));
939 static void build_page_bitmap(PageDesc
*p
)
941 int n
, tb_start
, tb_end
;
942 TranslationBlock
*tb
;
944 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
949 tb
= (TranslationBlock
*)((long)tb
& ~3);
950 /* NOTE: this is subtle as a TB may span two physical pages */
952 /* NOTE: tb_end may be after the end of the page, but
953 it is not a problem */
954 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
955 tb_end
= tb_start
+ tb
->size
;
956 if (tb_end
> TARGET_PAGE_SIZE
)
957 tb_end
= TARGET_PAGE_SIZE
;
960 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
962 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
963 tb
= tb
->page_next
[n
];
967 TranslationBlock
*tb_gen_code(CPUState
*env
,
968 target_ulong pc
, target_ulong cs_base
,
969 int flags
, int cflags
)
971 TranslationBlock
*tb
;
973 tb_page_addr_t phys_pc
, phys_page2
;
974 target_ulong virt_page2
;
977 phys_pc
= get_page_addr_code(env
, pc
);
980 /* flush must be done */
982 /* cannot fail at this point */
984 /* Don't forget to invalidate previous TB info. */
985 tb_invalidated_flag
= 1;
987 tc_ptr
= code_gen_ptr
;
989 tb
->cs_base
= cs_base
;
992 cpu_gen_code(env
, tb
, &code_gen_size
);
993 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
995 /* check next page if needed */
996 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
998 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
999 phys_page2
= get_page_addr_code(env
, virt_page2
);
1001 tb_link_page(tb
, phys_pc
, phys_page2
);
1005 /* invalidate all TBs which intersect with the target physical page
1006 starting in range [start;end[. NOTE: start and end must refer to
1007 the same physical page. 'is_cpu_write_access' should be true if called
1008 from a real cpu write access: the virtual CPU will exit the current
1009 TB if code is modified inside this TB. */
1010 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1011 int is_cpu_write_access
)
1013 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1014 CPUState
*env
= cpu_single_env
;
1015 tb_page_addr_t tb_start
, tb_end
;
1018 #ifdef TARGET_HAS_PRECISE_SMC
1019 int current_tb_not_found
= is_cpu_write_access
;
1020 TranslationBlock
*current_tb
= NULL
;
1021 int current_tb_modified
= 0;
1022 target_ulong current_pc
= 0;
1023 target_ulong current_cs_base
= 0;
1024 int current_flags
= 0;
1025 #endif /* TARGET_HAS_PRECISE_SMC */
1027 p
= page_find(start
>> TARGET_PAGE_BITS
);
1030 if (!p
->code_bitmap
&&
1031 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1032 is_cpu_write_access
) {
1033 /* build code bitmap */
1034 build_page_bitmap(p
);
1037 /* we remove all the TBs in the range [start, end[ */
1038 /* XXX: see if in some cases it could be faster to invalidate all the code */
1040 while (tb
!= NULL
) {
1042 tb
= (TranslationBlock
*)((long)tb
& ~3);
1043 tb_next
= tb
->page_next
[n
];
1044 /* NOTE: this is subtle as a TB may span two physical pages */
1046 /* NOTE: tb_end may be after the end of the page, but
1047 it is not a problem */
1048 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1049 tb_end
= tb_start
+ tb
->size
;
1051 tb_start
= tb
->page_addr
[1];
1052 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1054 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1055 #ifdef TARGET_HAS_PRECISE_SMC
1056 if (current_tb_not_found
) {
1057 current_tb_not_found
= 0;
1059 if (env
->mem_io_pc
) {
1060 /* now we have a real cpu fault */
1061 current_tb
= tb_find_pc(env
->mem_io_pc
);
1064 if (current_tb
== tb
&&
1065 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1066 /* If we are modifying the current TB, we must stop
1067 its execution. We could be more precise by checking
1068 that the modification is after the current PC, but it
1069 would require a specialized function to partially
1070 restore the CPU state */
1072 current_tb_modified
= 1;
1073 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1074 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1077 #endif /* TARGET_HAS_PRECISE_SMC */
1078 /* we need to do that to handle the case where a signal
1079 occurs while doing tb_phys_invalidate() */
1082 saved_tb
= env
->current_tb
;
1083 env
->current_tb
= NULL
;
1085 tb_phys_invalidate(tb
, -1);
1087 env
->current_tb
= saved_tb
;
1088 if (env
->interrupt_request
&& env
->current_tb
)
1089 cpu_interrupt(env
, env
->interrupt_request
);
1094 #if !defined(CONFIG_USER_ONLY)
1095 /* if no code remaining, no need to continue to use slow writes */
1097 invalidate_page_bitmap(p
);
1098 if (is_cpu_write_access
) {
1099 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1103 #ifdef TARGET_HAS_PRECISE_SMC
1104 if (current_tb_modified
) {
1105 /* we generate a block containing just the instruction
1106 modifying the memory. It will ensure that it cannot modify
1108 env
->current_tb
= NULL
;
1109 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1110 cpu_resume_from_signal(env
, NULL
);
1115 /* len must be <= 8 and start must be a multiple of len */
1116 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1122 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1123 cpu_single_env
->mem_io_vaddr
, len
,
1124 cpu_single_env
->eip
,
1125 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1128 p
= page_find(start
>> TARGET_PAGE_BITS
);
1131 if (p
->code_bitmap
) {
1132 offset
= start
& ~TARGET_PAGE_MASK
;
1133 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1134 if (b
& ((1 << len
) - 1))
1138 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1142 #if !defined(CONFIG_SOFTMMU)
1143 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1144 unsigned long pc
, void *puc
)
1146 TranslationBlock
*tb
;
1149 #ifdef TARGET_HAS_PRECISE_SMC
1150 TranslationBlock
*current_tb
= NULL
;
1151 CPUState
*env
= cpu_single_env
;
1152 int current_tb_modified
= 0;
1153 target_ulong current_pc
= 0;
1154 target_ulong current_cs_base
= 0;
1155 int current_flags
= 0;
1158 addr
&= TARGET_PAGE_MASK
;
1159 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1163 #ifdef TARGET_HAS_PRECISE_SMC
1164 if (tb
&& pc
!= 0) {
1165 current_tb
= tb_find_pc(pc
);
1168 while (tb
!= NULL
) {
1170 tb
= (TranslationBlock
*)((long)tb
& ~3);
1171 #ifdef TARGET_HAS_PRECISE_SMC
1172 if (current_tb
== tb
&&
1173 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1174 /* If we are modifying the current TB, we must stop
1175 its execution. We could be more precise by checking
1176 that the modification is after the current PC, but it
1177 would require a specialized function to partially
1178 restore the CPU state */
1180 current_tb_modified
= 1;
1181 cpu_restore_state(current_tb
, env
, pc
);
1182 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1185 #endif /* TARGET_HAS_PRECISE_SMC */
1186 tb_phys_invalidate(tb
, addr
);
1187 tb
= tb
->page_next
[n
];
1190 #ifdef TARGET_HAS_PRECISE_SMC
1191 if (current_tb_modified
) {
1192 /* we generate a block containing just the instruction
1193 modifying the memory. It will ensure that it cannot modify
1195 env
->current_tb
= NULL
;
1196 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1197 cpu_resume_from_signal(env
, puc
);
1203 /* add the tb in the target page and protect it if necessary */
1204 static inline void tb_alloc_page(TranslationBlock
*tb
,
1205 unsigned int n
, tb_page_addr_t page_addr
)
1208 TranslationBlock
*last_first_tb
;
1210 tb
->page_addr
[n
] = page_addr
;
1211 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1212 tb
->page_next
[n
] = p
->first_tb
;
1213 last_first_tb
= p
->first_tb
;
1214 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1215 invalidate_page_bitmap(p
);
1217 #if defined(TARGET_HAS_SMC) || 1
1219 #if defined(CONFIG_USER_ONLY)
1220 if (p
->flags
& PAGE_WRITE
) {
1225 /* force the host page as non writable (writes will have a
1226 page fault + mprotect overhead) */
1227 page_addr
&= qemu_host_page_mask
;
1229 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1230 addr
+= TARGET_PAGE_SIZE
) {
1232 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1236 p2
->flags
&= ~PAGE_WRITE
;
1238 mprotect(g2h(page_addr
), qemu_host_page_size
,
1239 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1240 #ifdef DEBUG_TB_INVALIDATE
1241 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1246 /* if some code is already present, then the pages are already
1247 protected. So we handle the case where only the first TB is
1248 allocated in a physical page */
1249 if (!last_first_tb
) {
1250 tlb_protect_code(page_addr
);
1254 #endif /* TARGET_HAS_SMC */
1257 /* add a new TB and link it to the physical page tables. phys_page2 is
1258 (-1) to indicate that only one page contains the TB. */
1259 void tb_link_page(TranslationBlock
*tb
,
1260 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1263 TranslationBlock
**ptb
;
1265 /* Grab the mmap lock to stop another thread invalidating this TB
1266 before we are done. */
1268 /* add in the physical hash table */
1269 h
= tb_phys_hash_func(phys_pc
);
1270 ptb
= &tb_phys_hash
[h
];
1271 tb
->phys_hash_next
= *ptb
;
1274 /* add in the page list */
1275 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1276 if (phys_page2
!= -1)
1277 tb_alloc_page(tb
, 1, phys_page2
);
1279 tb
->page_addr
[1] = -1;
1281 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1282 tb
->jmp_next
[0] = NULL
;
1283 tb
->jmp_next
[1] = NULL
;
1285 /* init original jump addresses */
1286 if (tb
->tb_next_offset
[0] != 0xffff)
1287 tb_reset_jump(tb
, 0);
1288 if (tb
->tb_next_offset
[1] != 0xffff)
1289 tb_reset_jump(tb
, 1);
1291 #ifdef DEBUG_TB_CHECK
1297 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1298 tb[1].tc_ptr. Return NULL if not found */
1299 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1301 int m_min
, m_max
, m
;
1303 TranslationBlock
*tb
;
1307 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1308 tc_ptr
>= (unsigned long)code_gen_ptr
)
1310 /* binary search (cf Knuth) */
1313 while (m_min
<= m_max
) {
1314 m
= (m_min
+ m_max
) >> 1;
1316 v
= (unsigned long)tb
->tc_ptr
;
1319 else if (tc_ptr
< v
) {
1328 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1330 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1332 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1335 tb1
= tb
->jmp_next
[n
];
1337 /* find head of list */
1340 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1343 tb1
= tb1
->jmp_next
[n1
];
1345 /* we are now sure now that tb jumps to tb1 */
1348 /* remove tb from the jmp_first list */
1349 ptb
= &tb_next
->jmp_first
;
1353 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1354 if (n1
== n
&& tb1
== tb
)
1356 ptb
= &tb1
->jmp_next
[n1
];
1358 *ptb
= tb
->jmp_next
[n
];
1359 tb
->jmp_next
[n
] = NULL
;
1361 /* suppress the jump to next tb in generated code */
1362 tb_reset_jump(tb
, n
);
1364 /* suppress jumps in the tb on which we could have jumped */
1365 tb_reset_jump_recursive(tb_next
);
1369 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1371 tb_reset_jump_recursive2(tb
, 0);
1372 tb_reset_jump_recursive2(tb
, 1);
1375 #if defined(TARGET_HAS_ICE)
1376 #if defined(CONFIG_USER_ONLY)
1377 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1379 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1382 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1384 target_phys_addr_t addr
;
1386 ram_addr_t ram_addr
;
1389 addr
= cpu_get_phys_page_debug(env
, pc
);
1390 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1392 pd
= IO_MEM_UNASSIGNED
;
1394 pd
= p
->phys_offset
;
1396 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1397 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1400 #endif /* TARGET_HAS_ICE */
1402 #if defined(CONFIG_USER_ONLY)
1403 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1408 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1409 int flags
, CPUWatchpoint
**watchpoint
)
1414 /* Add a watchpoint. */
1415 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1416 int flags
, CPUWatchpoint
**watchpoint
)
1418 target_ulong len_mask
= ~(len
- 1);
1421 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1422 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1423 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1424 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1427 wp
= qemu_malloc(sizeof(*wp
));
1430 wp
->len_mask
= len_mask
;
1433 /* keep all GDB-injected watchpoints in front */
1435 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1437 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1439 tlb_flush_page(env
, addr
);
1446 /* Remove a specific watchpoint. */
1447 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1450 target_ulong len_mask
= ~(len
- 1);
1453 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1454 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1455 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1456 cpu_watchpoint_remove_by_ref(env
, wp
);
1463 /* Remove a specific watchpoint by reference. */
1464 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1466 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1468 tlb_flush_page(env
, watchpoint
->vaddr
);
1470 qemu_free(watchpoint
);
1473 /* Remove all matching watchpoints. */
1474 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1476 CPUWatchpoint
*wp
, *next
;
1478 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1479 if (wp
->flags
& mask
)
1480 cpu_watchpoint_remove_by_ref(env
, wp
);
1485 /* Add a breakpoint. */
1486 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1487 CPUBreakpoint
**breakpoint
)
1489 #if defined(TARGET_HAS_ICE)
1492 bp
= qemu_malloc(sizeof(*bp
));
1497 /* keep all GDB-injected breakpoints in front */
1499 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1501 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1503 breakpoint_invalidate(env
, pc
);
1513 /* Remove a specific breakpoint. */
1514 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1516 #if defined(TARGET_HAS_ICE)
1519 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1520 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1521 cpu_breakpoint_remove_by_ref(env
, bp
);
1531 /* Remove a specific breakpoint by reference. */
1532 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1534 #if defined(TARGET_HAS_ICE)
1535 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1537 breakpoint_invalidate(env
, breakpoint
->pc
);
1539 qemu_free(breakpoint
);
1543 /* Remove all matching breakpoints. */
1544 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1546 #if defined(TARGET_HAS_ICE)
1547 CPUBreakpoint
*bp
, *next
;
1549 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1550 if (bp
->flags
& mask
)
1551 cpu_breakpoint_remove_by_ref(env
, bp
);
1556 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1557 CPU loop after each instruction */
1558 void cpu_single_step(CPUState
*env
, int enabled
)
1560 #if defined(TARGET_HAS_ICE)
1561 if (env
->singlestep_enabled
!= enabled
) {
1562 env
->singlestep_enabled
= enabled
;
1564 kvm_update_guest_debug(env
, 0);
1566 /* must flush all the translated code to avoid inconsistencies */
1567 /* XXX: only flush what is necessary */
1574 /* enable or disable low levels log */
1575 void cpu_set_log(int log_flags
)
1577 loglevel
= log_flags
;
1578 if (loglevel
&& !logfile
) {
1579 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1581 perror(logfilename
);
1584 #if !defined(CONFIG_SOFTMMU)
1585 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1587 static char logfile_buf
[4096];
1588 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1590 #elif !defined(_WIN32)
1591 /* Win32 doesn't support line-buffering and requires size >= 2 */
1592 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1596 if (!loglevel
&& logfile
) {
1602 void cpu_set_log_filename(const char *filename
)
1604 logfilename
= strdup(filename
);
1609 cpu_set_log(loglevel
);
1612 static void cpu_unlink_tb(CPUState
*env
)
1614 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1615 problem and hope the cpu will stop of its own accord. For userspace
1616 emulation this often isn't actually as bad as it sounds. Often
1617 signals are used primarily to interrupt blocking syscalls. */
1618 TranslationBlock
*tb
;
1619 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1621 spin_lock(&interrupt_lock
);
1622 tb
= env
->current_tb
;
1623 /* if the cpu is currently executing code, we must unlink it and
1624 all the potentially executing TB */
1626 env
->current_tb
= NULL
;
1627 tb_reset_jump_recursive(tb
);
1629 spin_unlock(&interrupt_lock
);
1632 #ifndef CONFIG_USER_ONLY
1633 /* mask must never be zero, except for A20 change call */
1634 static void tcg_handle_interrupt(CPUState
*env
, int mask
)
1638 old_mask
= env
->interrupt_request
;
1639 env
->interrupt_request
|= mask
;
1642 * If called from iothread context, wake the target cpu in
1645 if (!qemu_cpu_is_self(env
)) {
1651 env
->icount_decr
.u16
.high
= 0xffff;
1653 && (mask
& ~old_mask
) != 0) {
1654 cpu_abort(env
, "Raised interrupt while not in I/O function");
1661 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1663 #else /* CONFIG_USER_ONLY */
1665 void cpu_interrupt(CPUState
*env
, int mask
)
1667 env
->interrupt_request
|= mask
;
1670 #endif /* CONFIG_USER_ONLY */
1672 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1674 env
->interrupt_request
&= ~mask
;
1677 void cpu_exit(CPUState
*env
)
1679 env
->exit_request
= 1;
1683 const CPULogItem cpu_log_items
[] = {
1684 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1685 "show generated host assembly code for each compiled TB" },
1686 { CPU_LOG_TB_IN_ASM
, "in_asm",
1687 "show target assembly code for each compiled TB" },
1688 { CPU_LOG_TB_OP
, "op",
1689 "show micro ops for each compiled TB" },
1690 { CPU_LOG_TB_OP_OPT
, "op_opt",
1693 "before eflags optimization and "
1695 "after liveness analysis" },
1696 { CPU_LOG_INT
, "int",
1697 "show interrupts/exceptions in short format" },
1698 { CPU_LOG_EXEC
, "exec",
1699 "show trace before each executed TB (lots of logs)" },
1700 { CPU_LOG_TB_CPU
, "cpu",
1701 "show CPU state before block translation" },
1703 { CPU_LOG_PCALL
, "pcall",
1704 "show protected mode far calls/returns/exceptions" },
1705 { CPU_LOG_RESET
, "cpu_reset",
1706 "show CPU state before CPU resets" },
1709 { CPU_LOG_IOPORT
, "ioport",
1710 "show all i/o ports accesses" },
1715 #ifndef CONFIG_USER_ONLY
1716 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1717 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1719 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1721 ram_addr_t phys_offset
)
1723 CPUPhysMemoryClient
*client
;
1724 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1725 client
->set_memory(client
, start_addr
, size
, phys_offset
);
1729 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1730 target_phys_addr_t end
)
1732 CPUPhysMemoryClient
*client
;
1733 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1734 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1741 static int cpu_notify_migration_log(int enable
)
1743 CPUPhysMemoryClient
*client
;
1744 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1745 int r
= client
->migration_log(client
, enable
);
1752 static void phys_page_for_each_1(CPUPhysMemoryClient
*client
,
1753 int level
, void **lp
)
1761 PhysPageDesc
*pd
= *lp
;
1762 for (i
= 0; i
< L2_SIZE
; ++i
) {
1763 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1764 client
->set_memory(client
, pd
[i
].region_offset
,
1765 TARGET_PAGE_SIZE
, pd
[i
].phys_offset
);
1770 for (i
= 0; i
< L2_SIZE
; ++i
) {
1771 phys_page_for_each_1(client
, level
- 1, pp
+ i
);
1776 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1779 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1780 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1785 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1787 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1788 phys_page_for_each(client
);
1791 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1793 QLIST_REMOVE(client
, list
);
1797 static int cmp1(const char *s1
, int n
, const char *s2
)
1799 if (strlen(s2
) != n
)
1801 return memcmp(s1
, s2
, n
) == 0;
1804 /* takes a comma separated list of log masks. Return 0 if error. */
1805 int cpu_str_to_log_mask(const char *str
)
1807 const CPULogItem
*item
;
1814 p1
= strchr(p
, ',');
1817 if(cmp1(p
,p1
-p
,"all")) {
1818 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1822 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1823 if (cmp1(p
, p1
- p
, item
->name
))
1837 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1844 fprintf(stderr
, "qemu: fatal: ");
1845 vfprintf(stderr
, fmt
, ap
);
1846 fprintf(stderr
, "\n");
1848 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1850 cpu_dump_state(env
, stderr
, fprintf
, 0);
1852 if (qemu_log_enabled()) {
1853 qemu_log("qemu: fatal: ");
1854 qemu_log_vprintf(fmt
, ap2
);
1857 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1859 log_cpu_state(env
, 0);
1866 #if defined(CONFIG_USER_ONLY)
1868 struct sigaction act
;
1869 sigfillset(&act
.sa_mask
);
1870 act
.sa_handler
= SIG_DFL
;
1871 sigaction(SIGABRT
, &act
, NULL
);
1877 CPUState
*cpu_copy(CPUState
*env
)
1879 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1880 CPUState
*next_cpu
= new_env
->next_cpu
;
1881 int cpu_index
= new_env
->cpu_index
;
1882 #if defined(TARGET_HAS_ICE)
1887 memcpy(new_env
, env
, sizeof(CPUState
));
1889 /* Preserve chaining and index. */
1890 new_env
->next_cpu
= next_cpu
;
1891 new_env
->cpu_index
= cpu_index
;
1893 /* Clone all break/watchpoints.
1894 Note: Once we support ptrace with hw-debug register access, make sure
1895 BP_CPU break/watchpoints are handled correctly on clone. */
1896 QTAILQ_INIT(&env
->breakpoints
);
1897 QTAILQ_INIT(&env
->watchpoints
);
1898 #if defined(TARGET_HAS_ICE)
1899 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1900 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1902 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1903 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1911 #if !defined(CONFIG_USER_ONLY)
1913 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1917 /* Discard jump cache entries for any tb which might potentially
1918 overlap the flushed page. */
1919 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1920 memset (&env
->tb_jmp_cache
[i
], 0,
1921 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1923 i
= tb_jmp_cache_hash_page(addr
);
1924 memset (&env
->tb_jmp_cache
[i
], 0,
1925 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1928 static CPUTLBEntry s_cputlb_empty_entry
= {
1935 /* NOTE: if flush_global is true, also flush global entries (not
1937 void tlb_flush(CPUState
*env
, int flush_global
)
1941 #if defined(DEBUG_TLB)
1942 printf("tlb_flush:\n");
1944 /* must reset current TB so that interrupts cannot modify the
1945 links while we are modifying them */
1946 env
->current_tb
= NULL
;
1948 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1950 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1951 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1955 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1957 env
->tlb_flush_addr
= -1;
1958 env
->tlb_flush_mask
= 0;
1962 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1964 if (addr
== (tlb_entry
->addr_read
&
1965 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1966 addr
== (tlb_entry
->addr_write
&
1967 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1968 addr
== (tlb_entry
->addr_code
&
1969 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1970 *tlb_entry
= s_cputlb_empty_entry
;
1974 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1979 #if defined(DEBUG_TLB)
1980 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1982 /* Check if we need to flush due to large pages. */
1983 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1984 #if defined(DEBUG_TLB)
1985 printf("tlb_flush_page: forced full flush ("
1986 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1987 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1992 /* must reset current TB so that interrupts cannot modify the
1993 links while we are modifying them */
1994 env
->current_tb
= NULL
;
1996 addr
&= TARGET_PAGE_MASK
;
1997 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1998 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1999 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
2001 tlb_flush_jmp_cache(env
, addr
);
2004 /* update the TLBs so that writes to code in the virtual page 'addr'
2006 static void tlb_protect_code(ram_addr_t ram_addr
)
2008 cpu_physical_memory_reset_dirty(ram_addr
,
2009 ram_addr
+ TARGET_PAGE_SIZE
,
2013 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2014 tested for self modifying code */
2015 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
2018 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
2021 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
2022 unsigned long start
, unsigned long length
)
2025 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2026 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
2027 if ((addr
- start
) < length
) {
2028 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
2033 /* Note: start and end must be within the same ram block. */
2034 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
2038 unsigned long length
, start1
;
2041 start
&= TARGET_PAGE_MASK
;
2042 end
= TARGET_PAGE_ALIGN(end
);
2044 length
= end
- start
;
2047 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
2049 /* we modify the TLB cache so that the dirty bit will be set again
2050 when accessing the range */
2051 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
2052 /* Chek that we don't span multiple blocks - this breaks the
2053 address comparisons below. */
2054 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
2055 != (end
- 1) - start
) {
2059 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2061 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2062 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2063 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2069 int cpu_physical_memory_set_dirty_tracking(int enable
)
2072 in_migration
= enable
;
2073 ret
= cpu_notify_migration_log(!!enable
);
2077 int cpu_physical_memory_get_dirty_tracking(void)
2079 return in_migration
;
2082 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2083 target_phys_addr_t end_addr
)
2087 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2091 int cpu_physical_log_start(target_phys_addr_t start_addr
,
2094 CPUPhysMemoryClient
*client
;
2095 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2096 if (client
->log_start
) {
2097 int r
= client
->log_start(client
, start_addr
, size
);
2106 int cpu_physical_log_stop(target_phys_addr_t start_addr
,
2109 CPUPhysMemoryClient
*client
;
2110 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2111 if (client
->log_stop
) {
2112 int r
= client
->log_stop(client
, start_addr
, size
);
2121 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2123 ram_addr_t ram_addr
;
2126 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2127 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2128 + tlb_entry
->addend
);
2129 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2130 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2131 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2136 /* update the TLB according to the current state of the dirty bits */
2137 void cpu_tlb_update_dirty(CPUState
*env
)
2141 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2142 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2143 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2147 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2149 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2150 tlb_entry
->addr_write
= vaddr
;
2153 /* update the TLB corresponding to virtual page vaddr
2154 so that it is no longer dirty */
2155 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2160 vaddr
&= TARGET_PAGE_MASK
;
2161 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2162 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2163 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2166 /* Our TLB does not support large pages, so remember the area covered by
2167 large pages and trigger a full TLB flush if these are invalidated. */
2168 static void tlb_add_large_page(CPUState
*env
, target_ulong vaddr
,
2171 target_ulong mask
= ~(size
- 1);
2173 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
2174 env
->tlb_flush_addr
= vaddr
& mask
;
2175 env
->tlb_flush_mask
= mask
;
2178 /* Extend the existing region to include the new page.
2179 This is a compromise between unnecessary flushes and the cost
2180 of maintaining a full variable size TLB. */
2181 mask
&= env
->tlb_flush_mask
;
2182 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
2185 env
->tlb_flush_addr
&= mask
;
2186 env
->tlb_flush_mask
= mask
;
2189 /* Add a new TLB entry. At most one entry for a given virtual address
2190 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2191 supplied size is only used by tlb_flush_page. */
2192 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2193 target_phys_addr_t paddr
, int prot
,
2194 int mmu_idx
, target_ulong size
)
2199 target_ulong address
;
2200 target_ulong code_address
;
2201 unsigned long addend
;
2204 target_phys_addr_t iotlb
;
2206 assert(size
>= TARGET_PAGE_SIZE
);
2207 if (size
!= TARGET_PAGE_SIZE
) {
2208 tlb_add_large_page(env
, vaddr
, size
);
2210 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2212 pd
= IO_MEM_UNASSIGNED
;
2214 pd
= p
->phys_offset
;
2216 #if defined(DEBUG_TLB)
2217 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2218 " prot=%x idx=%d pd=0x%08lx\n",
2219 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2223 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2224 /* IO memory case (romd handled later) */
2225 address
|= TLB_MMIO
;
2227 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2228 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2230 iotlb
= pd
& TARGET_PAGE_MASK
;
2231 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2232 iotlb
|= IO_MEM_NOTDIRTY
;
2234 iotlb
|= IO_MEM_ROM
;
2236 /* IO handlers are currently passed a physical address.
2237 It would be nice to pass an offset from the base address
2238 of that region. This would avoid having to special case RAM,
2239 and avoid full address decoding in every device.
2240 We can't use the high bits of pd for this because
2241 IO_MEM_ROMD uses these as a ram address. */
2242 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2244 iotlb
+= p
->region_offset
;
2250 code_address
= address
;
2251 /* Make accesses to pages with watchpoints go via the
2252 watchpoint trap routines. */
2253 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2254 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2255 /* Avoid trapping reads of pages with a write breakpoint. */
2256 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2257 iotlb
= io_mem_watch
+ paddr
;
2258 address
|= TLB_MMIO
;
2264 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2265 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2266 te
= &env
->tlb_table
[mmu_idx
][index
];
2267 te
->addend
= addend
- vaddr
;
2268 if (prot
& PAGE_READ
) {
2269 te
->addr_read
= address
;
2274 if (prot
& PAGE_EXEC
) {
2275 te
->addr_code
= code_address
;
2279 if (prot
& PAGE_WRITE
) {
2280 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2281 (pd
& IO_MEM_ROMD
)) {
2282 /* Write access calls the I/O callback. */
2283 te
->addr_write
= address
| TLB_MMIO
;
2284 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2285 !cpu_physical_memory_is_dirty(pd
)) {
2286 te
->addr_write
= address
| TLB_NOTDIRTY
;
2288 te
->addr_write
= address
;
2291 te
->addr_write
= -1;
2297 void tlb_flush(CPUState
*env
, int flush_global
)
2301 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2306 * Walks guest process memory "regions" one by one
2307 * and calls callback function 'fn' for each region.
2310 struct walk_memory_regions_data
2312 walk_memory_regions_fn fn
;
2314 unsigned long start
;
2318 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2319 abi_ulong end
, int new_prot
)
2321 if (data
->start
!= -1ul) {
2322 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2328 data
->start
= (new_prot
? end
: -1ul);
2329 data
->prot
= new_prot
;
2334 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2335 abi_ulong base
, int level
, void **lp
)
2341 return walk_memory_regions_end(data
, base
, 0);
2346 for (i
= 0; i
< L2_SIZE
; ++i
) {
2347 int prot
= pd
[i
].flags
;
2349 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2350 if (prot
!= data
->prot
) {
2351 rc
= walk_memory_regions_end(data
, pa
, prot
);
2359 for (i
= 0; i
< L2_SIZE
; ++i
) {
2360 pa
= base
| ((abi_ulong
)i
<<
2361 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2362 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2372 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2374 struct walk_memory_regions_data data
;
2382 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2383 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2384 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2390 return walk_memory_regions_end(&data
, 0, 0);
2393 static int dump_region(void *priv
, abi_ulong start
,
2394 abi_ulong end
, unsigned long prot
)
2396 FILE *f
= (FILE *)priv
;
2398 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2399 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2400 start
, end
, end
- start
,
2401 ((prot
& PAGE_READ
) ? 'r' : '-'),
2402 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2403 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2408 /* dump memory mappings */
2409 void page_dump(FILE *f
)
2411 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2412 "start", "end", "size", "prot");
2413 walk_memory_regions(f
, dump_region
);
2416 int page_get_flags(target_ulong address
)
2420 p
= page_find(address
>> TARGET_PAGE_BITS
);
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
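/* Illustrative sketch (not part of the original source): typical user-mode
 * callers validate guest access rights before touching guest memory.  The
 * guest address and length below are hypothetical. */
#if 0
static int can_copy_to_guest(target_ulong guest_buf, target_ulong size)
{
    /* page_check_range() returns 0 when every page in the range is valid
       and carries the requested protection, -1 otherwise. */
    if (page_check_range(guest_buf, size, PAGE_WRITE) < 0) {
        return 0;
    }
    /* page_get_flags() answers the same question for a single page. */
    return (page_get_flags(guest_buf) & PAGE_VALID) != 0;
}
#endif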
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
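/* Illustrative sketch (not part of the original source): how board code
 * typically uses this interface.  The base addresses and sizes below are
 * hypothetical; real callers usually go through the
 * cpu_register_physical_memory() convenience wrapper, which passes a
 * region_offset of 0. */
#if 0
static void example_map_board_memory(ram_addr_t ram_offset,
                                     ram_addr_t rom_offset,
                                     int mmio_index)
{
    /* 64MB of RAM at physical address 0 */
    cpu_register_physical_memory_offset(0x00000000, 64 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);
    /* a ROM page; guest writes go to the I/O callback */
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        rom_offset | IO_MEM_ROM, 0);
    /* an MMIO region whose handlers came from cpu_register_io_memory() */
    cpu_register_physical_memory_offset(0x20000000, 0x1000,
                                        mmio_index, 0);
}
#endif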
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
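/* Illustrative sketch (not part of the original source): a device model
 * typically allocates a RAM backing block and then maps it into the guest
 * physical address space.  "mydev.vram", the size and the base address below
 * are hypothetical. */
#if 0
static void example_init_vram(target_phys_addr_t base)
{
    ram_addr_t vram_offset;

    vram_offset = qemu_ram_alloc(NULL, "mydev.vram", 4 * 1024 * 1024);
    cpu_register_physical_memory_offset(base, 4 * 1024 * 1024,
                                        vram_offset | IO_MEM_RAM, 0);
}
#endif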
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
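/* Illustrative sketch (not part of the original source): a display device
 * that allocated its own VRAM block may resolve a host pointer into it and
 * fill it directly, because it knows it stays within the block.  The names
 * below are hypothetical. */
#if 0
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);

    memset(p, 0, vram_size);    /* device-local access only, not guest DMA */
}
#endif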
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }

    return ram_addr;
}
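/* Illustrative sketch (not part of the original source): round-tripping a
 * host pointer back to its ram offset, as the softmmu slow paths do.  The
 * function and variable names are hypothetical. */
#if 0
static int example_is_guest_ram(void *host_ptr, ram_addr_t *offset)
{
    /* Returns 0 and fills *offset when host_ptr points into guest RAM,
       -1 otherwise; the _nofail variant would abort instead of failing. */
    return qemu_ram_addr_from_host(host_ptr, offset);
}
#endif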
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
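/* Illustrative sketch (not part of the original source): watchpoints are
 * normally installed through the public cpu_watchpoint_insert() interface
 * (e.g. by the gdbstub); the handlers above only fire once the TLB trick has
 * routed an access through io_mem_watch.  The address and length below are
 * hypothetical. */
#if 0
static void example_watch_guest_word(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        fprintf(stderr, "could not set watchpoint\n");
    }
}
#endif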
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3] = {
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
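/* Illustrative sketch (not part of the original source): how a device
 * registers MMIO callbacks and maps them.  The handler names, register
 * layout and base address below are hypothetical; a genuinely big-endian
 * device would pass DEVICE_BIG_ENDIAN and rely on the swapendian wrappers
 * above. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                            /* read a device register */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* write a device register */
}

/* NULL entries fall back to the unassigned handlers, per the comment above. */
static CPUReadMemoryFunc * const mydev_read[3]   = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc * const mydev_write[3] = { NULL, NULL, mydev_writel };

static void example_map_mydev(target_phys_addr_t base, void *state)
{
    int io_index = cpu_register_io_memory(mydev_read, mydev_write, state,
                                          DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory_offset(base, 0x1000, io_index, 0);
}
#endif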
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
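/* Illustrative sketch (not part of the original source): zero-copy DMA for
 * a device model, falling back to a retry callback when the single bounce
 * buffer is already in use.  The callback and length handling below are
 * hypothetical. */
#if 0
static void example_dma_read_from_guest(target_phys_addr_t guest_pa,
                                        target_phys_addr_t want,
                                        void (*retry)(void *), void *opaque)
{
    target_phys_addr_t plen = want;
    void *host = cpu_physical_memory_map(guest_pa, &plen, 0 /* read */);

    if (!host) {
        /* Bounce buffer busy: ask to be called back when unmap runs. */
        cpu_register_map_client(opaque, retry);
        return;
    }
    /* ... consume plen bytes at 'host' (may be less than 'want') ... */
    cpu_physical_memory_unmap(host, plen, 0, plen);
}
#endif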
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif