/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
/* log support */
#ifdef _WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
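
/* Look up the PageDesc for a target page index in the multi-level l1_map,
   optionally allocating the intermediate tables and the leaf array on the
   way down when 'alloc' is set. */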
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
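
/* Same multi-level lookup as page_find_alloc(), but over the physical
   address map (l1_phys_map); newly allocated leaves are initialized to
   IO_MEM_UNASSIGNED. */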
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
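
/* Allocate the buffer that receives generated host code: either the static
   array above, or an executable mmap()ed region whose placement depends on
   host CPU/OS branch-range constraints. */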
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
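
/* Return the CPUState whose cpu_index matches 'cpu', or NULL if no such
   CPU has been registered. */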
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
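
/* Register a newly created CPUState: append it to the global CPU list,
   assign the next free cpu_index and, in system emulation builds, hook it
   into savevm/migration. */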
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
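
/* Remove a TB from the physical hash table and the per-page TB lists, and
   unchain every TB that still jumps to it. */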
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
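
/* set_bits() and build_page_bitmap() maintain a per-page bitmap of the
   bytes covered by translated code, so that unrelated writes to a page can
   skip the expensive invalidation path. */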
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
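
/* Translate one block starting at 'pc' and link it into the physical page
   tables; the whole code cache is flushed first if tb_alloc() fails. */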
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it is halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
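
/* System-mode TLB handling follows: tlb_flush_jmp_cache() discards the two
   tb_jmp_cache hash pages that may contain TBs overlapping a flushed
   virtual page. */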
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}
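
/* Re-derive the TLB_NOTDIRTY status of a TLB entry from the current state
   of the dirty bitmap for the RAM page it maps. */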
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
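
/* walk_memory_regions() below iterates the user-mode page flags via the
   multi-level l1_map, merging runs of pages with identical protection into
   single callbacks. */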
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
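
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * dump_region()/page_dump() above are the in-tree users of
 * walk_memory_regions(); this shows another callback written to the same
 * walk_memory_regions_fn signature, accumulating the size of all executable
 * regions.  The guard macro EXEC_C_USAGE_EXAMPLES and the names
 * exec_bytes_region()/count_exec_bytes() are hypothetical.
 */
#ifdef EXEC_C_USAGE_EXAMPLES
static int exec_bytes_region(void *priv, abi_ulong start,
                             abi_ulong end, unsigned long prot)
{
    abi_ulong *total = priv;

    /* accumulate the size of every region mapped with PAGE_EXEC */
    if (prot & PAGE_EXEC)
        *total += end - start;
    return 0;
}

static abi_ulong count_exec_bytes(void)
{
    abi_ulong total = 0;

    walk_memory_regions(&total, exec_bytes_region);
    return total;
}
#endif /* EXEC_C_USAGE_EXAMPLES */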
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must be done before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    /* user-mode emulation does not use the softmmu TLB */
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
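
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows the usual pattern for backing a guest-physical window with RAM
 * using the registration call defined above: allocate a ram_addr_t with
 * qemu_ram_alloc() (defined later in this file) and hand it to
 * cpu_register_physical_memory_log() with region_offset 0 and dirty logging
 * disabled.  The names EXEC_C_USAGE_EXAMPLES, EXAMPLE_RAM_BASE and
 * example_register_ram() are hypothetical.
 */
#ifdef EXEC_C_USAGE_EXAMPLES
#define EXAMPLE_RAM_BASE 0x10000000
static void example_register_ram(void)
{
    ram_addr_t size = 1024 * 1024;              /* 1 MB, page aligned */
    ram_addr_t offset = qemu_ram_alloc(NULL, "example.ram", size);

    /* plain RAM: the low bits of phys_offset are IO_MEM_RAM (0) */
    cpu_register_physical_memory_log(EXAMPLE_RAM_BASE, size,
                                     offset | IO_MEM_RAM, 0, false);
}
#endif /* EXEC_C_USAGE_EXAMPLES */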
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
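
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It contrasts the two allocators above: qemu_ram_alloc() lets this file
 * allocate the host memory, while qemu_ram_alloc_from_ptr() registers a
 * caller-owned buffer (flagged RAM_PREALLOC_MASK, so qemu_ram_free() will
 * not try to release it).  The guard macro and function name are
 * hypothetical.
 */
#ifdef EXEC_C_USAGE_EXAMPLES
static ram_addr_t example_register_prealloc_buffer(void *buf, ram_addr_t size)
{
    /* size is rounded up to the target page size by the allocator */
    return qemu_ram_alloc_from_ptr(NULL, "example.prealloc", size, buf);
}
#endif /* EXEC_C_USAGE_EXAMPLES */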
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* nothing to free: memory is owned by the caller */
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }
}
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                /* caller-provided memory: nothing to remap */
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}

#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }

    return ram_addr;
}
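
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows the intended round trip between a ram_addr_t and a host pointer
 * using the helpers above: qemu_get_ram_ptr() for the forward direction and
 * qemu_ram_addr_from_host() for the reverse.  The guard macro and the
 * function name are hypothetical.
 */
#ifdef EXEC_C_USAGE_EXAMPLES
static int example_ram_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);   /* may reorder the block list */
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back)) {
        return -1;                         /* pointer not inside any RAMBlock */
    }
    return back == addr ? 0 : -1;
}
#endif /* EXEC_C_USAGE_EXAMPLES */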
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
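
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows how a device would use cpu_register_io_memory(): supply
 * byte/word/long callbacks (NULL entries fall back to the unassigned
 * handlers), then map the returned token into the guest physical address
 * space with cpu_register_physical_memory_log().  The guard macro, the
 * example_* names and EXAMPLE_MMIO_BASE are hypothetical.
 */
#ifdef EXEC_C_USAGE_EXAMPLES
#define EXAMPLE_MMIO_BASE 0x20000000

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t *reg = opaque;
    return *reg;                      /* every offset reads the same register */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    uint32_t *reg = opaque;
    *reg = val;
}

static CPUReadMemoryFunc * const example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,   /* only 32-bit access implemented */
};

static CPUWriteMemoryFunc * const example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_register_mmio(uint32_t *reg)
{
    int io = cpu_register_io_memory(example_mmio_read, example_mmio_write,
                                    reg, DEVICE_NATIVE_ENDIAN);

    cpu_register_physical_memory_log(EXAMPLE_MMIO_BASE, TARGET_PAGE_SIZE,
                                     io, 0, false);
}
#endif /* EXEC_C_USAGE_EXAMPLES */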
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
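
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows the intended pairing of cpu_physical_memory_map() and
 * cpu_physical_memory_unmap() for a DMA-style write into guest memory,
 * falling back to cpu_physical_memory_write() when the mapping is not
 * available.  The guard macro and the function name are hypothetical.
 */
#ifdef EXEC_C_USAGE_EXAMPLES
static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *buf, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *mem = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (mem) {
        /* plen may be smaller than requested; copy what was mapped */
        memcpy(mem, buf, plen);
        cpu_physical_memory_unmap(mem, plen, 1, plen);
        buf += plen;
        addr += plen;
        len -= plen;
    }
    if (len) {
        /* bounce buffer busy or MMIO backing: use the slow path */
        cpu_physical_memory_write(addr, (uint8_t *)buf, len);
    }
}
#endif /* EXEC_C_USAGE_EXAMPLES */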
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
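
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows the fixed-width guest-physical accessors above used for a simple
 * read-modify-write of a 32-bit value; the address must be naturally
 * aligned, as the warnings above state.  The guard macro and the function
 * name are hypothetical.
 */
#ifdef EXEC_C_USAGE_EXAMPLES
static void example_set_flag_bit(target_phys_addr_t addr, uint32_t bit)
{
    uint32_t v = ldl_phys(addr);      /* read the current guest word */
    stl_phys(addr, v | bit);          /* write it back with the bit set */
}
#endif /* EXEC_C_USAGE_EXAMPLES */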
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"