/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
/* current CPU in the current thread. It is only valid
   inside cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
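/*
 * Worked example (editor's note, not in the original source): assuming
 * TARGET_PHYS_ADDR_SPACE_BITS = 36, TARGET_PAGE_BITS = 12 and L2_BITS = 10,
 * 36 - 12 = 24 index bits remain and 24 % 10 = 4, so P_L1_BITS_REM is 4.
 * Since 4 is not < 4 it is used directly: a 16-entry L1 table sits above
 * two 1024-entry lower levels (4 + 10 + 10 = 24 bits), and
 * P_L1_SHIFT = 36 - 12 - 4 = 20.
 */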
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#ifdef _WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
    struct kinfo_vmentry *freep;

    freep = kinfo_getvmmap(getpid(), &cnt);
    for (i = 0; i < cnt; i++) {
        unsigned long startaddr, endaddr;

        startaddr = freep[i].kve_start;
        endaddr = freep[i].kve_end;
        if (h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
    last_brk = (unsigned long)sbrk(0);

    f = fopen("/compat/linux/proc/self/maps", "r");
        unsigned long startaddr, endaddr;

        n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
        if (n == 2 && h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
            page_set_flags(startaddr, endaddr, PAGE_RESERVED);
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));
}
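/*
 * Editor's note (not in the original source): l1_map is the root of a radix
 * tree.  The top V_L1_BITS of the page index select the L1 slot, each loop
 * iteration consumes another L2_BITS, and the final level is a contiguous
 * array of L2_SIZE PageDesc entries indexed by the low bits.  With alloc == 0
 * the walk simply returns NULL wherever a level is missing.
 */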
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }

    return pd + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    /* Map the buffer below 2G, so we can use direct calls and branches */
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Map the buffer below 32M, so we can use direct calls and branches */
    start = (void *) 0x01000000UL;
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
    /* Map the buffer so that we can use direct calls and branches.  */
    /* We have a +- 4GB range on the branches; leave some slop.  */
    if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
        code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
    }
    start = (void *)0x90000000UL;
#endif
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    /* Map the buffer below 2G, so we can use direct calls and branches */
    addr = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024)) {
        code_gen_buffer_size = (512 * 1024 * 1024);
    }
#endif
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
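/*
 * Sizing note (editor's addition, not in the original source): with the
 * 32 MB default buffer and an average block size of, say, 128 bytes,
 * roughly 32 MB / 128 = 262144 TranslationBlock descriptors are allocated
 * up front, while code_gen_buffer_max_size keeps one worst-case TB of
 * space free so a translation in progress cannot overrun the buffer.
 */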
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
}

static void page_flush_tb(void)
{
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
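/*
 * Editor's note (not in the original source): the jump lists store a slot
 * number in the two low bits of each pointer, which is why the list walkers
 * mask with ~3 to recover the TranslationBlock pointer and why
 * "(long)tb | 2" marks the fail-safe end-of-list entry above.
 */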
static inline void set_bits(uint8_t *tab, int start, int len)
{
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
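/*
 * Illustrative example (editor's addition, not in the original source):
 * set_bits(bitmap, 10, 4) marks bits 10..13, i.e. bits 2..5 of byte 1.
 * The first mask handles the case where start and end fall in the same
 * byte; otherwise whole bytes are filled and the partial first and last
 * bytes are patched separately.
 */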
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
}
*tb_gen_code(CPUState
*env
,
968 target_ulong pc
, target_ulong cs_base
,
969 int flags
, int cflags
)
971 TranslationBlock
*tb
;
973 tb_page_addr_t phys_pc
, phys_page2
;
974 target_ulong virt_page2
;
977 phys_pc
= get_page_addr_code(env
, pc
);
980 /* flush must be done */
982 /* cannot fail at this point */
984 /* Don't forget to invalidate previous TB info. */
985 tb_invalidated_flag
= 1;
987 tc_ptr
= code_gen_ptr
;
989 tb
->cs_base
= cs_base
;
992 cpu_gen_code(env
, tb
, &code_gen_size
);
993 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
995 /* check next page if needed */
996 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
998 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
999 phys_page2
= get_page_addr_code(env
, virt_page2
);
1001 tb_link_page(tb
, phys_pc
, phys_page2
);
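/*
 * Editor's note (not in the original source): the rounding expression above
 * is the usual (x + align - 1) & ~(align - 1) idiom; for example with
 * CODE_GEN_ALIGN = 16, a pointer just past 0x1001 bytes of generated code
 * is advanced to the next 16-byte boundary before the next TB is emitted.
 */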
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
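/*
 * Editor's note (not in the original source): offset >> 3 selects the
 * bitmap byte and offset & 7 the starting bit, so (b & ((1 << len) - 1))
 * is non-zero exactly when one of the 'len' bytes being written overlaps
 * translated code, in which case the slower range invalidation runs.
 */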
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
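/*
 * Editor's note (not in the original source): the search relies on tbs[]
 * being filled in allocation order, so tc_ptr values are monotonically
 * increasing and a plain binary search over nb_tbs entries finds the
 * block whose generated code contains the host PC.
 */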
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
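/*
 * Illustrative example (editor's addition, not in the original source):
 * for len = 4, len_mask is ~3, so a request at addr 0x1003 fails the
 * (addr & ~len_mask) check because 0x1003 & 3 != 0, while 0x1004 is
 * accepted; only naturally aligned power-of-two sized watchpoints are
 * stored.
 */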
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it is halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}

#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp, target_phys_addr_t addr)
{
    int i;

    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, addr | i << TARGET_PAGE_BITS,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset, false);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i);
    }
}
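/*
 * Worked example (editor's addition, not in the original source): with
 * L2_BITS = 10, TARGET_PAGE_BITS = 12 and a single intermediate level,
 * an L1 index a, intermediate index b and leaf index c are recombined as
 * (((a << 10) | b) << 22) | (c << 12), i.e. the guest physical address of
 * the page reported to the client.
 */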
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
        }
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
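/*
 * Editor's note (not in the original source): a plain memcpy() cannot be
 * trusted for the intrusive QTAILQ break/watchpoint lists, since the copied
 * head and entry pointers would still reference the original CPU's
 * structures; hence the lists are re-initialised and each entry is
 * re-registered on the new CPU through the regular insert functions.
 */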
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
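/*
 * Illustrative example (editor's addition, not in the original source): if
 * the recorded region is vaddr 0x01200000 with mask 0xffe00000 and a new
 * large page at 0x01900000 is added, the loop shifts the mask left until
 * both addresses agree in the masked bits (here 0xff000000), so a later
 * tlb_flush_page() hit anywhere in that range triggers a full flush instead
 * of tracking each page individually.
 */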
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p->region_offset;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
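/*
 * Editor's note (not in the original source): each TLB entry keeps separate
 * read/write/code addresses so a page can, for example, be readable but trap
 * on writes; the low bits set here (TLB_MMIO, TLB_NOTDIRTY) deliberately
 * break the address comparison in the fast path and force the access through
 * the I/O or dirty-tracking slow path.
 */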
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
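/*
 * Editor's note (not in the original source): walk_memory_regions_end() is
 * the coalescing step; a region is reported to 'fn' only when the protection
 * value changes, so runs of pages with identical flags are emitted as a
 * single [start, end) range (see dump_region() below for a typical callback).
 */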
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2473 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2479 /* This function should never be called with addresses outside the
2480 guest address space. If this assert fires, it probably indicates
2481 a missing call to h2g_valid. */
2482 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2483 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2489 if (start
+ len
- 1 < start
) {
2490 /* We've wrapped around. */
    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                       \
    do {                                                                  \
        if (addr > start_addr)                                            \
            start_addr2 = 0;                                              \
        else {                                                            \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;                 \
            if (start_addr2 > 0)                                          \
                need_subpage = 1;                                         \
        }                                                                 \
                                                                          \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)          \
            end_addr2 = TARGET_PAGE_SIZE - 1;                             \
        else {                                                            \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                         \
                need_subpage = 1;                                         \
        }                                                                 \
    } while (0)

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
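/* Illustrative caller-side sketch (not from this file): boards usually go
 * through the cpu_register_physical_memory() wrapper, e.g.
 *
 *     ram_addr_t ram_offset = qemu_ram_alloc(NULL, "board.ram", ram_size);
 *     cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
 *
 * which maps guest-physical [0, ram_size) onto the freshly allocated RAM
 * block.  */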
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
    int flags;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
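/* find_ram_offset() above is a simple best-fit allocator over the ram_addr_t
   space: it picks the smallest gap between already registered RAMBlocks that
   is large enough, so offsets freed by qemu_ram_free() can be reused.  */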
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }
}
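/* The idstr built in qemu_ram_alloc_from_ptr() ("<device path>/<name>") must
   be unique: it is the key used to match RAM blocks between migration source
   and destination, which is why a duplicate registration aborts.  */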
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                                                MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
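/* qemu_get_ram_ptr() moves the matching block to the head of the list because
   most lookups hit the most recently used block; callers that must not
   disturb the list order (for example while iterating over it) use
   qemu_safe_ram_ptr() instead.  */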
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   handlers.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
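/* Illustrative device-side sketch (names are made up, not part of this file):
 *
 *     static CPUReadMemoryFunc * const mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(mydev_read, mydev_write, s,
 *                                     DEVICE_NATIVE_ENDIAN);
 *     cpu_register_physical_memory(base, 0x1000, io);
 *
 * cpu_unregister_io_memory(io) releases the slot again.  */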
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
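/* Callers normally use the cpu_physical_memory_read()/cpu_physical_memory_write()
   helpers, thin wrappers around cpu_physical_memory_rw() with is_write fixed
   to 0 or 1, rather than calling this function directly.  */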
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
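/* Illustrative DMA-style use of the map/unmap pair above (hypothetical caller):
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, is_write);
 *     if (host) {
 *         ... access up to plen bytes at host ...
 *         cpu_physical_memory_unmap(host, plen, is_write, plen);
 *     } else {
 *         cpu_register_map_client(s, retry_cb);
 *         (retry once the bounce buffer is free again)
 *     }
 */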
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif