/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "cache-utils.h"

#if !defined(TARGET_IA64)

#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
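
/* Worked example (an illustration, not target-specific): with 4 KiB target
   pages (TARGET_PAGE_BITS == 12), L2_BITS == 10 and a 32-bit user-mode
   address space, V_L1_BITS_REM == (32 - 12) % 10 == 0, which is below the
   "silly small" threshold of 4, so V_L1_BITS == 10, V_L1_SIZE == 1024 and
   V_L1_SHIFT == 32 - 12 - 10 == 10: the top 10 bits of a page index select
   the L1 slot and the next 10 bits select the leaf PageDesc. */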
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;

#ifdef _WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
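
/* Example of the rounding above (illustrative values): with a 4096-byte host
   page, addr == 0x12345 and size == 0x100, start is rounded down to 0x12000
   and end up to 0x13000, so mprotect() covers every page the range touches. */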
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
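
    /* For instance (illustrative): a 4096-byte host page gives
       qemu_host_page_bits == 12 and qemu_host_page_mask == ~0xfffUL. */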
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
    struct kinfo_vmentry *freep;

    freep = kinfo_getvmmap(getpid(), &cnt);
    for (i = 0; i < cnt; i++) {
        unsigned long startaddr, endaddr;

        startaddr = freep[i].kve_start;
        endaddr = freep[i].kve_end;
        if (h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);

    last_brk = (unsigned long)sbrk(0);

    f = fopen("/compat/linux/proc/self/maps", "r");

        unsigned long startaddr, endaddr;

        n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

        if (n == 2 && h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
            page_set_flags(startaddr, endaddr, PAGE_RESERVED);
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif
    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));
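
/* Note (added for clarity): the walk above consumes the page index in
   L2_BITS-sized chunks, allocating one intermediate level per loop iteration;
   the final "index & (L2_SIZE - 1)" picks the PageDesc inside the leaf array,
   so a lookup touches the L1 table plus V_L1_SHIFT / L2_BITS lower tables. */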
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;

    return pd + (index & (L2_SIZE - 1));
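
/* Note (added for clarity): freshly allocated leaves start out as
   IO_MEM_UNASSIGNED, and region_offset defaults to the page's own physical
   address, so unregistered pages go through the unassigned I/O handlers
   until cpu_register_physical_memory() fills them in. */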
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Map the buffer below 32M, so we can use direct calls and branches */
    start = (void *) 0x01000000UL;
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
    /* Map the buffer so that we can use direct calls and branches.  */
    /* We have a +- 4GB range on the branches; leave some slop.  */
    if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
        code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
    }
    start = (void *)0x90000000UL;
#endif
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#endif
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
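
/* Sizing note (illustrative arithmetic, not a fixed value): with the default
   32 MiB buffer and an average block size of, say, 128 bytes, this allows on
   the order of 256k TranslationBlock descriptors before tb_flush() is forced. */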
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

        if (env->cpu_index == cpu)
void cpu_exec_init(CPUState *env)
{
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifdef _WIN32
    env->thread_id = GetCurrentProcessId();
#else
    env->thread_id = getpid();
#endif
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
}

static void page_flush_tb(void)
{
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
{
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
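
/* Worked example of the masking above (illustrative): setting bits for
   start == 3, len == 7 gives end == 10; the first byte gets the mask
   0xff << 3 == 0xf8 (bits 3..7), and the trailing partial byte is masked
   with ~(0xff << (10 & 7)) == 0x03 (bits 0..1 of the next byte), for a
   total of 7 bits set. */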
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
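
/* Note (added for clarity): the bitmap has one bit per byte of the guest
   page (TARGET_PAGE_SIZE / 8 bytes of bitmap), marking exactly the byte
   ranges occupied by translated code so that tb_invalidate_phys_page_fast()
   can skip writes that do not touch any TB. */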
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;

    phys_pc = get_page_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
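
/* Note (added for clarity): a TB whose last instruction crosses into the
   following page records that second physical page here, so a write to
   either page will find and invalidate the block; otherwise phys_page2 is
   (-1) and only page_addr[0] is linked. */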
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
             cpu_single_env->mem_io_vaddr, len,
             cpu_single_env->eip,
             cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            tb_invalidate_phys_page_range(start, start + len, 1);
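
/* Example of the bitmap test above (illustrative): for a 4-byte write at
   page offset 0x41, offset >> 3 == 8 selects the 9th bitmap byte and
   offset & 7 == 1 shifts it so that bit 0 of 'b' corresponds to offset 0x41;
   b & ((1 << 4) - 1) is then non-zero only if one of bytes 0x41..0x44 holds
   translated code. */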
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }

#endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
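
    /* Note (added for clarity): the low two bits of the jmp_first /
       page_next / first_tb pointers are tag bits, not address bits: a tag
       of 0 or 1 records which of the pointed-to TB's two slots the link
       goes through, while the tag 2 set above marks the end of the circular
       list (the owning TB itself); that is why every list walker strips the
       tag with "& ~3" before dereferencing. */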
#ifdef DEBUG_TB_CHECK

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure now that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    if (kvm_enabled() && !kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);

#ifndef CONFIG_USER_ONLY
    /* If called from iothread context, wake the target cpu in
       case it is halted.  */
    if (!qemu_cpu_self(env)) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        && (mask & ~old_mask) != 0) {
        cpu_abort(env, "Raised interrupt while not in I/O function");
    }

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
    }
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
    }
}
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
1873 struct sigaction act
;
1874 sigfillset(&act
.sa_mask
);
1875 act
.sa_handler
= SIG_DFL
;
1876 sigaction(SIGABRT
, &act
, NULL
);
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
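
/* Example of the mask extension above (illustrative): if a 2 MiB large page
   at 0x00200000 was recorded (mask 0xffe00000) and a second one is added at
   0x00800000, the loop widens the mask until the XOR of the two addresses is
   covered, ending with mask 0xff000000, i.e. a single 16 MiB region whose
   invalidation forces a full TLB flush. */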
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p->region_offset;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    }
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
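
/* Note (added for clarity): a TLB entry stores "addend" such that
   host_address == guest_vaddr + addend for RAM-backed pages, while the
   iotlb entry stores the I/O index / physical offset minus vaddr; the
   TLB_MMIO and TLB_NOTDIRTY bits force the slow path so that device
   handlers and dirty tracking respectively see the access. */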
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    unsigned long start;

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
        return walk_memory_regions_end(data, base, 0);

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
            }
        }
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
        }
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    p = page_find(address >> TARGET_PAGE_BITS);
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
2438 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2444 /* This function should never be called with addresses outside the
2445 guest address space. If this assert fires, it probably indicates
2446 a missing call to h2g_valid. */
2447 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2448 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2454 if (start
+ len
- 1 < start
) {
2455 /* We've wrapped around. */
2459 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2460 start
= start
& TARGET_PAGE_MASK
;
2462 for (addr
= start
, len
= end
- start
;
2464 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2465 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2468 if( !(p
->flags
& PAGE_VALID
) )
2471 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2473 if (flags
& PAGE_WRITE
) {
2474 if (!(p
->flags
& PAGE_WRITE_ORG
))
2476 /* unprotect the page if it was put read-only because it
2477 contains translated code */
2478 if (!(p
->flags
& PAGE_WRITE
)) {
2479 if (!page_unprotect(addr
, 0, NULL
))
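
/* Example (illustrative sketch; the helper name is hypothetical): user-mode
 * callers such as syscall helpers typically validate a guest buffer with
 * page_check_range() before accessing it. */
static int guest_buffer_is_readable(target_ulong guest_addr, target_ulong size)
{
    /* page_check_range() returns 0 when every page in the range has the
       requested PAGE_* flags and a negative value otherwise */
    return page_check_range(guest_addr, size, PAGE_READ) == 0;
}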

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */

    p = page_find(address >> TARGET_PAGE_BITS);

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);

        mprotect((void *)g2h(host_start), qemu_host_page_size,

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)

#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr) \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
            if (start_addr2 > 0) \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
            end_addr2 = TARGET_PAGE_SIZE - 1; \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1) \

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
    target_phys_addr_t addr, end_addr;

    ram_addr_t orig_size = size;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;

    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,

                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                p->region_offset = 0;
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
        region_offset += TARGET_PAGE_SIZE;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_coalesce_mmio_region(addr, size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_uncoalesce_mmio_region(addr, size);

void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

static void *file_ram_alloc(RAMBlock *block,
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {

    fd = mkstemp(filename);
        perror("unable to create backing store for hugepages");

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");

static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);

        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

static ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

ram_addr_t qemu_ram_map(DeviceState *dev, const char *name,
                        ram_addr_t size, void *host)
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

    new_block->host = host;

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(new_block, size, mem_path);
        if (!new_block->host) {
            new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
            madvise(new_block->host, size, MADV_MERGEABLE);
        fprintf(stderr, "-mem-path option unsupported\n");
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
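
/* Example (illustrative sketch; the device name, size and guest address are
 * hypothetical): a board or device model pairs qemu_ram_alloc() with the
 * physical-memory registration call defined earlier in this file. */
static void example_register_ram(DeviceState *dev)
{
    /* allocate 16 MB of host backing for guest RAM and give it an id */
    ram_addr_t offset = qemu_ram_alloc(dev, "example.ram", 16 * 1024 * 1024);

    /* map it at guest physical address 0x10000000; region_offset 0 keeps
       IO offsets relative to the start of the region */
    cpu_register_physical_memory_offset(0x10000000, 16 * 1024 * 1024,
                                        offset | IO_MEM_RAM, 0);
}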

void qemu_ram_unmap(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);

void qemu_ram_free(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
#if defined (__linux__) && !defined(TARGET_S390X)
                munmap(block->host, block->length);
                qemu_vfree(block->host);
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            munmap(block->host, block->length);
            qemu_vfree(block->host);

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            QLIST_REMOVE(block, next);
            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
    ram_addr_t ram_addr;

    if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
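
/* Example (illustrative sketch; 'base' stands for an offset previously
 * returned by qemu_ram_alloc()): the two helpers above form a round trip
 * between a ram_addr_t and a host pointer. */
static void example_ram_round_trip(ram_addr_t base)
{
    void *host = qemu_get_ram_ptr(base);    /* ram offset -> host pointer */
    ram_addr_t back;

    if (do_qemu_ram_addr_from_host(host, &back) == 0) {
        /* on success, 'back' equals 'base' again */
    }
}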

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                cpu_resume_from_signal(env, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);

static CPUReadMemoryFunc * const watch_mem_read[3] = {

static CPUWriteMemoryFunc * const watch_mem_write[3] = {

static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
    return subpage_readlen(opaque, addr, 0);

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
    subpage_writelen(opaque, addr, value, 0);

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
    return subpage_readlen(opaque, addr, 1);

static void subpage_writew (void *opaque, target_phys_addr_t addr,
    subpage_writelen(opaque, addr, value, 1);

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
    return subpage_readlen(opaque, addr, 2);

static void subpage_writel (void *opaque, target_phys_addr_t addr,
    subpage_writelen(opaque, addr, value, 2);

static CPUReadMemoryFunc * const subpage_read[] = {

static CPUWriteMemoryFunc * const subpage_write[] = {

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)

    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);

    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
    mmio = qemu_mallocz(sizeof(subpage_t));

    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);

    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

static int get_free_io_mem_idx(void)
    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {

    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);

    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);

    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
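
/* Example (illustrative sketch; the handler and device names, as well as the
 * guest address, are hypothetical): a device model registers its MMIO
 * handlers with cpu_register_io_memory() and maps the returned token with
 * cpu_register_physical_memory_offset(). */
static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    /* 'addr' is the offset within the registered region */
    return 0xdeadbeef;
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    /* handle a 32-bit guest store; the byte/word slots are left NULL below,
       so they fall back to the unassigned_mem_* handlers */
}

static CPUReadMemoryFunc * const example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,
};

static CPUWriteMemoryFunc * const example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_map_mmio(void *opaque)
{
    int io = cpu_register_io_memory(example_mmio_read, example_mmio_write,
                                    opaque);
    /* map one page of MMIO at guest physical address 0x20000000 */
    cpu_register_physical_memory_offset(0x20000000, TARGET_PAGE_SIZE, io, 0);
}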

void cpu_unregister_io_memory(int io_table_address)
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;

static void io_mem_init(void)
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))

            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);

            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    /* 8 bit write access */
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr) + l);
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
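
/* Example (illustrative sketch; the guest address is hypothetical): callers
 * normally use the cpu_physical_memory_read/write wrappers, which expand to
 * cpu_physical_memory_rw() with is_write set to 0 or 1. */
static void example_peek_guest_ram(void)
{
    uint8_t buf[64];

    /* copy 64 bytes out of guest physical memory at 0x10000000 */
    cpu_physical_memory_rw(0x10000000, buf, sizeof(buf), 0);

    /* write them back, dirtying the pages and invalidating any translated
       code that covered them */
    cpu_physical_memory_rw(0x10000000, buf, sizeof(buf), 1);
}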

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);

    target_phys_addr_t addr;
    target_phys_addr_t len;

static BounceBuffer bounce;

typedef struct MapClient {
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    uint8_t *ret = NULL;
    target_phys_addr_t page;
    unsigned long addr1;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            ptr = bounce.buffer;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        } else if (ret + done != ptr) {

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
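
/* Example (illustrative sketch; names, length and direction are hypothetical):
 * a DMA-style caller maps a guest physical range, uses the returned host
 * pointer and unmaps it again; if the map fails because the single bounce
 * buffer is busy, cpu_register_map_client() can be used to retry later. */
static void example_dma_fill(target_phys_addr_t guest_addr)
{
    target_phys_addr_t plen = 4096;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1);

    if (!host) {
        /* resources exhausted; register a map client and retry later */
        return;
    }

    memset(host, 0, plen);              /* device writes into guest memory */

    /* mark the memory dirty and release the mapping (or bounce buffer) */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}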

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);

uint32_t ldub_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, &val, 1);

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));

void stb_phys(target_phys_addr_t addr, uint32_t val)
    cpu_physical_memory_write(addr, &v, 1);

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));

void stq_phys(target_phys_addr_t addr, uint64_t val)
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
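
/* Example (illustrative sketch; the guest address is hypothetical): the
 * ld*_phys/st*_phys helpers above give word-sized access to guest physical
 * memory and are typically used by device models and page-table walkers.
 * Addresses must be naturally aligned for the 32-bit accessors. */
static void example_patch_guest_word(void)
{
    target_phys_addr_t pa = 0x1000;     /* aligned guest physical address */
    uint32_t v = ldl_phys(pa);          /* 32-bit read (RAM, ROM or MMIO) */

    stl_phys(pa, v | 1);                /* 32-bit write, dirties the page */
}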

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
    target_phys_addr_t phys_addr;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(phys_addr, buf, l);
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
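
/* Example (illustrative sketch; the helper name is hypothetical): this is the
 * kind of call a debugger stub makes to read guest *virtual* memory of a
 * given CPU; the virtual-to-physical translation happens above through
 * cpu_get_phys_page_debug(). */
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *out, int size)
{
    /* returns a negative value if a page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, out, size, 0);
}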

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
    TranslationBlock *tb;
    target_ulong pc, cs_base;

    tb = tb_find_pc((unsigned long)retaddr);
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    cs_base = tb->cs_base;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
        if (tb->tb_next_offset[0] != 0xffff) {
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;

    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    tcg_dump_info(f, cpu_fprintf);
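
/* Example (illustrative sketch): dump_exec_info() is normally driven from the
 * monitor, but any (FILE *, fprintf-like) pair works for quick debugging. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}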

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"