/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "cache-utils.h"

#if !defined(TARGET_IA64)

#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    struct RAMBlock *next;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
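/* Reading these macros together: a page index is consumed V_L1_BITS at the
   top level and L2_BITS per lower level, e.g. (illustrative values only,
   assuming L2_BITS == 10 and 4 KB target pages)
       level-1 slot = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1)
       level-i slot = (index >> (i * L2_BITS)) & (L2_SIZE - 1)
   which is exactly how page_find_alloc() and phys_page_find_alloc() below
   walk l1_map and l1_phys_map. */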
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
static const char *logfilename = "qemu.log";
static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
    struct kinfo_vmentry *freep;

    freep = kinfo_getvmmap(getpid(), &cnt);
    for (i = 0; i < cnt; i++) {
        unsigned long startaddr, endaddr;

        startaddr = freep[i].kve_start;
        endaddr = freep[i].kve_end;
        if (h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);

    last_brk = (unsigned long)sbrk(0);

    f = fopen("/compat/linux/proc/self/maps", "r");
        unsigned long startaddr, endaddr;

        n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

        if (n == 2 && h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
            page_set_flags(startaddr, endaddr, PAGE_RESERVED);
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));

    ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(tb_page_addr_t index)
    return page_find_alloc(index, 0);
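/* The virtual-address map is thus a sparse radix tree: intermediate tables
   and PageDesc leaves are allocated lazily, and only when 'alloc' is
   non-zero, so page_find() (alloc == 0) acts as a pure lookup that never
   changes the tree. */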
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));

    *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++) {
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
        pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;

    return pd + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
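/* Freshly allocated leaf entries default to IO_MEM_UNASSIGNED with a
   region_offset equal to the page's own physical address, so lookups on
   unmapped physical pages fall through to the "unassigned memory" handlers
   instead of dereferencing an uninitialised mapping. */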
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Map the buffer below 32M, so we can use direct calls and branches */
    start = (void *) 0x01000000UL;
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
       0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
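/* Sizing note: code_gen_buffer_max_size leaves room for one worst-case
   block (code_gen_max_block_size()), and the TB descriptor array is sized
   from an average-block-size estimate; hitting either limit is not fatal,
   it simply forces a tb_flush() and retranslation. */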
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
CPUState *qemu_get_cpu(int cpu)
    CPUState *env = first_cpu;

    if (env->cpu_index == cpu)

void cpu_exec_init(CPUState *env)
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    env->thread_id = GetCurrentProcessId();
    env->thread_id = getpid();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1 (int level, void **lp)
    for (i = 0; i < L2_SIZE; ++i) {
        pd[i].first_tb = NULL;
        invalidate_page_bitmap(pd + i);
    for (i = 0; i < L2_SIZE; ++i) {
        page_flush_tb_1 (level - 1, pp + i);

static void page_flush_tb(void)
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
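/* After a flush both the per-CPU tb_jmp_cache arrays and the physical hash
   table are zeroed, so no stale pointer into the reclaimed code buffer can
   survive the reset of code_gen_ptr. */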
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

    *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
    ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    *ptb = tb1->page_next[n1];
    ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
    /* find tb(n) in circular list */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    if (n1 == n && tb1 == tb)
    ptb = &tb1->jmp_first;
    ptb = &tb1->jmp_next[n1];
    /* now we can suppress tb(n) from the list */
    *ptb = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb2 = tb1->jmp_next[n1];
    tb_reset_jump(tb1, n1);
    tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;

static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        mask &= ~(0xff << (end & 7));
    start = (start + 8) & ~7;
    while (start < end1) {
    mask = ~(0xff << (end & 7));
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = (TranslationBlock *)((long)tb & ~3);
    /* NOTE: this is subtle as a TB may span two physical pages */
    /* NOTE: tb_end may be after the end of the page, but
       it is not a problem */
    tb_start = tb->pc & ~TARGET_PAGE_MASK;
    tb_end = tb_start + tb->size;
    if (tb_end > TARGET_PAGE_SIZE)
        tb_end = TARGET_PAGE_SIZE;
    tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
    set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
    tb = tb->page_next[n];
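/* The resulting code_bitmap records which bytes of the guest page are
   covered by translated code; tb_invalidate_phys_page_fast() consults it so
   that stores which miss every TB can skip the full range invalidation. */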
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;

    phys_pc = get_page_addr_code(env, pc);
    /* flush must be done */
    /* cannot fail at this point */
    /* Don't forget to invalidate previous TB info.  */
    tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    tb_link_page(tb, phys_pc, phys_page2);
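/* Summary of the generation path: translate the guest code at 'pc' into the
   code buffer with cpu_gen_code(), round code_gen_ptr up to CODE_GEN_ALIGN
   for the next block, then register the TB under both guest pages it may
   straddle via tb_link_page(). */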
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        /* NOTE: tb_end may be after the end of the page, but
           it is not a problem */
        tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
        tb_end = tb_start + tb->size;
        tb_start = tb->page_addr[1];
        tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    invalidate_page_bitmap(p);
    if (is_cpu_write_access) {
        tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
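/* The TARGET_HAS_PRECISE_SMC path above handles a TB that modifies itself:
   the CPU state is rolled back with cpu_restore_state(), a block containing
   just the offending instruction is regenerated, and execution resumes from
   the signal so the store becomes visible before any stale code runs. */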
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
    qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
             cpu_single_env->mem_io_vaddr, len,
             cpu_single_env->eip,
             cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
    tb_invalidate_phys_page_range(start, start + len, 1);

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",

    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);

#endif /* TARGET_HAS_SMC */
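/* Two protection schemes are used here: in user mode the host page is
   mprotect()ed read-only so a guest store faults into page_unprotect(),
   while in system mode tlb_protect_code() makes the softmmu TLB trap the
   write instead. Either way, writes to translated code end up in
   tb_invalidate_phys_page*(). */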
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
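/* The low bits of jmp_first / jmp_next pointers encode which of a TB's two
   jump slots the link belongs to; the value ((long)tb | 2) used above is the
   end-of-list sentinel pointing back at the TB itself, which is why readers
   mask with ~3 before dereferencing. */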
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
    /* find head of list */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb1 = tb1->jmp_next[n1];
    /* we are now sure that tb jumps to tb1 */
    /* remove tb from the jmp_first list */
    ptb = &tb_next->jmp_first;
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    if (n1 == n && tb1 == tb)
    ptb = &tb1->jmp_next[n1];
    *ptb = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;

    /* suppress the jump to next tb in generated code */
    tb_reset_jump(tb, n);

    /* suppress jumps in the tb on which we could have jumped */
    tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = IO_MEM_UNASSIGNED;
    pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    wp = qemu_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
    QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        kvm_update_guest_debug(env, 0);
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        static char logfile_buf[4096];
        setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
static void cpu_unlink_tb(CPUState *env)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    env->current_tb = NULL;
    tb_reset_jump_recursive(tb);
    spin_unlock(&interrupt_lock);
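/* Unchaining the currently executing TB is how an asynchronous event gets a
   tight translated loop to return to the main execution loop: once the
   direct jumps are reset, control falls back to cpu_exec() at the next block
   boundary, where the pending request is noticed. */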
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    if (kvm_enabled() && !kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);

#ifndef CONFIG_USER_ONLY
     * If called from iothread context, wake the target cpu in
    if (!qemu_cpu_self(env)) {
    env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        && (mask & ~old_mask) != 0) {
        cpu_abort(env, "Raised interrupt while not in I/O function");

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;

void cpu_exit(CPUState *env)
    env->exit_request = 1;

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t phys_offset)
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);

static int cpu_notify_migration_log(int enable)
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);

static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
    PhysPageDesc *pd = *lp;
    for (i = 0; i < L2_SIZE; ++i) {
        if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
            client->set_memory(client, pd[i].region_offset,
                               TARGET_PAGE_SIZE, pd[i].phys_offset);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_page_for_each_1(client, level - 1, pp + i);

static void phys_page_for_each(CPUPhysMemoryClient *client)
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
    QLIST_REMOVE(client, list);
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

    p1 = strchr(p, ',');
    if(cmp1(p,p1-p,"all")) {
        for(item = cpu_log_items; item->mask != 0; item++) {
    for(item = cpu_log_items; item->mask != 0; item++) {
        if (cmp1(p, p1 - p, item->name))

void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);
#if defined(CONFIG_USER_ONLY)
    struct sigaction act;
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(SIGABRT, &act, NULL);

CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

static CPUTLBEntry s_cputlb_empty_entry = {

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;

void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
int cpu_physical_memory_set_dirty_tracking(int enable)
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);

int cpu_physical_memory_get_dirty_tracking(void)
    return in_migration;

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
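/* Example (illustrative): if a 2 MB large page is already tracked and a
   second one at a different address is added, the loop above widens the mask
   until a single region covers both, trading occasional forced full flushes
   for not having to remember every large mapping individually. */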
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    pd = IO_MEM_UNASSIGNED;
    pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p->region_offset;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
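/* Dirty tracking interaction: a RAM page whose dirty flag is clear gets
   TLB_NOTDIRTY set on its write entry, so the first store takes the slow
   path, which marks the page dirty (and invalidates any TBs on it) before
   fast-path writes are allowed again. */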
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data
    walk_memory_regions_fn fn;
    unsigned long start;

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
    return walk_memory_regions_end(data, base, 0);

    for (i = 0; i < L2_SIZE; ++i) {
        int prot = pd[i].flags;

        pa = base | (i << TARGET_PAGE_BITS);
        if (prot != data->prot) {
            rc = walk_memory_regions_end(data, pa, prot);
    for (i = 0; i < L2_SIZE; ++i) {
        pa = base | ((abi_ulong)i <<
                     (TARGET_PAGE_BITS + L2_BITS * level));
        rc = walk_memory_regions_1(data, pa, level - 1, pp + i);

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
    struct walk_memory_regions_data data;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

    return walk_memory_regions_end(&data, 0, 0);
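/* walk_memory_regions() merges runs of consecutive pages that share the same
   protection bits into single [start, end) regions before invoking the
   callback; walk_memory_regions_end() is the point where a finished run is
   flushed to 'fn'. */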
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

/* dump memory mappings */
void page_dump(FILE *f)
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);

int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));

    if (start + len - 1 < start) {
        /* We've wrapped around.  */

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!(p->flags & PAGE_VALID))
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
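/*
 * Illustrative sketch (not part of the original file): page_unprotect() is
 * what the per-target cpu_signal_handler() paths rely on when the guest
 * writes to a page that was write-protected only because it contains
 * translated code.  The handler name below is hypothetical and heavily
 * simplified; real handlers also have to distinguish guest faults from
 * genuine QEMU bugs.
 */
#if 0
static int handle_write_fault(unsigned long host_addr, unsigned long pc,
                              void *puc)
{
    /* Returns non-zero if the fault was fixed up by re-enabling
       PAGE_WRITE and invalidating the TBs on that page. */
    return page_unprotect(h2g(host_addr), pc, puc);
}
#endif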
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
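/*
 * Illustrative sketch (not part of the original file): board/device code
 * normally goes through the cpu_register_physical_memory() wrapper (which
 * passes region_offset == 0) rather than calling the _offset variant
 * directly.  The helper name, addresses and sizes below are made up for
 * illustration.
 */
#if 0
static void map_example_regions(ram_addr_t ram_offset, int mmio_io_index)
{
    /* 64KB of RAM at guest physical 0: phys_offset is the ram_addr_t
       returned by qemu_ram_alloc(); low bits of zero mean "RAM". */
    cpu_register_physical_memory(0x00000000, 0x10000, ram_offset | IO_MEM_RAM);

    /* One page of MMIO: phys_offset carries the io index returned by
       cpu_register_io_memory(). */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE,
                                 mmio_io_index);
}
#endif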
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    return area;
}
#endif
ram_addr_t qemu_ram_map(ram_addr_t size, void *host)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = host;

    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host) {
            new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
            madvise(new_block->host, size, MADV_MERGEABLE);
#endif
        }
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
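/*
 * Illustrative sketch (not part of the original file): the comment above
 * draws the line between qemu_get_ram_ptr() for device-owned memory and the
 * cpu_physical_memory_* helpers for guest-visible DMA.  Both helpers below
 * are hypothetical.
 */
#if 0
/* Fine: a display device touching its own VRAM block. */
static uint8_t *vram_pixel_ptr(ram_addr_t vram_offset, unsigned int byte_off)
{
    return (uint8_t *)qemu_get_ram_ptr(vram_offset) + byte_off;
}

/* DMA into arbitrary guest memory must go through the slow path instead. */
static void dma_to_guest(target_phys_addr_t gpa, const uint8_t *buf, int len)
{
    cpu_physical_memory_write(gpa, buf, len);
}
#endif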
int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block)
        return -1;
    *ram_addr = block->offset + (host - block->host);
    return 0;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    ram_addr_t ram_addr;

    if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
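/*
 * Illustrative sketch (not part of the original file): check_watchpoint()
 * only fires for addresses that were previously registered on
 * env->watchpoints, e.g. by the gdbstub via cpu_watchpoint_insert().  The
 * helper name below is hypothetical; the flag values come from the BP_*
 * definitions in the CPU headers.
 */
#if 0
static int watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* Trap writes to a 4-byte naturally aligned location. */
    return cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif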
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
3283 ram_addr_t orig_memory
,
3284 ram_addr_t region_offset
)
3289 mmio
= qemu_mallocz(sizeof(subpage_t
));
3292 subpage_memory
= cpu_register_io_memory(subpage_read
, subpage_write
, mmio
);
3293 #if defined(DEBUG_SUBPAGE)
3294 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3295 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3297 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
3298 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, orig_memory
, region_offset
);
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
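/*
 * Illustrative sketch (not part of the original file): the typical pattern a
 * device follows to get MMIO callbacks dispatched through io_mem_read/write.
 * The device type, the callbacks and the 0x10000000 base address are made up
 * for illustration; NULL entries fall back to the unassigned handlers, as
 * described in the comment above cpu_register_io_memory_fixed().
 */
#if 0
typedef struct ExampleDev {
    uint32_t reg;
} ExampleDev;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDev *d = opaque;
    return d->reg;                       /* same value for every offset */
}

static void example_writel(void *opaque, target_phys_addr_t addr,
                           uint32_t val)
{
    ExampleDev *d = opaque;
    d->reg = val;
}

static CPUReadMemoryFunc * const example_read[3] = {
    NULL, NULL, example_readl,           /* byte/word accesses unassigned */
};
static CPUWriteMemoryFunc * const example_write[3] = {
    NULL, NULL, example_writel,
};

static void example_dev_map(ExampleDev *d)
{
    int io = cpu_register_io_memory(example_read, example_write, d);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io);
}
#endif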
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
, void (*callback
)(void *opaque
))
3591 MapClient
*client
= qemu_malloc(sizeof(*client
));
3593 client
->opaque
= opaque
;
3594 client
->callback
= callback
;
3595 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3599 void cpu_unregister_map_client(void *_client
)
3601 MapClient
*client
= (MapClient
*)_client
;
3603 QLIST_REMOVE(client
, link
);
3607 static void cpu_notify_map_clients(void)
3611 while (!QLIST_EMPTY(&map_client_list
)) {
3612 client
= QLIST_FIRST(&map_client_list
);
3613 client
->callback(client
->opaque
);
3614 cpu_unregister_map_client(client
);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
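/*
 * Illustrative sketch (not part of the original file): the zero-copy DMA
 * pattern built on cpu_physical_memory_map()/unmap().  The helper name is
 * hypothetical; real users also register a map client via
 * cpu_register_map_client() so they can retry once the bounce buffer is free
 * again.
 */
#if 0
static int dma_write_to_guest(target_phys_addr_t gpa, const uint8_t *src,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (!host || plen < len) {
        if (host) {
            cpu_physical_memory_unmap(host, plen, 1, 0);
        }
        return -1;      /* caller should retry when notified */
    }
    memcpy(host, src, len);
    cpu_physical_memory_unmap(host, plen, 1, len);
    return 0;
}
#endif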
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
3753 uint64_t ldq_phys(target_phys_addr_t addr
)
3761 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3763 pd
= IO_MEM_UNASSIGNED
;
3765 pd
= p
->phys_offset
;
3768 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3769 !(pd
& IO_MEM_ROMD
)) {
3771 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3773 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3774 #ifdef TARGET_WORDS_BIGENDIAN
3775 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
3776 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
3778 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3779 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
3783 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3784 (addr
& ~TARGET_PAGE_MASK
);
3791 uint32_t ldub_phys(target_phys_addr_t addr
)
3794 cpu_physical_memory_read(addr
, &val
, 1);
/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
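/*
 * Illustrative sketch (not part of the original file): devices typically use
 * the ld*_phys/st*_phys helpers above to walk in-memory descriptors instead
 * of open-coding cpu_physical_memory_rw().  The descriptor layout and helper
 * name are made up for illustration; note the alignment warnings above.
 */
#if 0
static void complete_example_descriptor(target_phys_addr_t desc_gpa)
{
    /* Hypothetical descriptor: u32 buffer address at +0, u32 status at +4. */
    uint32_t buf_addr = ldl_phys(desc_gpa);
    (void)buf_addr;
    stl_phys(desc_gpa + 4, 1);           /* mark the descriptor done */
}
#endif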
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    tcg_dump_info(f, cpu_fprintf);
#endif
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif