2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
36 #include "qemu-common.h"
37 #include "cache-utils.h"
39 #if !defined(TARGET_IA64)
47 #include "qemu-timer.h"
48 #if defined(CONFIG_USER_ONLY)
51 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
52 #include <sys/param.h>
53 #if __FreeBSD_version >= 700104
54 #define HAVE_KINFO_GETVMMAP
55 #define sigqueue sigqueue_freebsd /* avoid redefinition */
58 #include <machine/profile.h>
68 //#define DEBUG_TB_INVALIDATE
71 //#define DEBUG_UNASSIGNED
73 /* make various TB consistency checks */
74 //#define DEBUG_TB_CHECK
75 //#define DEBUG_TLB_CHECK
77 //#define DEBUG_IOPORT
78 //#define DEBUG_SUBPAGE
80 #if !defined(CONFIG_USER_ONLY)
81 /* TB consistency checks only implemented for usermode emulation. */
85 #define SMC_BITMAP_USE_THRESHOLD 10
87 static TranslationBlock
*tbs
;
88 int code_gen_max_blocks
;
89 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #elif defined(_WIN32)
102 /* Maximum alignment for Win32 is 16. */
103 #define code_gen_section \
104 __attribute__((aligned (16)))
106 #define code_gen_section \
107 __attribute__((aligned (32)))
110 uint8_t code_gen_prologue
[1024] code_gen_section
;
111 static uint8_t *code_gen_buffer
;
112 static unsigned long code_gen_buffer_size
;
113 /* threshold to flush the translated code buffer */
114 static unsigned long code_gen_buffer_max_size
;
115 uint8_t *code_gen_ptr
;
117 #if !defined(CONFIG_USER_ONLY)
119 static int in_migration
;
121 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
) };
125 /* current CPU in the current thread. It is only valid inside
127 CPUState
*cpu_single_env
;
128 /* 0 = Do not count executed instructions.
129 1 = Precise instruction counting.
130 2 = Adaptive rate instruction counting. */
132 /* Current instruction counter. While executing translated code this may
133 include some instructions that have not yet been executed. */
136 typedef struct PageDesc
{
137 /* list of TBs intersecting this ram page */
138 TranslationBlock
*first_tb
;
139 /* in order to optimize self modifying code, we count the number
140 of lookups we do to a given page to use a bitmap */
141 unsigned int code_write_count
;
142 uint8_t *code_bitmap
;
143 #if defined(CONFIG_USER_ONLY)
148 /* In system mode we want L1_MAP to be based on ram offsets,
149 while in user mode we want it to be based on virtual addresses. */
150 #if !defined(CONFIG_USER_ONLY)
151 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
152 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
154 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
157 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
160 /* Size of the L2 (and L3, etc) page tables. */
162 #define L2_SIZE (1 << L2_BITS)
164 /* The bits remaining after N lower levels of page tables. */
165 #define P_L1_BITS_REM \
166 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
170 /* Size of the L1 page table. Avoid silly small sizes. */
171 #if P_L1_BITS_REM < 4
172 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
174 #define P_L1_BITS P_L1_BITS_REM
177 #if V_L1_BITS_REM < 4
178 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
180 #define V_L1_BITS V_L1_BITS_REM
183 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
184 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
186 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
187 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
189 unsigned long qemu_real_host_page_size
;
190 unsigned long qemu_host_page_bits
;
191 unsigned long qemu_host_page_size
;
192 unsigned long qemu_host_page_mask
;
194 /* This is a multi-level map on the virtual address space.
195 The bottom level has pointers to PageDesc. */
196 static void *l1_map
[V_L1_SIZE
];
198 #if !defined(CONFIG_USER_ONLY)
199 typedef struct PhysPageDesc
{
200 /* offset in host memory of the page + io_index in the low bits */
201 ram_addr_t phys_offset
;
202 ram_addr_t region_offset
;
205 /* This is a multi-level map on the physical address space.
206 The bottom level has pointers to PhysPageDesc. */
207 static void *l1_phys_map
[P_L1_SIZE
];
209 static void io_mem_init(void);
211 /* io memory support */
212 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
213 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
214 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
215 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
216 static int io_mem_watch
;
221 static const char *logfilename
= "qemu.log";
223 static const char *logfilename
= "/tmp/qemu.log";
227 static int log_append
= 0;
230 #if !defined(CONFIG_USER_ONLY)
231 static int tlb_flush_count
;
233 static int tb_flush_count
;
234 static int tb_phys_invalidate_count
;
237 static void map_exec(void *addr
, long size
)
240 VirtualProtect(addr
, size
,
241 PAGE_EXECUTE_READWRITE
, &old_protect
);
245 static void map_exec(void *addr
, long size
)
247 unsigned long start
, end
, page_size
;
249 page_size
= getpagesize();
250 start
= (unsigned long)addr
;
251 start
&= ~(page_size
- 1);
253 end
= (unsigned long)addr
+ size
;
254 end
+= page_size
- 1;
255 end
&= ~(page_size
- 1);
257 mprotect((void *)start
, end
- start
,
258 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
262 static void page_init(void)
264 /* NOTE: we can always suppose that qemu_host_page_size >=
268 SYSTEM_INFO system_info
;
270 GetSystemInfo(&system_info
);
271 qemu_real_host_page_size
= system_info
.dwPageSize
;
274 qemu_real_host_page_size
= getpagesize();
276 if (qemu_host_page_size
== 0)
277 qemu_host_page_size
= qemu_real_host_page_size
;
278 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
279 qemu_host_page_size
= TARGET_PAGE_SIZE
;
280 qemu_host_page_bits
= 0;
281 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
282 qemu_host_page_bits
++;
283 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
285 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
287 #ifdef HAVE_KINFO_GETVMMAP
288 struct kinfo_vmentry
*freep
;
291 freep
= kinfo_getvmmap(getpid(), &cnt
);
294 for (i
= 0; i
< cnt
; i
++) {
295 unsigned long startaddr
, endaddr
;
297 startaddr
= freep
[i
].kve_start
;
298 endaddr
= freep
[i
].kve_end
;
299 if (h2g_valid(startaddr
)) {
300 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
302 if (h2g_valid(endaddr
)) {
303 endaddr
= h2g(endaddr
);
304 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
306 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
308 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
319 last_brk
= (unsigned long)sbrk(0);
321 f
= fopen("/compat/linux/proc/self/maps", "r");
326 unsigned long startaddr
, endaddr
;
329 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
331 if (n
== 2 && h2g_valid(startaddr
)) {
332 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
334 if (h2g_valid(endaddr
)) {
335 endaddr
= h2g(endaddr
);
339 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
351 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
357 #if defined(CONFIG_USER_ONLY)
358 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
359 # define ALLOC(P, SIZE) \
361 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
362 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
365 # define ALLOC(P, SIZE) \
366 do { P = qemu_mallocz(SIZE); } while (0)
369 /* Level 1. Always allocated. */
370 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
373 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
380 ALLOC(p
, sizeof(void *) * L2_SIZE
);
384 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
392 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
398 return pd
+ (index
& (L2_SIZE
- 1));
401 static inline PageDesc
*page_find(tb_page_addr_t index
)
403 return page_find_alloc(index
, 0);
406 #if !defined(CONFIG_USER_ONLY)
407 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
413 /* Level 1. Always allocated. */
414 lp
= l1_phys_map
+ ((index
>> P_L1_SHIFT
) & (P_L1_SIZE
- 1));
417 for (i
= P_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
423 *lp
= p
= qemu_mallocz(sizeof(void *) * L2_SIZE
);
425 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
436 *lp
= pd
= qemu_malloc(sizeof(PhysPageDesc
) * L2_SIZE
);
438 for (i
= 0; i
< L2_SIZE
; i
++) {
439 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
440 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
444 return pd
+ (index
& (L2_SIZE
- 1));
447 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
449 return phys_page_find_alloc(index
, 0);
452 static void tlb_protect_code(ram_addr_t ram_addr
);
453 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
455 #define mmap_lock() do { } while(0)
456 #define mmap_unlock() do { } while(0)
459 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
461 #if defined(CONFIG_USER_ONLY)
462 /* Currently it is not recommended to allocate big chunks of data in
463 user mode. It will change when a dedicated libc will be used */
464 #define USE_STATIC_CODE_GEN_BUFFER
467 #ifdef USE_STATIC_CODE_GEN_BUFFER
468 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
469 __attribute__((aligned (CODE_GEN_ALIGN
)));
472 static void code_gen_alloc(unsigned long tb_size
)
477 #ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer
= static_code_gen_buffer
;
479 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
480 map_exec(code_gen_buffer
, code_gen_buffer_size
);
482 code_gen_buffer_size
= tb_size
;
483 if (code_gen_buffer_size
== 0) {
484 #if defined(CONFIG_USER_ONLY)
485 /* in user mode, phys_ram_size is not meaningful */
486 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
488 /* XXX: needs adjustments */
489 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
492 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
493 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
494 /* The code gen buffer location may have constraints depending on
495 the host cpu and OS */
496 #if defined(__linux__)
501 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
502 #if defined(__x86_64__)
504 /* Cannot map more than that */
505 if (code_gen_buffer_size
> (800 * 1024 * 1024))
506 code_gen_buffer_size
= (800 * 1024 * 1024);
507 #elif defined(__sparc_v9__)
508 // Map the buffer below 2G, so we can use direct calls and branches
510 start
= (void *) 0x60000000UL
;
511 if (code_gen_buffer_size
> (512 * 1024 * 1024))
512 code_gen_buffer_size
= (512 * 1024 * 1024);
513 #elif defined(__arm__)
514 /* Map the buffer below 32M, so we can use direct calls and branches */
516 start
= (void *) 0x01000000UL
;
517 if (code_gen_buffer_size
> 16 * 1024 * 1024)
518 code_gen_buffer_size
= 16 * 1024 * 1024;
519 #elif defined(__s390x__)
520 /* Map the buffer so that we can use direct calls and branches. */
521 /* We have a +- 4GB range on the branches; leave some slop. */
522 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
523 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
525 start
= (void *)0x90000000UL
;
527 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
528 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
530 if (code_gen_buffer
== MAP_FAILED
) {
531 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
535 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
539 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
540 #if defined(__x86_64__)
541 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
542 * 0x40000000 is free */
544 addr
= (void *)0x40000000;
545 /* Cannot map more than that */
546 if (code_gen_buffer_size
> (800 * 1024 * 1024))
547 code_gen_buffer_size
= (800 * 1024 * 1024);
549 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
550 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
552 if (code_gen_buffer
== MAP_FAILED
) {
553 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
558 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
559 map_exec(code_gen_buffer
, code_gen_buffer_size
);
561 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
562 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
563 code_gen_buffer_max_size
= code_gen_buffer_size
-
564 (TCG_MAX_OP_SIZE
* OPC_MAX_SIZE
);
565 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
566 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
569 /* Must be called before using the QEMU cpus. 'tb_size' is the size
570 (in bytes) allocated to the translation buffer. Zero means default
572 void cpu_exec_init_all(unsigned long tb_size
)
575 code_gen_alloc(tb_size
);
576 code_gen_ptr
= code_gen_buffer
;
578 #if !defined(CONFIG_USER_ONLY)
581 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
582 /* There's no guest base to take into account, so go ahead and
583 initialize the prologue now. */
584 tcg_prologue_init(&tcg_ctx
);
588 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
590 static int cpu_common_post_load(void *opaque
, int version_id
)
592 CPUState
*env
= opaque
;
594 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
595 version_id is increased. */
596 env
->interrupt_request
&= ~0x01;
602 static const VMStateDescription vmstate_cpu_common
= {
603 .name
= "cpu_common",
605 .minimum_version_id
= 1,
606 .minimum_version_id_old
= 1,
607 .post_load
= cpu_common_post_load
,
608 .fields
= (VMStateField
[]) {
609 VMSTATE_UINT32(halted
, CPUState
),
610 VMSTATE_UINT32(interrupt_request
, CPUState
),
611 VMSTATE_END_OF_LIST()
616 CPUState
*qemu_get_cpu(int cpu
)
618 CPUState
*env
= first_cpu
;
621 if (env
->cpu_index
== cpu
)
629 void cpu_exec_init(CPUState
*env
)
634 #if defined(CONFIG_USER_ONLY)
637 env
->next_cpu
= NULL
;
640 while (*penv
!= NULL
) {
641 penv
= &(*penv
)->next_cpu
;
644 env
->cpu_index
= cpu_index
;
646 QTAILQ_INIT(&env
->breakpoints
);
647 QTAILQ_INIT(&env
->watchpoints
);
649 env
->thread_id
= GetCurrentProcessId();
651 env
->thread_id
= getpid();
654 #if defined(CONFIG_USER_ONLY)
657 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
658 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
659 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
660 cpu_save
, cpu_load
, env
);
664 static inline void invalidate_page_bitmap(PageDesc
*p
)
666 if (p
->code_bitmap
) {
667 qemu_free(p
->code_bitmap
);
668 p
->code_bitmap
= NULL
;
670 p
->code_write_count
= 0;
673 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
675 static void page_flush_tb_1 (int level
, void **lp
)
684 for (i
= 0; i
< L2_SIZE
; ++i
) {
685 pd
[i
].first_tb
= NULL
;
686 invalidate_page_bitmap(pd
+ i
);
690 for (i
= 0; i
< L2_SIZE
; ++i
) {
691 page_flush_tb_1 (level
- 1, pp
+ i
);
696 static void page_flush_tb(void)
699 for (i
= 0; i
< V_L1_SIZE
; i
++) {
700 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
704 /* flush all the translation blocks */
705 /* XXX: tb_flush is currently not thread safe */
706 void tb_flush(CPUState
*env1
)
709 #if defined(DEBUG_FLUSH)
710 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
711 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
713 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
715 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
716 cpu_abort(env1
, "Internal error: code buffer overflow\n");
720 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
721 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
724 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
727 code_gen_ptr
= code_gen_buffer
;
728 /* XXX: flush processor icache at this point if cache flush is
733 #ifdef DEBUG_TB_CHECK
735 static void tb_invalidate_check(target_ulong address
)
737 TranslationBlock
*tb
;
739 address
&= TARGET_PAGE_MASK
;
740 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
741 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
742 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
743 address
>= tb
->pc
+ tb
->size
)) {
744 printf("ERROR invalidate: address=" TARGET_FMT_lx
745 " PC=%08lx size=%04x\n",
746 address
, (long)tb
->pc
, tb
->size
);
752 /* verify that all the pages have correct rights for code */
753 static void tb_page_check(void)
755 TranslationBlock
*tb
;
756 int i
, flags1
, flags2
;
758 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
759 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
760 flags1
= page_get_flags(tb
->pc
);
761 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
762 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
763 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
764 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
772 /* invalidate one TB */
773 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
776 TranslationBlock
*tb1
;
780 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
783 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
787 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
789 TranslationBlock
*tb1
;
795 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
797 *ptb
= tb1
->page_next
[n1
];
800 ptb
= &tb1
->page_next
[n1
];
804 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
806 TranslationBlock
*tb1
, **ptb
;
809 ptb
= &tb
->jmp_next
[n
];
812 /* find tb(n) in circular list */
816 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
817 if (n1
== n
&& tb1
== tb
)
820 ptb
= &tb1
->jmp_first
;
822 ptb
= &tb1
->jmp_next
[n1
];
825 /* now we can suppress tb(n) from the list */
826 *ptb
= tb
->jmp_next
[n
];
828 tb
->jmp_next
[n
] = NULL
;
832 /* reset the jump entry 'n' of a TB so that it is not chained to
834 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
836 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
839 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
844 tb_page_addr_t phys_pc
;
845 TranslationBlock
*tb1
, *tb2
;
847 /* remove the TB from the hash list */
848 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
849 h
= tb_phys_hash_func(phys_pc
);
850 tb_remove(&tb_phys_hash
[h
], tb
,
851 offsetof(TranslationBlock
, phys_hash_next
));
853 /* remove the TB from the page list */
854 if (tb
->page_addr
[0] != page_addr
) {
855 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
856 tb_page_remove(&p
->first_tb
, tb
);
857 invalidate_page_bitmap(p
);
859 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
860 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
861 tb_page_remove(&p
->first_tb
, tb
);
862 invalidate_page_bitmap(p
);
865 tb_invalidated_flag
= 1;
867 /* remove the TB from the hash list */
868 h
= tb_jmp_cache_hash_func(tb
->pc
);
869 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
870 if (env
->tb_jmp_cache
[h
] == tb
)
871 env
->tb_jmp_cache
[h
] = NULL
;
874 /* suppress this TB from the two jump lists */
875 tb_jmp_remove(tb
, 0);
876 tb_jmp_remove(tb
, 1);
878 /* suppress any remaining jumps to this TB */
884 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
885 tb2
= tb1
->jmp_next
[n1
];
886 tb_reset_jump(tb1
, n1
);
887 tb1
->jmp_next
[n1
] = NULL
;
890 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
892 tb_phys_invalidate_count
++;
895 static inline void set_bits(uint8_t *tab
, int start
, int len
)
901 mask
= 0xff << (start
& 7);
902 if ((start
& ~7) == (end
& ~7)) {
904 mask
&= ~(0xff << (end
& 7));
909 start
= (start
+ 8) & ~7;
911 while (start
< end1
) {
916 mask
= ~(0xff << (end
& 7));
922 static void build_page_bitmap(PageDesc
*p
)
924 int n
, tb_start
, tb_end
;
925 TranslationBlock
*tb
;
927 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
932 tb
= (TranslationBlock
*)((long)tb
& ~3);
933 /* NOTE: this is subtle as a TB may span two physical pages */
935 /* NOTE: tb_end may be after the end of the page, but
936 it is not a problem */
937 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
938 tb_end
= tb_start
+ tb
->size
;
939 if (tb_end
> TARGET_PAGE_SIZE
)
940 tb_end
= TARGET_PAGE_SIZE
;
943 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
945 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
946 tb
= tb
->page_next
[n
];
950 TranslationBlock
*tb_gen_code(CPUState
*env
,
951 target_ulong pc
, target_ulong cs_base
,
952 int flags
, int cflags
)
954 TranslationBlock
*tb
;
956 tb_page_addr_t phys_pc
, phys_page2
;
957 target_ulong virt_page2
;
960 phys_pc
= get_page_addr_code(env
, pc
);
963 /* flush must be done */
965 /* cannot fail at this point */
967 /* Don't forget to invalidate previous TB info. */
968 tb_invalidated_flag
= 1;
970 tc_ptr
= code_gen_ptr
;
972 tb
->cs_base
= cs_base
;
975 cpu_gen_code(env
, tb
, &code_gen_size
);
976 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
978 /* check next page if needed */
979 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
981 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
982 phys_page2
= get_page_addr_code(env
, virt_page2
);
984 tb_link_page(tb
, phys_pc
, phys_page2
);
988 /* invalidate all TBs which intersect with the target physical page
989 starting in range [start;end[. NOTE: start and end must refer to
990 the same physical page. 'is_cpu_write_access' should be true if called
991 from a real cpu write access: the virtual CPU will exit the current
992 TB if code is modified inside this TB. */
993 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
994 int is_cpu_write_access
)
996 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
997 CPUState
*env
= cpu_single_env
;
998 tb_page_addr_t tb_start
, tb_end
;
1001 #ifdef TARGET_HAS_PRECISE_SMC
1002 int current_tb_not_found
= is_cpu_write_access
;
1003 TranslationBlock
*current_tb
= NULL
;
1004 int current_tb_modified
= 0;
1005 target_ulong current_pc
= 0;
1006 target_ulong current_cs_base
= 0;
1007 int current_flags
= 0;
1008 #endif /* TARGET_HAS_PRECISE_SMC */
1010 p
= page_find(start
>> TARGET_PAGE_BITS
);
1013 if (!p
->code_bitmap
&&
1014 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1015 is_cpu_write_access
) {
1016 /* build code bitmap */
1017 build_page_bitmap(p
);
1020 /* we remove all the TBs in the range [start, end[ */
1021 /* XXX: see if in some cases it could be faster to invalidate all the code */
1023 while (tb
!= NULL
) {
1025 tb
= (TranslationBlock
*)((long)tb
& ~3);
1026 tb_next
= tb
->page_next
[n
];
1027 /* NOTE: this is subtle as a TB may span two physical pages */
1029 /* NOTE: tb_end may be after the end of the page, but
1030 it is not a problem */
1031 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1032 tb_end
= tb_start
+ tb
->size
;
1034 tb_start
= tb
->page_addr
[1];
1035 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1037 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1038 #ifdef TARGET_HAS_PRECISE_SMC
1039 if (current_tb_not_found
) {
1040 current_tb_not_found
= 0;
1042 if (env
->mem_io_pc
) {
1043 /* now we have a real cpu fault */
1044 current_tb
= tb_find_pc(env
->mem_io_pc
);
1047 if (current_tb
== tb
&&
1048 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1049 /* If we are modifying the current TB, we must stop
1050 its execution. We could be more precise by checking
1051 that the modification is after the current PC, but it
1052 would require a specialized function to partially
1053 restore the CPU state */
1055 current_tb_modified
= 1;
1056 cpu_restore_state(current_tb
, env
,
1057 env
->mem_io_pc
, NULL
);
1058 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1061 #endif /* TARGET_HAS_PRECISE_SMC */
1062 /* we need to do that to handle the case where a signal
1063 occurs while doing tb_phys_invalidate() */
1066 saved_tb
= env
->current_tb
;
1067 env
->current_tb
= NULL
;
1069 tb_phys_invalidate(tb
, -1);
1071 env
->current_tb
= saved_tb
;
1072 if (env
->interrupt_request
&& env
->current_tb
)
1073 cpu_interrupt(env
, env
->interrupt_request
);
1078 #if !defined(CONFIG_USER_ONLY)
1079 /* if no code remaining, no need to continue to use slow writes */
1081 invalidate_page_bitmap(p
);
1082 if (is_cpu_write_access
) {
1083 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1087 #ifdef TARGET_HAS_PRECISE_SMC
1088 if (current_tb_modified
) {
1089 /* we generate a block containing just the instruction
1090 modifying the memory. It will ensure that it cannot modify
1092 env
->current_tb
= NULL
;
1093 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1094 cpu_resume_from_signal(env
, NULL
);
1099 /* len must be <= 8 and start must be a multiple of len */
1100 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1106 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1107 cpu_single_env
->mem_io_vaddr
, len
,
1108 cpu_single_env
->eip
,
1109 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1112 p
= page_find(start
>> TARGET_PAGE_BITS
);
1115 if (p
->code_bitmap
) {
1116 offset
= start
& ~TARGET_PAGE_MASK
;
1117 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1118 if (b
& ((1 << len
) - 1))
1122 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1126 #if !defined(CONFIG_SOFTMMU)
1127 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1128 unsigned long pc
, void *puc
)
1130 TranslationBlock
*tb
;
1133 #ifdef TARGET_HAS_PRECISE_SMC
1134 TranslationBlock
*current_tb
= NULL
;
1135 CPUState
*env
= cpu_single_env
;
1136 int current_tb_modified
= 0;
1137 target_ulong current_pc
= 0;
1138 target_ulong current_cs_base
= 0;
1139 int current_flags
= 0;
1142 addr
&= TARGET_PAGE_MASK
;
1143 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1147 #ifdef TARGET_HAS_PRECISE_SMC
1148 if (tb
&& pc
!= 0) {
1149 current_tb
= tb_find_pc(pc
);
1152 while (tb
!= NULL
) {
1154 tb
= (TranslationBlock
*)((long)tb
& ~3);
1155 #ifdef TARGET_HAS_PRECISE_SMC
1156 if (current_tb
== tb
&&
1157 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1158 /* If we are modifying the current TB, we must stop
1159 its execution. We could be more precise by checking
1160 that the modification is after the current PC, but it
1161 would require a specialized function to partially
1162 restore the CPU state */
1164 current_tb_modified
= 1;
1165 cpu_restore_state(current_tb
, env
, pc
, puc
);
1166 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1169 #endif /* TARGET_HAS_PRECISE_SMC */
1170 tb_phys_invalidate(tb
, addr
);
1171 tb
= tb
->page_next
[n
];
1174 #ifdef TARGET_HAS_PRECISE_SMC
1175 if (current_tb_modified
) {
1176 /* we generate a block containing just the instruction
1177 modifying the memory. It will ensure that it cannot modify
1179 env
->current_tb
= NULL
;
1180 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1181 cpu_resume_from_signal(env
, puc
);
1187 /* add the tb in the target page and protect it if necessary */
1188 static inline void tb_alloc_page(TranslationBlock
*tb
,
1189 unsigned int n
, tb_page_addr_t page_addr
)
1192 TranslationBlock
*last_first_tb
;
1194 tb
->page_addr
[n
] = page_addr
;
1195 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1196 tb
->page_next
[n
] = p
->first_tb
;
1197 last_first_tb
= p
->first_tb
;
1198 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1199 invalidate_page_bitmap(p
);
1201 #if defined(TARGET_HAS_SMC) || 1
1203 #if defined(CONFIG_USER_ONLY)
1204 if (p
->flags
& PAGE_WRITE
) {
1209 /* force the host page as non writable (writes will have a
1210 page fault + mprotect overhead) */
1211 page_addr
&= qemu_host_page_mask
;
1213 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1214 addr
+= TARGET_PAGE_SIZE
) {
1216 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1220 p2
->flags
&= ~PAGE_WRITE
;
1222 mprotect(g2h(page_addr
), qemu_host_page_size
,
1223 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1224 #ifdef DEBUG_TB_INVALIDATE
1225 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1230 /* if some code is already present, then the pages are already
1231 protected. So we handle the case where only the first TB is
1232 allocated in a physical page */
1233 if (!last_first_tb
) {
1234 tlb_protect_code(page_addr
);
1238 #endif /* TARGET_HAS_SMC */
1241 /* Allocate a new translation block. Flush the translation buffer if
1242 too many translation blocks or too much generated code. */
1243 TranslationBlock
*tb_alloc(target_ulong pc
)
1245 TranslationBlock
*tb
;
1247 if (nb_tbs
>= code_gen_max_blocks
||
1248 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1250 tb
= &tbs
[nb_tbs
++];
1256 void tb_free(TranslationBlock
*tb
)
1258 /* In practice this is mostly used for single use temporary TB
1259 Ignore the hard cases and just back up if this TB happens to
1260 be the last one generated. */
1261 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1262 code_gen_ptr
= tb
->tc_ptr
;
1267 /* add a new TB and link it to the physical page tables. phys_page2 is
1268 (-1) to indicate that only one page contains the TB. */
1269 void tb_link_page(TranslationBlock
*tb
,
1270 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1273 TranslationBlock
**ptb
;
1275 /* Grab the mmap lock to stop another thread invalidating this TB
1276 before we are done. */
1278 /* add in the physical hash table */
1279 h
= tb_phys_hash_func(phys_pc
);
1280 ptb
= &tb_phys_hash
[h
];
1281 tb
->phys_hash_next
= *ptb
;
1284 /* add in the page list */
1285 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1286 if (phys_page2
!= -1)
1287 tb_alloc_page(tb
, 1, phys_page2
);
1289 tb
->page_addr
[1] = -1;
1291 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1292 tb
->jmp_next
[0] = NULL
;
1293 tb
->jmp_next
[1] = NULL
;
1295 /* init original jump addresses */
1296 if (tb
->tb_next_offset
[0] != 0xffff)
1297 tb_reset_jump(tb
, 0);
1298 if (tb
->tb_next_offset
[1] != 0xffff)
1299 tb_reset_jump(tb
, 1);
1301 #ifdef DEBUG_TB_CHECK
1307 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1308 tb[1].tc_ptr. Return NULL if not found */
1309 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1311 int m_min
, m_max
, m
;
1313 TranslationBlock
*tb
;
1317 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1318 tc_ptr
>= (unsigned long)code_gen_ptr
)
1320 /* binary search (cf Knuth) */
1323 while (m_min
<= m_max
) {
1324 m
= (m_min
+ m_max
) >> 1;
1326 v
= (unsigned long)tb
->tc_ptr
;
1329 else if (tc_ptr
< v
) {
1338 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1340 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1342 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1345 tb1
= tb
->jmp_next
[n
];
1347 /* find head of list */
1350 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1353 tb1
= tb1
->jmp_next
[n1
];
1355 /* we are now sure now that tb jumps to tb1 */
1358 /* remove tb from the jmp_first list */
1359 ptb
= &tb_next
->jmp_first
;
1363 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1364 if (n1
== n
&& tb1
== tb
)
1366 ptb
= &tb1
->jmp_next
[n1
];
1368 *ptb
= tb
->jmp_next
[n
];
1369 tb
->jmp_next
[n
] = NULL
;
1371 /* suppress the jump to next tb in generated code */
1372 tb_reset_jump(tb
, n
);
1374 /* suppress jumps in the tb on which we could have jumped */
1375 tb_reset_jump_recursive(tb_next
);
1379 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1381 tb_reset_jump_recursive2(tb
, 0);
1382 tb_reset_jump_recursive2(tb
, 1);
1385 #if defined(TARGET_HAS_ICE)
1386 #if defined(CONFIG_USER_ONLY)
1387 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1389 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1392 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1394 target_phys_addr_t addr
;
1396 ram_addr_t ram_addr
;
1399 addr
= cpu_get_phys_page_debug(env
, pc
);
1400 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1402 pd
= IO_MEM_UNASSIGNED
;
1404 pd
= p
->phys_offset
;
1406 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1407 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1410 #endif /* TARGET_HAS_ICE */
1412 #if defined(CONFIG_USER_ONLY)
1413 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1418 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1419 int flags
, CPUWatchpoint
**watchpoint
)
1424 /* Add a watchpoint. */
1425 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1426 int flags
, CPUWatchpoint
**watchpoint
)
1428 target_ulong len_mask
= ~(len
- 1);
1431 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1432 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1433 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1434 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1437 wp
= qemu_malloc(sizeof(*wp
));
1440 wp
->len_mask
= len_mask
;
1443 /* keep all GDB-injected watchpoints in front */
1445 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1447 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1449 tlb_flush_page(env
, addr
);
1456 /* Remove a specific watchpoint. */
1457 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1460 target_ulong len_mask
= ~(len
- 1);
1463 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1464 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1465 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1466 cpu_watchpoint_remove_by_ref(env
, wp
);
1473 /* Remove a specific watchpoint by reference. */
1474 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1476 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1478 tlb_flush_page(env
, watchpoint
->vaddr
);
1480 qemu_free(watchpoint
);
1483 /* Remove all matching watchpoints. */
1484 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1486 CPUWatchpoint
*wp
, *next
;
1488 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1489 if (wp
->flags
& mask
)
1490 cpu_watchpoint_remove_by_ref(env
, wp
);
1495 /* Add a breakpoint. */
1496 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1497 CPUBreakpoint
**breakpoint
)
1499 #if defined(TARGET_HAS_ICE)
1502 bp
= qemu_malloc(sizeof(*bp
));
1507 /* keep all GDB-injected breakpoints in front */
1509 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1511 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1513 breakpoint_invalidate(env
, pc
);
1523 /* Remove a specific breakpoint. */
1524 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1526 #if defined(TARGET_HAS_ICE)
1529 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1530 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1531 cpu_breakpoint_remove_by_ref(env
, bp
);
1541 /* Remove a specific breakpoint by reference. */
1542 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1544 #if defined(TARGET_HAS_ICE)
1545 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1547 breakpoint_invalidate(env
, breakpoint
->pc
);
1549 qemu_free(breakpoint
);
1553 /* Remove all matching breakpoints. */
1554 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1556 #if defined(TARGET_HAS_ICE)
1557 CPUBreakpoint
*bp
, *next
;
1559 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1560 if (bp
->flags
& mask
)
1561 cpu_breakpoint_remove_by_ref(env
, bp
);
1566 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1567 CPU loop after each instruction */
1568 void cpu_single_step(CPUState
*env
, int enabled
)
1570 #if defined(TARGET_HAS_ICE)
1571 if (env
->singlestep_enabled
!= enabled
) {
1572 env
->singlestep_enabled
= enabled
;
1574 kvm_update_guest_debug(env
, 0);
1576 /* must flush all the translated code to avoid inconsistencies */
1577 /* XXX: only flush what is necessary */
1584 /* enable or disable low levels log */
1585 void cpu_set_log(int log_flags
)
1587 loglevel
= log_flags
;
1588 if (loglevel
&& !logfile
) {
1589 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1591 perror(logfilename
);
1594 #if !defined(CONFIG_SOFTMMU)
1595 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1597 static char logfile_buf
[4096];
1598 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1600 #elif !defined(_WIN32)
1601 /* Win32 doesn't support line-buffering and requires size >= 2 */
1602 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1606 if (!loglevel
&& logfile
) {
1612 void cpu_set_log_filename(const char *filename
)
1614 logfilename
= strdup(filename
);
1619 cpu_set_log(loglevel
);
1622 static void cpu_unlink_tb(CPUState
*env
)
1624 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1625 problem and hope the cpu will stop of its own accord. For userspace
1626 emulation this often isn't actually as bad as it sounds. Often
1627 signals are used primarily to interrupt blocking syscalls. */
1628 TranslationBlock
*tb
;
1629 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1631 spin_lock(&interrupt_lock
);
1632 tb
= env
->current_tb
;
1633 /* if the cpu is currently executing code, we must unlink it and
1634 all the potentially executing TB */
1636 env
->current_tb
= NULL
;
1637 tb_reset_jump_recursive(tb
);
1639 spin_unlock(&interrupt_lock
);
1642 /* mask must never be zero, except for A20 change call */
1643 void cpu_interrupt(CPUState
*env
, int mask
)
1647 old_mask
= env
->interrupt_request
;
1648 env
->interrupt_request
|= mask
;
1649 if (kvm_enabled() && !kvm_irqchip_in_kernel())
1650 kvm_update_interrupt_request(env
);
1652 #ifndef CONFIG_USER_ONLY
1654 * If called from iothread context, wake the target cpu in
1657 if (!qemu_cpu_self(env
)) {
1664 env
->icount_decr
.u16
.high
= 0xffff;
1665 #ifndef CONFIG_USER_ONLY
1667 && (mask
& ~old_mask
) != 0) {
1668 cpu_abort(env
, "Raised interrupt while not in I/O function");
1676 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1678 env
->interrupt_request
&= ~mask
;
1681 void cpu_exit(CPUState
*env
)
1683 env
->exit_request
= 1;
1687 const CPULogItem cpu_log_items
[] = {
1688 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1689 "show generated host assembly code for each compiled TB" },
1690 { CPU_LOG_TB_IN_ASM
, "in_asm",
1691 "show target assembly code for each compiled TB" },
1692 { CPU_LOG_TB_OP
, "op",
1693 "show micro ops for each compiled TB" },
1694 { CPU_LOG_TB_OP_OPT
, "op_opt",
1697 "before eflags optimization and "
1699 "after liveness analysis" },
1700 { CPU_LOG_INT
, "int",
1701 "show interrupts/exceptions in short format" },
1702 { CPU_LOG_EXEC
, "exec",
1703 "show trace before each executed TB (lots of logs)" },
1704 { CPU_LOG_TB_CPU
, "cpu",
1705 "show CPU state before block translation" },
1707 { CPU_LOG_PCALL
, "pcall",
1708 "show protected mode far calls/returns/exceptions" },
1709 { CPU_LOG_RESET
, "cpu_reset",
1710 "show CPU state before CPU resets" },
1713 { CPU_LOG_IOPORT
, "ioport",
1714 "show all i/o ports accesses" },
1719 #ifndef CONFIG_USER_ONLY
1720 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1721 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1723 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1725 ram_addr_t phys_offset
)
1727 CPUPhysMemoryClient
*client
;
1728 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1729 client
->set_memory(client
, start_addr
, size
, phys_offset
);
1733 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1734 target_phys_addr_t end
)
1736 CPUPhysMemoryClient
*client
;
1737 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1738 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1745 static int cpu_notify_migration_log(int enable
)
1747 CPUPhysMemoryClient
*client
;
1748 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1749 int r
= client
->migration_log(client
, enable
);
1756 static void phys_page_for_each_1(CPUPhysMemoryClient
*client
,
1757 int level
, void **lp
)
1765 PhysPageDesc
*pd
= *lp
;
1766 for (i
= 0; i
< L2_SIZE
; ++i
) {
1767 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1768 client
->set_memory(client
, pd
[i
].region_offset
,
1769 TARGET_PAGE_SIZE
, pd
[i
].phys_offset
);
1774 for (i
= 0; i
< L2_SIZE
; ++i
) {
1775 phys_page_for_each_1(client
, level
- 1, pp
+ i
);
1780 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1783 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1784 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1789 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1791 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1792 phys_page_for_each(client
);
1795 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1797 QLIST_REMOVE(client
, list
);
1801 static int cmp1(const char *s1
, int n
, const char *s2
)
1803 if (strlen(s2
) != n
)
1805 return memcmp(s1
, s2
, n
) == 0;
1808 /* takes a comma separated list of log masks. Return 0 if error. */
1809 int cpu_str_to_log_mask(const char *str
)
1811 const CPULogItem
*item
;
1818 p1
= strchr(p
, ',');
1821 if(cmp1(p
,p1
-p
,"all")) {
1822 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1826 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1827 if (cmp1(p
, p1
- p
, item
->name
))
1841 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1848 fprintf(stderr
, "qemu: fatal: ");
1849 vfprintf(stderr
, fmt
, ap
);
1850 fprintf(stderr
, "\n");
1852 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1854 cpu_dump_state(env
, stderr
, fprintf
, 0);
1856 if (qemu_log_enabled()) {
1857 qemu_log("qemu: fatal: ");
1858 qemu_log_vprintf(fmt
, ap2
);
1861 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1863 log_cpu_state(env
, 0);
1870 #if defined(CONFIG_USER_ONLY)
1872 struct sigaction act
;
1873 sigfillset(&act
.sa_mask
);
1874 act
.sa_handler
= SIG_DFL
;
1875 sigaction(SIGABRT
, &act
, NULL
);
1881 CPUState
*cpu_copy(CPUState
*env
)
1883 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1884 CPUState
*next_cpu
= new_env
->next_cpu
;
1885 int cpu_index
= new_env
->cpu_index
;
1886 #if defined(TARGET_HAS_ICE)
1891 memcpy(new_env
, env
, sizeof(CPUState
));
1893 /* Preserve chaining and index. */
1894 new_env
->next_cpu
= next_cpu
;
1895 new_env
->cpu_index
= cpu_index
;
1897 /* Clone all break/watchpoints.
1898 Note: Once we support ptrace with hw-debug register access, make sure
1899 BP_CPU break/watchpoints are handled correctly on clone. */
1900 QTAILQ_INIT(&env
->breakpoints
);
1901 QTAILQ_INIT(&env
->watchpoints
);
1902 #if defined(TARGET_HAS_ICE)
1903 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1904 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1906 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1907 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1915 #if !defined(CONFIG_USER_ONLY)
1917 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1921 /* Discard jump cache entries for any tb which might potentially
1922 overlap the flushed page. */
1923 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1924 memset (&env
->tb_jmp_cache
[i
], 0,
1925 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1927 i
= tb_jmp_cache_hash_page(addr
);
1928 memset (&env
->tb_jmp_cache
[i
], 0,
1929 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1932 static CPUTLBEntry s_cputlb_empty_entry
= {
1939 /* NOTE: if flush_global is true, also flush global entries (not
1941 void tlb_flush(CPUState
*env
, int flush_global
)
1945 #if defined(DEBUG_TLB)
1946 printf("tlb_flush:\n");
1948 /* must reset current TB so that interrupts cannot modify the
1949 links while we are modifying them */
1950 env
->current_tb
= NULL
;
1952 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1954 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1955 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1959 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1961 env
->tlb_flush_addr
= -1;
1962 env
->tlb_flush_mask
= 0;
1966 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1968 if (addr
== (tlb_entry
->addr_read
&
1969 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1970 addr
== (tlb_entry
->addr_write
&
1971 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1972 addr
== (tlb_entry
->addr_code
&
1973 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1974 *tlb_entry
= s_cputlb_empty_entry
;
1978 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1983 #if defined(DEBUG_TLB)
1984 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1986 /* Check if we need to flush due to large pages. */
1987 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1988 #if defined(DEBUG_TLB)
1989 printf("tlb_flush_page: forced full flush ("
1990 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1991 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env
->current_tb
= NULL
;
2000 addr
&= TARGET_PAGE_MASK
;
2001 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2002 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2003 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
2005 tlb_flush_jmp_cache(env
, addr
);
2008 /* update the TLBs so that writes to code in the virtual page 'addr'
2010 static void tlb_protect_code(ram_addr_t ram_addr
)
2012 cpu_physical_memory_reset_dirty(ram_addr
,
2013 ram_addr
+ TARGET_PAGE_SIZE
,
2017 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2018 tested for self modifying code */
2019 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
2022 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
2025 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
2026 unsigned long start
, unsigned long length
)
2029 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2030 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
2031 if ((addr
- start
) < length
) {
2032 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
2037 /* Note: start and end must be within the same ram block. */
2038 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
2042 unsigned long length
, start1
;
2045 start
&= TARGET_PAGE_MASK
;
2046 end
= TARGET_PAGE_ALIGN(end
);
2048 length
= end
- start
;
2051 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
2053 /* we modify the TLB cache so that the dirty bit will be set again
2054 when accessing the range */
2055 start1
= (unsigned long)qemu_get_ram_ptr(start
);
2056 /* Chek that we don't span multiple blocks - this breaks the
2057 address comparisons below. */
2058 if ((unsigned long)qemu_get_ram_ptr(end
- 1) - start1
2059 != (end
- 1) - start
) {
2063 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2065 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2066 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2067 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2073 int cpu_physical_memory_set_dirty_tracking(int enable
)
2076 in_migration
= enable
;
2077 ret
= cpu_notify_migration_log(!!enable
);
2081 int cpu_physical_memory_get_dirty_tracking(void)
2083 return in_migration
;
2086 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2087 target_phys_addr_t end_addr
)
2091 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2095 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2097 ram_addr_t ram_addr
;
2100 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2101 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2102 + tlb_entry
->addend
);
2103 ram_addr
= qemu_ram_addr_from_host(p
);
2104 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2105 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2110 /* update the TLB according to the current state of the dirty bits */
2111 void cpu_tlb_update_dirty(CPUState
*env
)
2115 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2116 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2117 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2121 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2123 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2124 tlb_entry
->addr_write
= vaddr
;
2127 /* update the TLB corresponding to virtual page vaddr
2128 so that it is no longer dirty */
2129 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2134 vaddr
&= TARGET_PAGE_MASK
;
2135 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2136 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2137 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2140 /* Our TLB does not support large pages, so remember the area covered by
2141 large pages and trigger a full TLB flush if these are invalidated. */
2142 static void tlb_add_large_page(CPUState
*env
, target_ulong vaddr
,
2145 target_ulong mask
= ~(size
- 1);
2147 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
2148 env
->tlb_flush_addr
= vaddr
& mask
;
2149 env
->tlb_flush_mask
= mask
;
2152 /* Extend the existing region to include the new page.
2153 This is a compromise between unnecessary flushes and the cost
2154 of maintaining a full variable size TLB. */
2155 mask
&= env
->tlb_flush_mask
;
2156 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
2159 env
->tlb_flush_addr
&= mask
;
2160 env
->tlb_flush_mask
= mask
;
2163 /* Add a new TLB entry. At most one entry for a given virtual address
2164 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2165 supplied size is only used by tlb_flush_page. */
2166 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2167 target_phys_addr_t paddr
, int prot
,
2168 int mmu_idx
, target_ulong size
)
2173 target_ulong address
;
2174 target_ulong code_address
;
2175 unsigned long addend
;
2178 target_phys_addr_t iotlb
;
2180 assert(size
>= TARGET_PAGE_SIZE
);
2181 if (size
!= TARGET_PAGE_SIZE
) {
2182 tlb_add_large_page(env
, vaddr
, size
);
2184 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2186 pd
= IO_MEM_UNASSIGNED
;
2188 pd
= p
->phys_offset
;
2190 #if defined(DEBUG_TLB)
2191 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2192 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2196 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2197 /* IO memory case (romd handled later) */
2198 address
|= TLB_MMIO
;
2200 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2201 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2203 iotlb
= pd
& TARGET_PAGE_MASK
;
2204 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2205 iotlb
|= IO_MEM_NOTDIRTY
;
2207 iotlb
|= IO_MEM_ROM
;
2209 /* IO handlers are currently passed a physical address.
2210 It would be nice to pass an offset from the base address
2211 of that region. This would avoid having to special case RAM,
2212 and avoid full address decoding in every device.
2213 We can't use the high bits of pd for this because
2214 IO_MEM_ROMD uses these as a ram address. */
2215 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2217 iotlb
+= p
->region_offset
;
2223 code_address
= address
;
2224 /* Make accesses to pages with watchpoints go via the
2225 watchpoint trap routines. */
2226 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2227 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2228 /* Avoid trapping reads of pages with a write breakpoint. */
2229 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2230 iotlb
= io_mem_watch
+ paddr
;
2231 address
|= TLB_MMIO
;
2237 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2238 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2239 te
= &env
->tlb_table
[mmu_idx
][index
];
2240 te
->addend
= addend
- vaddr
;
2241 if (prot
& PAGE_READ
) {
2242 te
->addr_read
= address
;
2247 if (prot
& PAGE_EXEC
) {
2248 te
->addr_code
= code_address
;
2252 if (prot
& PAGE_WRITE
) {
2253 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2254 (pd
& IO_MEM_ROMD
)) {
2255 /* Write access calls the I/O callback. */
2256 te
->addr_write
= address
| TLB_MMIO
;
2257 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2258 !cpu_physical_memory_is_dirty(pd
)) {
2259 te
->addr_write
= address
| TLB_NOTDIRTY
;
2261 te
->addr_write
= address
;
2264 te
->addr_write
= -1;
2270 void tlb_flush(CPUState
*env
, int flush_global
)
2274 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2279 * Walks guest process memory "regions" one by one
2280 * and calls callback function 'fn' for each region.
2283 struct walk_memory_regions_data
2285 walk_memory_regions_fn fn
;
2287 unsigned long start
;
2291 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2292 abi_ulong end
, int new_prot
)
2294 if (data
->start
!= -1ul) {
2295 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2301 data
->start
= (new_prot
? end
: -1ul);
2302 data
->prot
= new_prot
;
2307 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2308 abi_ulong base
, int level
, void **lp
)
2314 return walk_memory_regions_end(data
, base
, 0);
2319 for (i
= 0; i
< L2_SIZE
; ++i
) {
2320 int prot
= pd
[i
].flags
;
2322 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2323 if (prot
!= data
->prot
) {
2324 rc
= walk_memory_regions_end(data
, pa
, prot
);
2332 for (i
= 0; i
< L2_SIZE
; ++i
) {
2333 pa
= base
| ((abi_ulong
)i
<<
2334 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2335 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2345 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2347 struct walk_memory_regions_data data
;
2355 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2356 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2357 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2363 return walk_memory_regions_end(&data
, 0, 0);
2366 static int dump_region(void *priv
, abi_ulong start
,
2367 abi_ulong end
, unsigned long prot
)
2369 FILE *f
= (FILE *)priv
;
2371 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2372 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2373 start
, end
, end
- start
,
2374 ((prot
& PAGE_READ
) ? 'r' : '-'),
2375 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2376 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2381 /* dump memory mappings */
2382 void page_dump(FILE *f
)
2384 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2385 "start", "end", "size", "prot");
2386 walk_memory_regions(f
, dump_region
);
2389 int page_get_flags(target_ulong address
)
2393 p
= page_find(address
>> TARGET_PAGE_BITS
);
2399 /* Modify the flags of a page and invalidate the code if necessary.
2400 The flag PAGE_WRITE_ORG is positioned automatically depending
2401 on PAGE_WRITE. The mmap_lock should already be held. */
2402 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2404 target_ulong addr
, len
;
2406 /* This function should never be called with addresses outside the
2407 guest address space. If this assert fires, it probably indicates
2408 a missing call to h2g_valid. */
2409 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2410 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2412 assert(start
< end
);
2414 start
= start
& TARGET_PAGE_MASK
;
2415 end
= TARGET_PAGE_ALIGN(end
);
2417 if (flags
& PAGE_WRITE
) {
2418 flags
|= PAGE_WRITE_ORG
;
2421 for (addr
= start
, len
= end
- start
;
2423 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2424 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2426 /* If the write protection bit is set, then we invalidate
2428 if (!(p
->flags
& PAGE_WRITE
) &&
2429 (flags
& PAGE_WRITE
) &&
2431 tb_invalidate_phys_page(addr
, 0, NULL
);
2437 int page_check_range(target_ulong start, target_ulong len, int flags)
2443 /* This function should never be called with addresses outside the
2444 guest address space. If this assert fires, it probably indicates
2445 a missing call to h2g_valid. */
2446 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2447 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2453 if (start + len - 1 < start) {
2454 /* We've wrapped around. */
2458 end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2459 start = start & TARGET_PAGE_MASK;
2461 for (addr = start, len = end - start;
2463 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2464 p = page_find(addr >> TARGET_PAGE_BITS);
2467 if (!(p->flags & PAGE_VALID))
2470 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2472 if (flags & PAGE_WRITE) {
2473 if (!(p->flags & PAGE_WRITE_ORG))
2475 /* unprotect the page if it was put read-only because it
2476 contains translated code */
2477 if (!(p->flags & PAGE_WRITE)) {
2478 if (!page_unprotect(addr, 0, NULL))
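/* Editor's note: an illustrative sketch (not in the original file) of how the
 * user-mode page flag API above is typically driven: a guest buffer is
 * validated with page_check_range() before it is written, and a freshly
 * mapped region is tagged with page_set_flags(). The guest_base/g2h details
 * are omitted; addresses and sizes are placeholders. */
#if 0
static int guest_buffer_writable(target_ulong guest_addr, target_ulong size)
{
    /* Returns 0 on success, -1 if any page lacks the requested access. */
    return page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE);
}

static void mark_new_mapping(target_ulong start, target_ulong size)
{
    /* PAGE_VALID marks the pages as mapped; PAGE_WRITE implies that
       PAGE_WRITE_ORG is set automatically, as described above. */
    page_set_flags(start, start + size,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif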
2487 /* called from signal handler: invalidate the code and unprotect the
2488 page. Return TRUE if the fault was successfully handled. */
2489 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2493 target_ulong host_start, host_end, addr;
2495 /* Technically this isn't safe inside a signal handler. However we
2496 know this only ever happens in a synchronous SEGV handler, so in
2497 practice it seems to be ok. */
2500 p = page_find(address >> TARGET_PAGE_BITS);
2506 /* if the page was really writable, then we change its
2507 protection back to writable */
2508 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2509 host_start = address & qemu_host_page_mask;
2510 host_end = host_start + qemu_host_page_size;
2513 for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2514 p = page_find(addr >> TARGET_PAGE_BITS);
2515 p->flags |= PAGE_WRITE;
2518 /* and since the content will be modified, we must invalidate
2519 the corresponding translated code. */
2520 tb_invalidate_phys_page(addr, pc, puc);
2521 #ifdef DEBUG_TB_CHECK
2522 tb_invalidate_check(addr);
2525 mprotect((void *)g2h(host_start), qemu_host_page_size,
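/* Editor's note: page_unprotect() above is the second half of a classic
 * self-modifying-code trick: pages that hold translated code are mprotect()ed
 * read-only, the resulting SIGSEGV is caught, the stale translations are
 * discarded and the page is made writable again. The standalone sketch below
 * (not part of the original file) shows only the bare mechanism with a
 * generic handler; QEMU's real handler additionally restores CPU state. */
#if 0
#include <signal.h>
#include <sys/mman.h>
#include <stdint.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
    /* Round the faulting address down to a host page and re-enable writes. */
    uintptr_t page = (uintptr_t)info->si_addr & ~((uintptr_t)getpagesize() - 1);
    mprotect((void *)page, getpagesize(), PROT_READ | PROT_WRITE);
    /* ...a real handler would invalidate translations for this page here... */
}

static void install_segv_handler(void)
{
    struct sigaction act = { 0 };
    act.sa_sigaction = segv_handler;
    act.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &act, NULL);
}
#endif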
2535 static inline void tlb_set_dirty(CPUState *env,
2536 unsigned long addr, target_ulong vaddr)
2539 #endif /* defined(CONFIG_USER_ONLY) */
2541 #if !defined(CONFIG_USER_ONLY)
2543 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2544 typedef struct subpage_t {
2545 target_phys_addr_t base;
2546 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2547 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2550 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2551 ram_addr_t memory, ram_addr_t region_offset);
2552 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2553 ram_addr_t orig_memory,
2554 ram_addr_t region_offset);
2555 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2558 if (addr > start_addr) \
2561 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2562 if (start_addr2 > 0) \
2566 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2567 end_addr2 = TARGET_PAGE_SIZE - 1; \
2569 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2570 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2575 /* register physical memory.
2576 For RAM, 'size' must be a multiple of the target page size.
2577 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2578 io memory page. The address used when calling the IO function is
2579 the offset from the start of the region, plus region_offset. Both
2580 start_addr and region_offset are rounded down to a page boundary
2581 before calculating this offset. This should not be a problem unless
2582 the low bits of start_addr and region_offset differ. */
2583 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2585 ram_addr_t phys_offset,
2586 ram_addr_t region_offset)
2588 target_phys_addr_t addr, end_addr;
2591 ram_addr_t orig_size = size;
2594 cpu_notify_set_memory(start_addr, size, phys_offset);
2596 if (phys_offset == IO_MEM_UNASSIGNED) {
2597 region_offset = start_addr;
2599 region_offset &= TARGET_PAGE_MASK;
2600 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2601 end_addr = start_addr + (target_phys_addr_t)size;
2602 for (addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2603 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2604 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2605 ram_addr_t orig_memory = p->phys_offset;
2606 target_phys_addr_t start_addr2, end_addr2;
2607 int need_subpage = 0;
2609 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2612 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2613 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2614 &p->phys_offset, orig_memory,
2617 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2620 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2622 p->region_offset = 0;
2624 p->phys_offset = phys_offset;
2625 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2626 (phys_offset & IO_MEM_ROMD))
2627 phys_offset += TARGET_PAGE_SIZE;
2630 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2631 p->phys_offset = phys_offset;
2632 p->region_offset = region_offset;
2633 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2634 (phys_offset & IO_MEM_ROMD)) {
2635 phys_offset += TARGET_PAGE_SIZE;
2637 target_phys_addr_t start_addr2, end_addr2;
2638 int need_subpage = 0;
2640 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2641 end_addr2, need_subpage);
2644 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2645 &p->phys_offset, IO_MEM_UNASSIGNED,
2646 addr & TARGET_PAGE_MASK);
2647 subpage_register(subpage, start_addr2, end_addr2,
2648 phys_offset, region_offset);
2649 p->region_offset = 0;
2653 region_offset += TARGET_PAGE_SIZE;
2656 /* since each CPU stores ram addresses in its TLB cache, we must
2657 reset the modified entries */
2659 for (env = first_cpu; env != NULL; env = env->next_cpu) {
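/* Editor's note: a small usage sketch (not in the original file) for the
 * registration interface documented above. A device's MMIO callback tables
 * (dev_mmio_read/dev_mmio_write, assumed here; see the io-memory example
 * further down for how such tables are built) are registered once and then
 * mapped at guest physical addresses. The second mapping passes a
 * region_offset, so the callbacks see offsets starting at 0x1000 rather than
 * 0, as described in the comment above. Addresses and sizes are placeholders. */
#if 0
extern CPUReadMemoryFunc * const dev_mmio_read[3];    /* assumed callbacks */
extern CPUWriteMemoryFunc * const dev_mmio_write[3];

static void board_map_device(void *dev_state)
{
    int iomem = cpu_register_io_memory(dev_mmio_read, dev_mmio_write,
                                       dev_state);

    /* Plain mapping: callbacks receive offsets 0..0xfff. */
    cpu_register_physical_memory(0x10000000, 0x1000, iomem);

    /* Aliased mapping: callbacks receive offsets 0x1000..0x1fff. */
    cpu_register_physical_memory_offset(0x20000000, 0x1000, iomem, 0x1000);
}
#endif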
2664 /* XXX: temporary until new memory mapping API */
2665 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2669 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2671 return IO_MEM_UNASSIGNED;
2672 return p->phys_offset;
2675 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2678 kvm_coalesce_mmio_region(addr, size);
2681 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2684 kvm_uncoalesce_mmio_region(addr, size);
2687 void qemu_flush_coalesced_mmio_buffer(void)
2690 kvm_flush_coalesced_mmio_buffer();
2693 #if defined(__linux__) && !defined(TARGET_S390X)
2695 #include <sys/vfs.h>
2697 #define HUGETLBFS_MAGIC 0x958458f6
2699 static long gethugepagesize(const char *path)
2705 ret = statfs(path, &fs);
2706 } while (ret != 0 && errno == EINTR);
2713 if (fs.f_type != HUGETLBFS_MAGIC)
2714 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
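/* Editor's note: a self-contained sketch (not in the original file) of the
 * probe gethugepagesize() performs: statfs() the -mem-path directory, warn if
 * it is not hugetlbfs, and use f_bsize as the huge page size. Error handling
 * is reduced to the essentials. */
#if 0
#include <sys/vfs.h>
#include <errno.h>
#include <stdio.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long probe_hugepage_size(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);           /* retry if interrupted */
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }
    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }
    return fs.f_bsize;                     /* block size == huge page size */
}
#endif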
2719 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2727 unsigned long hpagesize;
2729 hpagesize = gethugepagesize(path);
2734 if (memory < hpagesize) {
2738 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2739 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2743 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2747 fd = mkstemp(filename);
2749 perror("unable to create backing store for hugepages");
2756 memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
2759 * ftruncate is not supported by hugetlbfs in older
2760 * hosts, so don't bother bailing out on errors.
2761 * If anything goes wrong with it under other filesystems,
2764 if (ftruncate(fd, memory))
2765 perror("ftruncate");
2768 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2769 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2770 * to sidestep this quirk.
2772 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2773 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2775 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2777 if (area == MAP_FAILED) {
2778 perror("file_ram_alloc: can't mmap RAM pages");
2786 static ram_addr_t find_ram_offset(ram_addr_t size)
2789 ram_addr_t last = 0;
2791 QLIST_FOREACH(block, &ram_list.blocks, next)
2792 last = MAX(last, block->offset + block->length);
2797 ram_addr_t qemu_ram_map(ram_addr_t size, void *host)
2799 RAMBlock *new_block;
2801 size = TARGET_PAGE_ALIGN(size);
2802 new_block = qemu_malloc(sizeof(*new_block));
2804 new_block->host = host;
2806 new_block->offset = find_ram_offset(size);
2807 new_block->length = size;
2809 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2811 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2812 (new_block->offset + size) >> TARGET_PAGE_BITS);
2813 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2814 0xff, size >> TARGET_PAGE_BITS);
2817 kvm_setup_guest_memory(new_block->host, size);
2819 return new_block->offset;
2822 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2824 RAMBlock *new_block;
2826 size = TARGET_PAGE_ALIGN(size);
2827 new_block = qemu_malloc(sizeof(*new_block));
2830 #if defined (__linux__) && !defined(TARGET_S390X)
2831 new_block->host = file_ram_alloc(size, mem_path);
2832 if (!new_block->host) {
2833 new_block->host = qemu_vmalloc(size);
2834 #ifdef MADV_MERGEABLE
2835 madvise(new_block->host, size, MADV_MERGEABLE);
2839 fprintf(stderr, "-mem-path option unsupported\n");
2843 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2844 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2845 new_block->host = mmap((void*)0x1000000, size,
2846 PROT_EXEC|PROT_READ|PROT_WRITE,
2847 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2849 new_block->host = qemu_vmalloc(size);
2851 #ifdef MADV_MERGEABLE
2852 madvise(new_block->host, size, MADV_MERGEABLE);
2855 new_block->offset = find_ram_offset(size);
2856 new_block->length = size;
2858 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2860 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2861 (new_block->offset + size) >> TARGET_PAGE_BITS);
2862 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2863 0xff, size >> TARGET_PAGE_BITS);
2866 kvm_setup_guest_memory(new_block->host, size);
2868 return new_block->offset;
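/* Editor's note: an illustrative sketch (not in the original file) of how a
 * board model typically consumes qemu_ram_alloc(): allocate a block, then map
 * it into the guest physical address space. The base address and size are
 * placeholders; IO_MEM_RAM is zero, so the OR is purely documentary. */
#if 0
static void map_main_ram(void)
{
    ram_addr_t ram_size = 128 * 1024 * 1024;
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* The returned offset identifies the block in ram_list; registering it
       makes the pages visible to the guest starting at physical address 0. */
    cpu_register_physical_memory(0x00000000, ram_size, ram_offset | IO_MEM_RAM);
}
#endif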
2871 void qemu_ram_free(ram_addr_t addr)
2873 /* TODO: implement this. */
2876 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2877 With the exception of the softmmu code in this file, this should
2878 only be used for local memory (e.g. video ram) that the device owns,
2879 and knows it isn't going to access beyond the end of the block.
2881 It should not be used for general purpose DMA.
2882 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2884 void *qemu_get_ram_ptr(ram_addr_t addr)
2888 QLIST_FOREACH(block, &ram_list.blocks, next) {
2889 if (addr - block->offset < block->length) {
2890 QLIST_REMOVE(block, next);
2891 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2892 return block->host + (addr - block->offset);
2896 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2902 int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2905 uint8_t *host = ptr;
2907 QLIST_FOREACH(block, &ram_list.blocks, next) {
2908 if (host - block->host < block->length) {
2909 *ram_addr = block->offset + (host - block->host);
2916 /* Some of the softmmu routines need to translate from a host pointer
2917 (typically a TLB entry) back to a ram offset. */
2918 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2920 ram_addr_t ram_addr;
2922 if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
2923 fprintf(stderr, "Bad ram pointer %p\n", ptr);
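/* Editor's note: a minimal sketch (not in the original file) of the round
 * trip between the two lookup directions above: a ram_addr_t is turned into a
 * host pointer with qemu_get_ram_ptr(), and that pointer maps back to the
 * same offset via qemu_ram_addr_from_host(). */
#if 0
static void ram_round_trip(ram_addr_t ram_offset)
{
    uint8_t *host = qemu_get_ram_ptr(ram_offset);   /* host view of the page */
    ram_addr_t back = qemu_ram_addr_from_host(host);

    if (back != ram_offset) {
        fprintf(stderr, "RAM offset %lx did not round-trip\n",
                (unsigned long)ram_offset);
    }
}
#endif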
2929 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2931 #ifdef DEBUG_UNASSIGNED
2932 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2934 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2935 do_unassigned_access(addr, 0, 0, 0, 1);
2940 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2942 #ifdef DEBUG_UNASSIGNED
2943 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2945 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2946 do_unassigned_access(addr, 0, 0, 0, 2);
2951 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2953 #ifdef DEBUG_UNASSIGNED
2954 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2956 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2957 do_unassigned_access(addr, 0, 0, 0, 4);
2962 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2964 #ifdef DEBUG_UNASSIGNED
2965 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2967 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2968 do_unassigned_access(addr, 1, 0, 0, 1);
2972 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2974 #ifdef DEBUG_UNASSIGNED
2975 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2977 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2978 do_unassigned_access(addr, 1, 0, 0, 2);
2982 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2984 #ifdef DEBUG_UNASSIGNED
2985 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2987 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2988 do_unassigned_access(addr, 1, 0, 0, 4);
2992 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2993 unassigned_mem_readb,
2994 unassigned_mem_readw,
2995 unassigned_mem_readl,
2998 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2999 unassigned_mem_writeb,
3000 unassigned_mem_writew,
3001 unassigned_mem_writel,
3004 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3008 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3009 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3010 #if !defined(CONFIG_USER_ONLY)
3011 tb_invalidate_phys_page_fast(ram_addr, 1);
3012 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3015 stb_p(qemu_get_ram_ptr(ram_addr), val);
3016 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3017 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3018 /* we remove the notdirty callback only if the code has been
3020 if (dirty_flags == 0xff)
3021 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3024 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3028 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3029 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3030 #if !defined(CONFIG_USER_ONLY)
3031 tb_invalidate_phys_page_fast(ram_addr, 2);
3032 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3035 stw_p(qemu_get_ram_ptr(ram_addr), val);
3036 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3037 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3038 /* we remove the notdirty callback only if the code has been
3040 if (dirty_flags == 0xff)
3041 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3044 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3048 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3049 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3050 #if !defined(CONFIG_USER_ONLY)
3051 tb_invalidate_phys_page_fast(ram_addr, 4);
3052 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3055 stl_p(qemu_get_ram_ptr(ram_addr), val);
3056 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3057 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3058 /* we remove the notdirty callback only if the code has been
3060 if (dirty_flags == 0xff)
3061 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3064 static CPUReadMemoryFunc * const error_mem_read[3] = {
3065 NULL, /* never used */
3066 NULL, /* never used */
3067 NULL, /* never used */
3070 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3071 notdirty_mem_writeb,
3072 notdirty_mem_writew,
3073 notdirty_mem_writel,
3076 /* Generate a debug exception if a watchpoint has been hit. */
3077 static void check_watchpoint(int offset, int len_mask, int flags)
3079 CPUState *env = cpu_single_env;
3080 target_ulong pc, cs_base;
3081 TranslationBlock *tb;
3086 if (env->watchpoint_hit) {
3087 /* We re-entered the check after replacing the TB. Now raise
3088 * the debug interrupt so that it will trigger after the
3089 * current instruction. */
3090 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3093 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3094 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3095 if ((vaddr == (wp->vaddr & len_mask) ||
3096 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3097 wp->flags |= BP_WATCHPOINT_HIT;
3098 if (!env->watchpoint_hit) {
3099 env->watchpoint_hit = wp;
3100 tb = tb_find_pc(env->mem_io_pc);
3102 cpu_abort(env, "check_watchpoint: could not find TB for "
3103 "pc=%p", (void *)env->mem_io_pc);
3105 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3106 tb_phys_invalidate(tb, -1);
3107 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3108 env->exception_index = EXCP_DEBUG;
3110 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3111 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3113 cpu_resume_from_signal(env, NULL);
3116 wp->flags &= ~BP_WATCHPOINT_HIT;
3121 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3122 so these check for a hit then pass through to the normal out-of-line
3124 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3126 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3127 return ldub_phys(addr);
3130 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3132 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3133 return lduw_phys(addr);
3136 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3138 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3139 return ldl_phys(addr);
3142 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3145 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3146 stb_phys(addr, val);
3149 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3152 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3153 stw_phys(addr, val);
3156 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3159 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3160 stl_phys(addr, val);
3163 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3169 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
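/* Editor's note: a hedged sketch (not in the original file) of how the
 * watchpoint machinery above is typically armed. cpu_watchpoint_insert() is
 * the helper used by this era of QEMU to add a CPUWatchpoint to
 * env->watchpoints; its exact signature and the chosen flags should be taken
 * as an assumption, and the watched address is illustrative only. */
#if 0
static void arm_write_watchpoint(CPUState *env, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    /* Any 4-byte write hitting guest_addr is routed through
       watch_mem_write[], which calls check_watchpoint() before storing. */
    cpu_watchpoint_insert(env, guest_addr, 4, BP_MEM_WRITE, &wp);
}
#endif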
3175 static inline uint32_t subpage_readlen (subpage_t *mmio,
3176 target_phys_addr_t addr,
3179 unsigned int idx = SUBPAGE_IDX(addr);
3180 #if defined(DEBUG_SUBPAGE)
3181 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3182 mmio, len, addr, idx);
3185 addr += mmio->region_offset[idx];
3186 idx = mmio->sub_io_index[idx];
3187 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3190 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3191 uint32_t value, unsigned int len)
3193 unsigned int idx = SUBPAGE_IDX(addr);
3194 #if defined(DEBUG_SUBPAGE)
3195 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3196 __func__, mmio, len, addr, idx, value);
3199 addr += mmio->region_offset[idx];
3200 idx = mmio->sub_io_index[idx];
3201 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3204 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3206 return subpage_readlen(opaque, addr, 0);
3209 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3212 subpage_writelen(opaque, addr, value, 0);
3215 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3217 return subpage_readlen(opaque, addr, 1);
3220 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3223 subpage_writelen(opaque, addr, value, 1);
3226 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3228 return subpage_readlen(opaque, addr, 2);
3231 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3234 subpage_writelen(opaque, addr, value, 2);
3237 static CPUReadMemoryFunc * const subpage_read[] = {
3243 static CPUWriteMemoryFunc * const subpage_write[] = {
3249 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3250 ram_addr_t memory, ram_addr_t region_offset)
3254 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3256 idx = SUBPAGE_IDX(start);
3257 eidx = SUBPAGE_IDX(end);
3258 #if defined(DEBUG_SUBPAGE)
3259 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3260 mmio, start, end, idx, eidx, memory);
3262 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3263 for (; idx <= eidx; idx++) {
3264 mmio->sub_io_index[idx] = memory;
3265 mmio->region_offset[idx] = region_offset;
3271 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3272 ram_addr_t orig_memory,
3273 ram_addr_t region_offset)
3278 mmio = qemu_mallocz(sizeof(subpage_t));
3281 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3282 #if defined(DEBUG_SUBPAGE)
3283 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3284 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3286 *phys = subpage_memory | IO_MEM_SUBPAGE;
3287 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3292 static int get_free_io_mem_idx(void)
3296 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3297 if (!io_mem_used[i]) {
3301 fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3305 /* mem_read and mem_write are arrays of functions containing the
3306 function to access byte (index 0), word (index 1) and dword (index
3307 2). Functions can be omitted with a NULL function pointer.
3308 If io_index is non-zero, the corresponding io zone is
3309 modified. If it is zero, a new io zone is allocated. The return
3310 value can be used with cpu_register_physical_memory(). (-1) is
3311 returned on error. */
3312 static int cpu_register_io_memory_fixed(int io_index,
3313 CPUReadMemoryFunc * const *mem_read,
3314 CPUWriteMemoryFunc * const *mem_write,
3319 if (io_index <= 0) {
3320 io_index = get_free_io_mem_idx();
3324 io_index >>= IO_MEM_SHIFT;
3325 if (io_index >= IO_MEM_NB_ENTRIES)
3329 for (i = 0; i < 3; ++i) {
3330 io_mem_read[io_index][i]
3331 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3333 for (i = 0; i < 3; ++i) {
3334 io_mem_write[io_index][i]
3335 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3337 io_mem_opaque[io_index] = opaque;
3339 return (io_index << IO_MEM_SHIFT);
3342 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3343 CPUWriteMemoryFunc * const *mem_write,
3346 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
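/* Editor's note: an illustrative sketch (not in the original file) of the API
 * documented above: a device supplies byte/word/dword read and write
 * callbacks, registers them, and maps the resulting io index into the guest
 * physical address space. The device state and register layout are made up. */
#if 0
typedef struct DemoDev {
    uint32_t status;
} DemoDev;

static uint32_t demo_readl(void *opaque, target_phys_addr_t addr)
{
    DemoDev *d = opaque;
    return addr == 0 ? d->status : 0;      /* one 32-bit status register */
}

static void demo_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    DemoDev *d = opaque;
    if (addr == 0) {
        d->status = val;
    }
}

/* Index 0 = byte, 1 = word, 2 = dword; NULL entries fall back to the
   unassigned_mem_* handlers installed by cpu_register_io_memory_fixed(). */
static CPUReadMemoryFunc * const demo_read[3] = { NULL, NULL, demo_readl };
static CPUWriteMemoryFunc * const demo_write[3] = { NULL, NULL, demo_writel };

static void demo_init(DemoDev *d, target_phys_addr_t base)
{
    int iomem = cpu_register_io_memory(demo_read, demo_write, d);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomem);
}
#endif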
3349 void cpu_unregister_io_memory(int io_table_address)
3352 int io_index = io_table_address >> IO_MEM_SHIFT;
3354 for (i = 0; i < 3; i++) {
3355 io_mem_read[io_index][i] = unassigned_mem_read[i];
3356 io_mem_write[io_index][i] = unassigned_mem_write[i];
3358 io_mem_opaque[io_index] = NULL;
3359 io_mem_used[io_index] = 0;
3362 static void io_mem_init(void)
3366 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3367 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3368 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3372 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3373 watch_mem_write, NULL);
3376 #endif /* !defined(CONFIG_USER_ONLY) */
3378 /* physical memory access (slow version, mainly for debug) */
3379 #if defined(CONFIG_USER_ONLY)
3380 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3381 uint8_t *buf, int len, int is_write)
3388 page = addr & TARGET_PAGE_MASK;
3389 l = (page + TARGET_PAGE_SIZE) - addr;
3392 flags = page_get_flags(page);
3393 if (!(flags & PAGE_VALID))
3396 if (!(flags & PAGE_WRITE))
3398 /* XXX: this code should not depend on lock_user */
3399 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3402 unlock_user(p, addr, l);
3404 if (!(flags & PAGE_READ))
3406 /* XXX: this code should not depend on lock_user */
3407 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3410 unlock_user(p, addr, 0);
3420 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3421 int len, int is_write)
3426 target_phys_addr_t page;
3431 page = addr & TARGET_PAGE_MASK;
3432 l = (page + TARGET_PAGE_SIZE) - addr;
3435 p = phys_page_find(page >> TARGET_PAGE_BITS);
3437 pd = IO_MEM_UNASSIGNED;
3439 pd = p->phys_offset;
3443 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3444 target_phys_addr_t addr1 = addr;
3445 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3447 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3448 /* XXX: could force cpu_single_env to NULL to avoid
3450 if (l >= 4 && ((addr1 & 3) == 0)) {
3451 /* 32 bit write access */
3453 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3455 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3456 /* 16 bit write access */
3458 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3461 /* 8 bit write access */
3463 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3467 unsigned long addr1;
3468 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3470 ptr = qemu_get_ram_ptr(addr1);
3471 memcpy(ptr, buf, l);
3472 if (!cpu_physical_memory_is_dirty(addr1)) {
3473 /* invalidate code */
3474 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3476 cpu_physical_memory_set_dirty_flags(
3477 addr1, (0xff & ~CODE_DIRTY_FLAG));
3479 /* qemu doesn't execute guest code directly, but kvm does
3480 therefore flush instruction caches */
3482 flush_icache_range((unsigned long)ptr,
3483 ((unsigned long)ptr)+l);
3486 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3487 !(pd & IO_MEM_ROMD)) {
3488 target_phys_addr_t addr1 = addr;
3490 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3492 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3493 if (l >= 4 && ((addr1 & 3) == 0)) {
3494 /* 32 bit read access */
3495 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3498 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3499 /* 16 bit read access */
3500 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3504 /* 8 bit read access */
3505 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3511 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3512 (addr & ~TARGET_PAGE_MASK);
3513 memcpy(buf, ptr, l);
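/* Editor's note: a brief sketch (not in the original file) of the usual way
 * devices drive the slow-path accessor above; cpu_physical_memory_read() and
 * cpu_physical_memory_write() are the thin wrappers around
 * cpu_physical_memory_rw() used throughout the tree. The descriptor layout is
 * made up and guest byte-order handling is omitted for brevity. */
#if 0
struct demo_desc {
    uint32_t buf_addr;
    uint32_t buf_len;
};

static void demo_dma_copy(target_phys_addr_t desc_paddr, const uint8_t *src)
{
    struct demo_desc desc;

    /* Fetch a descriptor from guest memory, then fill the buffer it names. */
    cpu_physical_memory_read(desc_paddr, (uint8_t *)&desc, sizeof(desc));
    cpu_physical_memory_write(desc.buf_addr, src, desc.buf_len);
}
#endif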
3522 /* used for ROM loading: can write in RAM and ROM */
3523 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3524 const uint8_t *buf, int len)
3528 target_phys_addr_t page;
3533 page = addr & TARGET_PAGE_MASK;
3534 l = (page + TARGET_PAGE_SIZE) - addr;
3537 p = phys_page_find(page >> TARGET_PAGE_BITS);
3539 pd = IO_MEM_UNASSIGNED;
3541 pd = p->phys_offset;
3544 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3545 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3546 !(pd & IO_MEM_ROMD)) {
3549 unsigned long addr1;
3550 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3552 ptr = qemu_get_ram_ptr(addr1);
3553 memcpy(ptr, buf, l);
3563 target_phys_addr_t addr;
3564 target_phys_addr_t len;
3567 static BounceBuffer bounce;
3569 typedef struct MapClient {
3571 void (*callback)(void *opaque);
3572 QLIST_ENTRY(MapClient) link;
3575 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3576 = QLIST_HEAD_INITIALIZER(map_client_list);
3578 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3580 MapClient *client = qemu_malloc(sizeof(*client));
3582 client->opaque = opaque;
3583 client->callback = callback;
3584 QLIST_INSERT_HEAD(&map_client_list, client, link);
3588 void cpu_unregister_map_client(void *_client)
3590 MapClient *client = (MapClient *)_client;
3592 QLIST_REMOVE(client, link);
3596 static void cpu_notify_map_clients(void)
3600 while (!QLIST_EMPTY(&map_client_list)) {
3601 client = QLIST_FIRST(&map_client_list);
3602 client->callback(client->opaque);
3603 cpu_unregister_map_client(client);
3607 /* Map a physical memory region into a host virtual address.
3608 * May map a subset of the requested range, given by and returned in *plen.
3609 * May return NULL if resources needed to perform the mapping are exhausted.
3610 * Use only for reads OR writes - not for read-modify-write operations.
3611 * Use cpu_register_map_client() to know when retrying the map operation is
3612 * likely to succeed.
3614 void *cpu_physical_memory_map(target_phys_addr_t addr,
3615 target_phys_addr_t *plen,
3618 target_phys_addr_t len = *plen;
3619 target_phys_addr_t done = 0;
3621 uint8_t *ret = NULL;
3623 target_phys_addr_t page;
3626 unsigned long addr1;
3629 page = addr & TARGET_PAGE_MASK;
3630 l = (page + TARGET_PAGE_SIZE) - addr;
3633 p = phys_page_find(page >> TARGET_PAGE_BITS);
3635 pd = IO_MEM_UNASSIGNED;
3637 pd = p->phys_offset;
3640 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3641 if (done || bounce.buffer) {
3644 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3648 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3650 ptr = bounce.buffer;
3652 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3653 ptr = qemu_get_ram_ptr(addr1);
3657 } else if (ret + done != ptr) {
3669 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3670 * Will also mark the memory as dirty if is_write == 1. access_len gives
3671 * the amount of memory that was actually read or written by the caller.
3673 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3674 int is_write, target_phys_addr_t access_len)
3676 unsigned long flush_len = (unsigned long)access_len;
3678 if (buffer != bounce.buffer) {
3680 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3681 while (access_len) {
3683 l = TARGET_PAGE_SIZE;
3686 if (!cpu_physical_memory_is_dirty(addr1)) {
3687 /* invalidate code */
3688 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3690 cpu_physical_memory_set_dirty_flags(
3691 addr1, (0xff & ~CODE_DIRTY_FLAG));
3696 dma_flush_range((unsigned long)buffer,
3697 (unsigned long)buffer + flush_len);
3702 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3704 qemu_vfree(bounce.buffer);
3705 bounce.buffer = NULL;
3706 cpu_notify_map_clients();
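/* Editor's note: a hedged sketch (not in the original file) of the intended
 * calling pattern for the two functions above: map as much of a DMA region as
 * possible, fall back to cpu_register_map_client() when the single bounce
 * buffer is busy, and always unmap with the number of bytes actually
 * transferred. The retry callback and its wiring are illustrative only. */
#if 0
static void demo_dma_retry(void *opaque)
{
    /* In a real device this would re-issue the pending transfer. */
}

static void demo_dma_write(target_phys_addr_t dma_addr,
                           target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Bounce buffer exhausted: ask to be notified and retry later. */
        cpu_register_map_client(NULL, demo_dma_retry);
        return;
    }

    memset(host, 0, plen);                   /* produce the data in place */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif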
3709 /* warning: addr must be aligned */
3710 uint32_t ldl_phys(target_phys_addr_t addr)
3718 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3720 pd = IO_MEM_UNASSIGNED;
3722 pd = p->phys_offset;
3725 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3726 !(pd & IO_MEM_ROMD)) {
3728 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3730 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3731 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3734 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3735 (addr & ~TARGET_PAGE_MASK);
3741 /* warning: addr must be aligned */
3742 uint64_t ldq_phys(target_phys_addr_t addr)
3750 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3752 pd = IO_MEM_UNASSIGNED;
3754 pd = p->phys_offset;
3757 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3758 !(pd & IO_MEM_ROMD)) {
3760 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3762 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3763 #ifdef TARGET_WORDS_BIGENDIAN
3764 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3765 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3767 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3768 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3772 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3773 (addr & ~TARGET_PAGE_MASK);
3780 uint32_t ldub_phys(target_phys_addr_t addr)
3783 cpu_physical_memory_read(addr, &val, 1);
3787 /* warning: addr must be aligned */
3788 uint32_t lduw_phys(target_phys_addr_t addr)
3796 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3798 pd = IO_MEM_UNASSIGNED;
3800 pd = p->phys_offset;
3803 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3804 !(pd & IO_MEM_ROMD)) {
3806 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3808 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3809 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3812 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3813 (addr & ~TARGET_PAGE_MASK);
3819 /* warning: addr must be aligned. The ram page is not marked as dirty
3820 and the code inside is not invalidated. It is useful if the dirty
3821 bits are used to track modified PTEs */
3822 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3829 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3831 pd = IO_MEM_UNASSIGNED;
3833 pd = p->phys_offset;
3836 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3837 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3839 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3840 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3842 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3843 ptr = qemu_get_ram_ptr(addr1);
3846 if (unlikely(in_migration)) {
3847 if (!cpu_physical_memory_is_dirty(addr1)) {
3848 /* invalidate code */
3849 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3851 cpu_physical_memory_set_dirty_flags(
3852 addr1, (0xff & ~CODE_DIRTY_FLAG));
3858 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3865 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3867 pd = IO_MEM_UNASSIGNED;
3869 pd = p->phys_offset;
3872 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3873 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3875 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3876 #ifdef TARGET_WORDS_BIGENDIAN
3877 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3878 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3880 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3881 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3884 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3885 (addr & ~TARGET_PAGE_MASK);
3890 /* warning: addr must be aligned */
3891 void stl_phys(target_phys_addr_t addr, uint32_t val)
3898 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3900 pd = IO_MEM_UNASSIGNED;
3902 pd = p->phys_offset;
3905 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3906 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3908 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3909 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3911 unsigned long addr1;
3912 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3914 ptr = qemu_get_ram_ptr(addr1);
3916 if (!cpu_physical_memory_is_dirty(addr1)) {
3917 /* invalidate code */
3918 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3920 cpu_physical_memory_set_dirty_flags(addr1,
3921 (0xff & ~CODE_DIRTY_FLAG));
3927 void stb_phys(target_phys_addr_t addr, uint32_t val)
3930 cpu_physical_memory_write(addr, &v, 1);
3933 /* warning: addr must be aligned */
3934 void stw_phys(target_phys_addr_t addr, uint32_t val)
3941 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3943 pd = IO_MEM_UNASSIGNED;
3945 pd = p->phys_offset;
3948 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3949 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3951 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3952 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3954 unsigned long addr1;
3955 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3957 ptr = qemu_get_ram_ptr(addr1);
3959 if (!cpu_physical_memory_is_dirty(addr1)) {
3960 /* invalidate code */
3961 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3963 cpu_physical_memory_set_dirty_flags(addr1,
3964 (0xff & ~CODE_DIRTY_FLAG));
3970 void stq_phys(target_phys_addr_t addr, uint64_t val)
3973 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
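/* Editor's note: a small sketch (not in the original file) showing the
 * word-sized helpers above being used the way device emulation and page-table
 * walkers typically use them: read a word from guest physical memory, update
 * it, and write it back. stl_phys_notdirty() is the variant a PTE walker
 * would use so the page is not flagged dirty, as described above. The address
 * is a placeholder. */
#if 0
static void demo_bump_counter(target_phys_addr_t counter_paddr)
{
    uint32_t v = ldl_phys(counter_paddr);   /* 32-bit guest-physical load */
    stl_phys(counter_paddr, v + 1);         /* store, marking the page dirty */
}
#endif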
3976 /* virtual memory access for debug (includes writing to ROM) */
3977 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3978 uint8_t *buf, int len, int is_write)
3981 target_phys_addr_t phys_addr;
3985 page = addr & TARGET_PAGE_MASK;
3986 phys_addr = cpu_get_phys_page_debug(env, page);
3987 /* if no physical page mapped, return an error */
3988 if (phys_addr == -1)
3990 l = (page + TARGET_PAGE_SIZE) - addr;
3993 phys_addr += (addr & ~TARGET_PAGE_MASK);
3995 cpu_physical_memory_write_rom(phys_addr, buf, l);
3997 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4006 /* in deterministic execution mode, instructions doing device I/Os
4007 must be at the end of the TB */
4008 void cpu_io_recompile(CPUState *env, void *retaddr)
4010 TranslationBlock *tb;
4012 target_ulong pc, cs_base;
4015 tb = tb_find_pc((unsigned long)retaddr);
4017 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4020 n = env->icount_decr.u16.low + tb->icount;
4021 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4022 /* Calculate how many instructions had been executed before the fault
4024 n = n - env->icount_decr.u16.low;
4025 /* Generate a new TB ending on the I/O insn. */
4027 /* On MIPS and SH, delay slot instructions can only be restarted if
4028 they were already the first instruction in the TB. If this is not
4029 the first instruction in a TB then re-execute the preceding
4031 #if defined(TARGET_MIPS)
4032 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4033 env->active_tc.PC -= 4;
4034 env->icount_decr.u16.low++;
4035 env->hflags &= ~MIPS_HFLAG_BMASK;
4037 #elif defined(TARGET_SH4)
4038 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4041 env->icount_decr.u16.low++;
4042 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4045 /* This should never happen. */
4046 if (n > CF_COUNT_MASK)
4047 cpu_abort(env, "TB too big during recompile");
4049 cflags = n | CF_LAST_IO;
4051 cs_base = tb->cs_base;
4053 tb_phys_invalidate(tb, -1);
4054 /* FIXME: In theory this could raise an exception. In practice
4055 we have already translated the block once so it's probably ok. */
4056 tb_gen_code(env, pc, cs_base, flags, cflags);
4057 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4058 the first in the TB) then we end up generating a whole new TB and
4059 repeating the fault, which is horribly inefficient.
4060 Better would be to execute just this insn uncached, or generate a
4062 cpu_resume_from_signal(env, NULL);
4065 #if !defined(CONFIG_USER_ONLY)
4067 void dump_exec_info(FILE *f,
4068 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4070 int i, target_code_size, max_target_code_size;
4071 int direct_jmp_count, direct_jmp2_count, cross_page;
4072 TranslationBlock *tb;
4074 target_code_size = 0;
4075 max_target_code_size = 0;
4077 direct_jmp_count = 0;
4078 direct_jmp2_count = 0;
4079 for (i = 0; i < nb_tbs; i++) {
4081 target_code_size += tb->size;
4082 if (tb->size > max_target_code_size)
4083 max_target_code_size = tb->size;
4084 if (tb->page_addr[1] != -1)
4086 if (tb->tb_next_offset[0] != 0xffff) {
4088 if (tb->tb_next_offset[1] != 0xffff) {
4089 direct_jmp2_count++;
4093 /* XXX: avoid using doubles ? */
4094 cpu_fprintf(f, "Translation buffer state:\n");
4095 cpu_fprintf(f, "gen code size %ld/%ld\n",
4096 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4097 cpu_fprintf(f, "TB count %d/%d\n",
4098 nb_tbs, code_gen_max_blocks);
4099 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4100 nb_tbs ? target_code_size / nb_tbs : 0,
4101 max_target_code_size);
4102 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4103 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4104 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4105 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4107 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4108 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4110 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4112 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4113 cpu_fprintf(f, "\nStatistics:\n");
4114 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4115 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4116 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4117 #ifdef CONFIG_PROFILER
4118 tcg_dump_info(f, cpu_fprintf);
4122 #define MMUSUFFIX _cmmu
4123 #define GETPC() NULL
4124 #define env cpu_single_env
4125 #define SOFTMMU_CODE_ACCESS
4128 #include "softmmu_template.h"
4131 #include "softmmu_template.h"
4134 #include "softmmu_template.h"
4137 #include "softmmu_template.h"