/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
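
/* Layout note: code_gen_buffer holds all generated host code and
   code_gen_ptr is the bump allocator into it.  Once code_gen_ptr passes
   code_gen_buffer_max_size, tb_alloc() (below) refuses new blocks and the
   caller falls back to tb_flush(), which resets the whole buffer. */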
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
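
/* Worked example (illustrative): with TARGET_PAGE_BITS == 12 and a 32-bit
   user-mode address space, 20 bits of page index remain.  V_L1_BITS_REM is
   20 % 10 == 0, which is < 4, so V_L1_BITS becomes 10: a lookup is one
   1024-entry L1 table plus a single 1024-entry L2 level. */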
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
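
/* mprotect() operates on whole host pages, so the start address is rounded
   down and the end rounded up to a page boundary before changing
   protections; the region passed in may straddle page boundaries. */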
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
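
/* code_gen_buffer_max_size deliberately leaves TCG_MAX_OP_SIZE * OPC_BUF_SIZE
   bytes of slack so the overflow check in tb_alloc() can be made before a
   block is translated rather than after it has been emitted. */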
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
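
/* The low two bits of each 'page_next' link encode which of the TB's (up to)
   two pages the link belongs to; masking with ~3 recovers the actual
   TranslationBlock pointer.  The jump lists below use the same tagging
   scheme, with the value 2 marking the list head. */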
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
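
/* Worked example (illustrative): set_bits(tab, 3, 7) marks bits 3..9.
   start and end fall in different bytes, so the first byte is OR-ed with
   0xff << 3 == 0xf8 (bits 3-7) and the final partial byte with
   ~(0xff << (10 & 7)) == 0x03 (bits 8-9). */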
/* build a bitmap of the code present in a page */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1159 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1160 unsigned long pc
, void *puc
)
1162 TranslationBlock
*tb
;
1165 #ifdef TARGET_HAS_PRECISE_SMC
1166 TranslationBlock
*current_tb
= NULL
;
1167 CPUState
*env
= cpu_single_env
;
1168 int current_tb_modified
= 0;
1169 target_ulong current_pc
= 0;
1170 target_ulong current_cs_base
= 0;
1171 int current_flags
= 0;
1174 addr
&= TARGET_PAGE_MASK
;
1175 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1179 #ifdef TARGET_HAS_PRECISE_SMC
1180 if (tb
&& pc
!= 0) {
1181 current_tb
= tb_find_pc(pc
);
1184 while (tb
!= NULL
) {
1186 tb
= (TranslationBlock
*)((long)tb
& ~3);
1187 #ifdef TARGET_HAS_PRECISE_SMC
1188 if (current_tb
== tb
&&
1189 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1190 /* If we are modifying the current TB, we must stop
1191 its execution. We could be more precise by checking
1192 that the modification is after the current PC, but it
1193 would require a specialized function to partially
1194 restore the CPU state */
1196 current_tb_modified
= 1;
1197 cpu_restore_state(current_tb
, env
, pc
);
1198 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1201 #endif /* TARGET_HAS_PRECISE_SMC */
1202 tb_phys_invalidate(tb
, addr
);
1203 tb
= tb
->page_next
[n
];
1206 #ifdef TARGET_HAS_PRECISE_SMC
1207 if (current_tb_modified
) {
1208 /* we generate a block containing just the instruction
1209 modifying the memory. It will ensure that it cannot modify
1211 env
->current_tb
= NULL
;
1212 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1213 cpu_resume_from_signal(env
, puc
);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
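
/* If no exact match is found, the search exits with m_max indexing the last
   TB whose tc_ptr lies below the searched address; since blocks are laid out
   contiguously in allocation order, that is the block containing it. */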
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
1552 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1554 #if defined(TARGET_HAS_ICE)
1555 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1557 breakpoint_invalidate(env
, breakpoint
->pc
);
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}
)
1764 CPUPhysMemoryClient
*client
;
1765 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1766 int r
= client
->migration_log(client
, enable
);
struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}
*client
)
1843 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1844 phys_page_for_each(client
);
1847 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1849 QLIST_REMOVE(client
, list
);
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
*env
, const char *fmt
, ...)
1900 fprintf(stderr
, "qemu: fatal: ");
1901 vfprintf(stderr
, fmt
, ap
);
1902 fprintf(stderr
, "\n");
1904 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1906 cpu_dump_state(env
, stderr
, fprintf
, 0);
1908 if (qemu_log_enabled()) {
1909 qemu_log("qemu: fatal: ");
1910 qemu_log_vprintf(fmt
, ap2
);
1913 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1915 log_cpu_state(env
, 0);
1922 #if defined(CONFIG_USER_ONLY)
1924 struct sigaction act
;
1925 sigfillset(&act
.sa_mask
);
1926 act
.sa_handler
= SIG_DFL
;
1927 sigaction(SIGABRT
, &act
, NULL
);
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
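
/* Two bucket ranges are cleared because a TB can start on the page before
   'addr' and run into the flushed page, so its jump cache entry is indexed
   by a PC on the preceding page. */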
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}
int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}
int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
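
/* Worked example (illustrative): if a 2MB page at 0x00200000 is being
   tracked and another 2MB page at 0x00600000 is added, the mask is widened
   (4MB, then 8MB alignment) until one naturally aligned region covers both;
   the tracked region becomes 8MB at 0x00000000. */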
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
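
/* te->addend caches host_ram_ptr - vaddr, so a TLB hit can turn a guest
   virtual address into a host pointer with a single addition; the iotlb
   entry is likewise stored biased by -vaddr. */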
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};
*data
,
2375 abi_ulong end
, int new_prot
)
2377 if (data
->start
!= -1ul) {
2378 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2384 data
->start
= (new_prot
? end
: -1ul);
2385 data
->prot
= new_prot
;
2390 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2391 abi_ulong base
, int level
, void **lp
)
2397 return walk_memory_regions_end(data
, base
, 0);
2402 for (i
= 0; i
< L2_SIZE
; ++i
) {
2403 int prot
= pd
[i
].flags
;
2405 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2406 if (prot
!= data
->prot
) {
2407 rc
= walk_memory_regions_end(data
, pa
, prot
);
2415 for (i
= 0; i
< L2_SIZE
; ++i
) {
2416 pa
= base
| ((abi_ulong
)i
<<
2417 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2418 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2428 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2430 struct walk_memory_regions_data data
;
2438 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2439 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2440 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2446 return walk_memory_regions_end(&data
, 0, 0);
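/* Illustrative sketch (hypothetical callback, for exposition only): a
 * minimal walk_memory_regions_fn that tallies executable bytes.  dump_region
 * below is the in-tree example of the same pattern. */
#if 0
static int count_exec_region(void *priv, abi_ulong start,
                             abi_ulong end, unsigned long prot)
{
    abi_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0; /* a non-zero return would abort the walk */
}

/* usage: abi_ulong total = 0; walk_memory_regions(&total, count_exec_region); */
#endif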
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
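/* Illustrative sketch (hypothetical caller, for exposition only): a typical
 * user of page_check_range validates a guest range before touching it
 * through the direct g2h() mapping. */
#if 0
static int copy_in_checked(void *host_buf, target_ulong guest_addr,
                           target_ulong len)
{
    /* fails if any page in the range is unmapped or not readable */
    if (page_check_range(guest_addr, len, PAGE_READ) < 0) {
        return -1;
    }
    memcpy(host_buf, g2h(guest_addr), len);
    return 0;
}
#endif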
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
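/* Worked example (illustrative, assuming a 0x1000-byte target page; note
 * the macro also reads orig_size from the caller's scope): registering a
 * region of orig_size 0x400 at start_addr 0x1200.  For the page at
 * addr 0x1000, addr is not above start_addr, so start_addr2 takes the low
 * bits of start_addr (0x200) and need_subpage is set; the region also ends
 * inside the page, so end_addr2 = 0x5ff.  Only bytes 0x200..0x5ff of that
 * page are routed to the new handler; the rest keeps its old mapping. */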
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
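/* Illustrative usage sketch (hypothetical board code; addresses and names
 * are made up): a machine model typically registers RAM, whose phys_offset
 * low bits are IO_MEM_RAM, and then an MMIO page whose phys_offset carries
 * the io index returned by cpu_register_io_memory(). */
#if 0
static void example_board_map(ram_addr_t ram_size, int dev_io_index)
{
    ram_addr_t ram_offset = qemu_ram_alloc(NULL, "example.ram", ram_size);

    /* RAM at guest physical address 0 */
    cpu_register_physical_memory(0x00000000, ram_size,
                                 ram_offset | IO_MEM_RAM);
    /* one page of device registers at 0x10000000 */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, dev_io_index);
}
#endif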
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
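/* Worked example (illustrative): with blocks at [0, 0x1000) and
 * [0x3000, 0x4000), find_ram_offset(0x2000) sees the gap [0x1000, 0x3000)
 * of exactly 0x2000 bytes and returns 0x1000; the search keeps the
 * smallest gap that still fits the request (best fit). */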
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
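/* Illustrative usage sketch (hypothetical device code; the image buffer and
 * name are made up): allocate a block and obtain a host pointer to fill it. */
#if 0
static void example_load_firmware(const uint8_t *image, size_t image_len)
{
    ram_addr_t off = qemu_ram_alloc(NULL, "example.firmware",
                                    TARGET_PAGE_ALIGN(image_len));
    void *host = qemu_get_ram_ptr(off);

    memcpy(host, image, image_len);
    qemu_put_ram_ptr(host);
}
#endif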
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        g_free(io_mem_opaque[io_index]);
    }
}
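/* Worked example (illustrative): suppose a device registered as
 * DEVICE_LITTLE_ENDIAN whose readl callback returns 0x12345678, plugged
 * into a big-endian target.  The wrapper installed above intercepts the
 * read and the guest observes bswap32(0x12345678) = 0x78563412, i.e. the
 * byte order the device would present on a little-endian bus. */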
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
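/* Illustrative usage sketch (hypothetical device; all names are made up):
 * register dword-only MMIO handlers, letting the NULL slots fall back to
 * the unassigned handlers, then map them over one page of guest physical
 * address space with the returned io index. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device register read */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device register write */
}

static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL, NULL, mydev_readl,
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_map(void *opaque, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(mydev_read, mydev_write, opaque,
                                    DEVICE_LITTLE_ENDIAN);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif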
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
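/* Illustrative usage sketch (hypothetical DMA code; all names are made up):
 * the usual map/use/unmap pattern, registering a map client so the caller
 * can retry once the single bounce buffer is freed. */
#if 0
static void example_dma_retry(void *opaque);

static void example_dma_write(target_phys_addr_t dma_addr,
                              target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* resources exhausted: ask to be called back, then retry */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    /* ... fill host[0..plen) with device data ... */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif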
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif