/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "cache-utils.h"

#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
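/* A page whose code_write_count exceeds this threshold gets a per-page
   bitmap of the bytes covered by TBs (see build_page_bitmap() below), so
   that later writes only force an invalidation when they actually touch
   translated code. */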
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
uint8_t *phys_ram_dirty;
static int in_migration;
typedef struct RAMBlock {
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
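/* For example, with TARGET_PAGE_BITS = 12, L2_BITS = 10 and a 32-bit
   L1_MAP_ADDR_SPACE_BITS, V_L1_BITS is 10 and V_L1_SHIFT is 10: a page
   index is split into a 10-bit L1 index plus a 10-bit leaf index, i.e. a
   two-level table.  Larger address spaces simply add more L2_BITS-wide
   intermediate levels.  (Illustrative figures; the real widths depend on
   the target configuration.) */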
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
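/* page_find() is the read-only variant: with alloc == 0 the walk above
   returns NULL as soon as an intermediate level is missing instead of
   allocating it, so looking up an untouched address never allocates and
   costs only a few pointer dereferences. */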
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
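/* With the defaults this yields e.g. a 32 MB static buffer in user mode
   (DEFAULT_CODE_GEN_BUFFER_SIZE) and code_gen_buffer_size /
   CODE_GEN_AVG_BLOCK_SIZE TranslationBlock descriptors;
   code_gen_buffer_max_size keeps one maximum-sized block of slack so a
   translation started near the end of the buffer cannot overflow it. */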
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifdef _WIN32
    env->thread_id = GetCurrentProcessId();
#else
    env->thread_id = getpid();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
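/* Note that the physical hash chain is keyed on the physical address of
   the TB's first instruction (page_addr[0] plus the in-page offset of
   pc), and that a TB spanning two pages is unlinked from both PageDesc
   lists unless 'page_addr' already names the page being invalidated. */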
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
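/* The bitmap holds one bit per byte of the page (TARGET_PAGE_SIZE / 8
   bytes).  For example a 3-byte TB starting at page offset 0x40 sets
   bits 0x40..0x42, so a later write to offset 0x200 misses the bitmap
   and can be ignored by tb_invalidate_phys_page_fast(). */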
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    if (kvm_enabled() && !kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
= {
1939 /* NOTE: if flush_global is true, also flush global entries (not
1941 void tlb_flush(CPUState
*env
, int flush_global
)
1945 #if defined(DEBUG_TLB)
1946 printf("tlb_flush:\n");
1948 /* must reset current TB so that interrupts cannot modify the
1949 links while we are modifying them */
1950 env
->current_tb
= NULL
;
1952 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1954 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1955 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1959 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1961 env
->tlb_flush_addr
= -1;
1962 env
->tlb_flush_mask
= 0;
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
)
2076 in_migration
= enable
;
2077 ret
= cpu_notify_migration_log(!!enable
);
2081 int cpu_physical_memory_get_dirty_tracking(void)
2083 return in_migration
;
2086 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2087 target_phys_addr_t end_addr
)
2091 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No previous large page.  */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
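/* te->addend is what the fast path adds to a guest virtual address to
   reach host memory: (vaddr & TARGET_PAGE_MASK) + addend points into the
   block returned by qemu_get_ram_ptr().  I/O pages keep the TLB_MMIO bit
   set so they always take the slow path, and env->iotlb stores
   'iotlb - vaddr' so the handler can recover the region offset. */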
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
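/* Illustrative sketch (not part of the original source): a syscall-emulation
   path can validate a guest buffer with page_check_range() before touching
   it; "guest_buf_ok" is a name invented for this example. */
#if 0
static int guest_buf_ok(target_ulong guest_addr, target_ulong size,
                        int is_write)
{
    int flags = PAGE_READ | (is_write ? PAGE_WRITE : 0);

    /* page_check_range() also unprotects pages that were made read-only
       to guard translated code. */
    return page_check_range(guest_addr, size, flags) == 0;
}
#endif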
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
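/* Illustrative sketch (not part of the original source): a board model
   typically allocates RAM and registers it, and maps MMIO pages whose
   phys_offset is the io index returned by cpu_register_io_memory() (defined
   further below).  Base addresses, sizes and the helper name are invented. */
#if 0
static void example_register_regions(int example_mmio_io_index)
{
    ram_addr_t ram_off = qemu_ram_alloc(0x800000);      /* 8 MB of RAM */

    /* RAM: no flag bits in the low bits of phys_offset, region_offset 0. */
    cpu_register_physical_memory_offset(0x00000000, 0x800000,
                                        ram_off | IO_MEM_RAM, 0);

    /* MMIO: accesses reach the handlers with an offset relative to the
       start of the region (plus region_offset, here 0). */
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        example_mmio_io_index, 0);
}
#endif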
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    return area;
}
#endif
ram_addr_t qemu_ram_map(ram_addr_t size, void *host)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = host;

    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host) {
            new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
            madvise(new_block->host, size, MADV_MERGEABLE);
#endif
        }
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
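/* Illustrative sketch (not part of the original source): a device that owns
   its own memory (e.g. video RAM) allocates it once and keeps the host
   pointer, as the comment above prescribes.  The helper name is invented. */
#if 0
static void *example_alloc_vram(ram_addr_t *vram_offset, ram_addr_t size)
{
    *vram_offset = qemu_ram_alloc(size);
    /* Valid because the device only touches its own block and never
       reaches beyond its end; not suitable for general-purpose DMA. */
    return qemu_get_ram_ptr(*vram_offset);
}
#endif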
int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block)
        return -1;
    *ram_addr = block->offset + (host - block->host);
    return 0;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    ram_addr_t ram_addr;

    if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
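/* Illustrative sketch (not part of the original source): the usual MMIO
   pattern is three read and three write handlers (byte/word/dword) registered
   once, then mapped with cpu_register_physical_memory(), as the comment above
   notes.  The device state, register layout and names are invented. */
#if 0
typedef struct ExampleDevState {
    uint32_t status;
} ExampleDevState;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return (addr == 0) ? s->status : 0;
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDevState *s = opaque;
    if (addr == 0)
        s->status = val;
}

static CPUReadMemoryFunc * const example_read[3] = {
    NULL, NULL, example_readl,   /* NULL entries fall back to unassigned_mem */
};

static CPUWriteMemoryFunc * const example_write[3] = {
    NULL, NULL, example_writel,
};

static void example_init_mmio(ExampleDevState *s)
{
    int io = cpu_register_io_memory(example_read, example_write, s);
    cpu_register_physical_memory(0x10001000, TARGET_PAGE_SIZE, io);
}
#endif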
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
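/* Illustrative sketch (not part of the original source): a device doing
   "DMA" copies a scratch buffer into guest physical memory through this slow
   path; the cpu_physical_memory_read()/write() helpers used later in this
   file are thin wrappers around the same call.  Names are invented. */
#if 0
static void example_dma_write(target_phys_addr_t guest_pa,
                              const uint8_t *data, int size)
{
    /* is_write = 1: RAM is marked dirty and any overlapping translated
       code is invalidated by cpu_physical_memory_rw() itself. */
    cpu_physical_memory_rw(guest_pa, (uint8_t *)data, size, 1);
}
#endif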
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
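/* Illustrative sketch (not part of the original source): firmware loading
   uses this helper precisely because it also stores into ROM-registered
   pages, which the cpu_physical_memory_rw() write path would route to the
   unassigned handler instead.  Names are invented. */
#if 0
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}
#endif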
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
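/* Illustrative sketch (not part of the original source): the zero-copy
   pattern for bulk DMA.  If the region cannot be mapped in one piece the
   caller can loop, fall back to cpu_physical_memory_rw(), or register a
   MapClient callback and retry when resources free up.  Names are invented. */
#if 0
static void example_dma_read_into(void *dest, target_phys_addr_t guest_pa,
                                  target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(guest_pa, &plen, 0 /* read */);

    if (!host) {
        /* Resources (e.g. the single bounce buffer) are busy; a real caller
           could use cpu_register_map_client() and retry later. */
        return;
    }
    memcpy(dest, host, plen);               /* plen may be smaller than size */
    cpu_physical_memory_unmap(host, plen, 0 /* !is_write */, plen);
}
#endif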
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
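/* Illustrative sketch (not part of the original source): devices commonly
   use the ld*_phys/st*_phys helpers to walk small in-guest structures, e.g.
   reading a descriptor and writing back a completion flag.  The layout and
   names below are invented for the example. */
#if 0
static void example_process_descriptor(target_phys_addr_t desc_pa)
{
    uint32_t buf_addr = ldl_phys(desc_pa);        /* word 0: buffer address */
    uint32_t buf_len  = ldl_phys(desc_pa + 4);    /* word 1: buffer length  */

    (void)buf_addr;
    (void)buf_len;

    stl_phys(desc_pa + 8, 1);     /* word 2: mark the descriptor as done */
}
#endif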
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
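/* Illustrative sketch (not part of the original source): this is the kind of
   accessor a gdb-stub style debugger path relies on, since it takes guest
   virtual addresses and tolerates ROM when writing.  Names are invented. */
#if 0
static int example_peek_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;
    *out = ldl_p(buf);
    return 0;
}
#endif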
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    tcg_dump_info(f, cpu_fprintf);
#endif
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif