2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
30 #include "cache-utils.h"
32 #if !defined(TARGET_IA64)
41 #include "qemu-timer.h"
42 #if defined(CONFIG_USER_ONLY)
45 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46 #include <sys/param.h>
47 #if __FreeBSD_version >= 700104
48 #define HAVE_KINFO_GETVMMAP
49 #define sigqueue sigqueue_freebsd /* avoid redefinition */
52 #include <machine/profile.h>
62 //#define DEBUG_TB_INVALIDATE
65 //#define DEBUG_UNASSIGNED
67 /* make various TB consistency checks */
68 //#define DEBUG_TB_CHECK
69 //#define DEBUG_TLB_CHECK
71 //#define DEBUG_IOPORT
72 //#define DEBUG_SUBPAGE
74 #if !defined(CONFIG_USER_ONLY)
75 /* TB consistency checks only implemented for usermode emulation. */
79 #define SMC_BITMAP_USE_THRESHOLD 10
81 static TranslationBlock
*tbs
;
82 static int code_gen_max_blocks
;
83 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
85 /* any access to the tbs or the page table must use this lock */
86 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
88 #if defined(__arm__) || defined(__sparc_v9__)
89 /* The prologue must be reachable with a direct jump. ARM and Sparc64
90 have limited branch ranges (possibly also PPC) so place it in a
91 section close to code segment. */
92 #define code_gen_section \
93 __attribute__((__section__(".gen_code"))) \
94 __attribute__((aligned (32)))
96 /* Maximum alignment for Win32 is 16. */
97 #define code_gen_section \
98 __attribute__((aligned (16)))
100 #define code_gen_section \
101 __attribute__((aligned (32)))
104 uint8_t code_gen_prologue
[1024] code_gen_section
;
105 static uint8_t *code_gen_buffer
;
106 static unsigned long code_gen_buffer_size
;
107 /* threshold to flush the translated code buffer */
108 static unsigned long code_gen_buffer_max_size
;
109 static uint8_t *code_gen_ptr
;
111 #if !defined(CONFIG_USER_ONLY)
113 static int in_migration
;
115 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
) };
119 /* current CPU in the current thread. It is only valid inside
121 CPUState
*cpu_single_env
;
122 /* 0 = Do not count executed instructions.
123 1 = Precise instruction counting.
124 2 = Adaptive rate instruction counting. */
126 /* Current instruction counter. While executing translated code this may
127 include some instructions that have not yet been executed. */
130 typedef struct PageDesc
{
131 /* list of TBs intersecting this ram page */
132 TranslationBlock
*first_tb
;
133 /* in order to optimize self modifying code, we count the number
134 of lookups we do to a given page to use a bitmap */
135 unsigned int code_write_count
;
136 uint8_t *code_bitmap
;
137 #if defined(CONFIG_USER_ONLY)
142 /* In system mode we want L1_MAP to be based on ram offsets,
143 while in user mode we want it to be based on virtual addresses. */
144 #if !defined(CONFIG_USER_ONLY)
145 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
146 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
148 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
151 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
154 /* Size of the L2 (and L3, etc) page tables. */
156 #define L2_SIZE (1 << L2_BITS)
158 /* The bits remaining after N lower levels of page tables. */
159 #define P_L1_BITS_REM \
160 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
161 #define V_L1_BITS_REM \
162 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
164 /* Size of the L1 page table. Avoid silly small sizes. */
165 #if P_L1_BITS_REM < 4
166 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
168 #define P_L1_BITS P_L1_BITS_REM
171 #if V_L1_BITS_REM < 4
172 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
174 #define V_L1_BITS V_L1_BITS_REM
177 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
178 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
180 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
181 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
183 unsigned long qemu_real_host_page_size
;
184 unsigned long qemu_host_page_bits
;
185 unsigned long qemu_host_page_size
;
186 unsigned long qemu_host_page_mask
;
188 /* This is a multi-level map on the virtual address space.
189 The bottom level has pointers to PageDesc. */
190 static void *l1_map
[V_L1_SIZE
];
192 #if !defined(CONFIG_USER_ONLY)
193 typedef struct PhysPageDesc
{
194 /* offset in host memory of the page + io_index in the low bits */
195 ram_addr_t phys_offset
;
196 ram_addr_t region_offset
;
199 /* This is a multi-level map on the physical address space.
200 The bottom level has pointers to PhysPageDesc. */
201 static void *l1_phys_map
[P_L1_SIZE
];
203 static void io_mem_init(void);
205 /* io memory support */
206 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
207 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
208 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
209 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
210 static int io_mem_watch
;
215 static const char *logfilename
= "qemu.log";
217 static const char *logfilename
= "/tmp/qemu.log";
221 static int log_append
= 0;
224 #if !defined(CONFIG_USER_ONLY)
225 static int tlb_flush_count
;
227 static int tb_flush_count
;
228 static int tb_phys_invalidate_count
;
231 static void map_exec(void *addr
, long size
)
234 VirtualProtect(addr
, size
,
235 PAGE_EXECUTE_READWRITE
, &old_protect
);
239 static void map_exec(void *addr
, long size
)
241 unsigned long start
, end
, page_size
;
243 page_size
= getpagesize();
244 start
= (unsigned long)addr
;
245 start
&= ~(page_size
- 1);
247 end
= (unsigned long)addr
+ size
;
248 end
+= page_size
- 1;
249 end
&= ~(page_size
- 1);
251 mprotect((void *)start
, end
- start
,
252 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
256 static void page_init(void)
258 /* NOTE: we can always suppose that qemu_host_page_size >=
262 SYSTEM_INFO system_info
;
264 GetSystemInfo(&system_info
);
265 qemu_real_host_page_size
= system_info
.dwPageSize
;
268 qemu_real_host_page_size
= getpagesize();
270 if (qemu_host_page_size
== 0)
271 qemu_host_page_size
= qemu_real_host_page_size
;
272 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
273 qemu_host_page_size
= TARGET_PAGE_SIZE
;
274 qemu_host_page_bits
= 0;
275 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
276 qemu_host_page_bits
++;
277 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
279 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
281 #ifdef HAVE_KINFO_GETVMMAP
282 struct kinfo_vmentry
*freep
;
285 freep
= kinfo_getvmmap(getpid(), &cnt
);
288 for (i
= 0; i
< cnt
; i
++) {
289 unsigned long startaddr
, endaddr
;
291 startaddr
= freep
[i
].kve_start
;
292 endaddr
= freep
[i
].kve_end
;
293 if (h2g_valid(startaddr
)) {
294 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
296 if (h2g_valid(endaddr
)) {
297 endaddr
= h2g(endaddr
);
298 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
300 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
302 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
313 last_brk
= (unsigned long)sbrk(0);
315 f
= fopen("/compat/linux/proc/self/maps", "r");
320 unsigned long startaddr
, endaddr
;
323 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
325 if (n
== 2 && h2g_valid(startaddr
)) {
326 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
328 if (h2g_valid(endaddr
)) {
329 endaddr
= h2g(endaddr
);
333 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
345 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
351 #if defined(CONFIG_USER_ONLY)
352 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
353 # define ALLOC(P, SIZE) \
355 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
356 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
359 # define ALLOC(P, SIZE) \
360 do { P = qemu_mallocz(SIZE); } while (0)
363 /* Level 1. Always allocated. */
364 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
367 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
374 ALLOC(p
, sizeof(void *) * L2_SIZE
);
378 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
386 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
392 return pd
+ (index
& (L2_SIZE
- 1));
395 static inline PageDesc
*page_find(tb_page_addr_t index
)
397 return page_find_alloc(index
, 0);
400 #if !defined(CONFIG_USER_ONLY)
401 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
407 /* Level 1. Always allocated. */
408 lp
= l1_phys_map
+ ((index
>> P_L1_SHIFT
) & (P_L1_SIZE
- 1));
411 for (i
= P_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
417 *lp
= p
= qemu_mallocz(sizeof(void *) * L2_SIZE
);
419 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
430 *lp
= pd
= qemu_malloc(sizeof(PhysPageDesc
) * L2_SIZE
);
432 for (i
= 0; i
< L2_SIZE
; i
++) {
433 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
434 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
438 return pd
+ (index
& (L2_SIZE
- 1));
441 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
443 return phys_page_find_alloc(index
, 0);
446 static void tlb_protect_code(ram_addr_t ram_addr
);
447 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
449 #define mmap_lock() do { } while(0)
450 #define mmap_unlock() do { } while(0)
453 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
455 #if defined(CONFIG_USER_ONLY)
456 /* Currently it is not recommended to allocate big chunks of data in
457 user mode. It will change when a dedicated libc will be used */
458 #define USE_STATIC_CODE_GEN_BUFFER
461 #ifdef USE_STATIC_CODE_GEN_BUFFER
462 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
463 __attribute__((aligned (CODE_GEN_ALIGN
)));
466 static void code_gen_alloc(unsigned long tb_size
)
471 #ifdef USE_STATIC_CODE_GEN_BUFFER
472 code_gen_buffer
= static_code_gen_buffer
;
473 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
474 map_exec(code_gen_buffer
, code_gen_buffer_size
);
476 code_gen_buffer_size
= tb_size
;
477 if (code_gen_buffer_size
== 0) {
478 #if defined(CONFIG_USER_ONLY)
479 /* in user mode, phys_ram_size is not meaningful */
480 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
482 /* XXX: needs adjustments */
483 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
486 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
487 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
488 /* The code gen buffer location may have constraints depending on
489 the host cpu and OS */
490 #if defined(__linux__)
495 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
496 #if defined(__x86_64__)
498 /* Cannot map more than that */
499 if (code_gen_buffer_size
> (800 * 1024 * 1024))
500 code_gen_buffer_size
= (800 * 1024 * 1024);
501 #elif defined(__sparc_v9__)
502 // Map the buffer below 2G, so we can use direct calls and branches
504 start
= (void *) 0x60000000UL
;
505 if (code_gen_buffer_size
> (512 * 1024 * 1024))
506 code_gen_buffer_size
= (512 * 1024 * 1024);
507 #elif defined(__arm__)
508 /* Map the buffer below 32M, so we can use direct calls and branches */
510 start
= (void *) 0x01000000UL
;
511 if (code_gen_buffer_size
> 16 * 1024 * 1024)
512 code_gen_buffer_size
= 16 * 1024 * 1024;
513 #elif defined(__s390x__)
514 /* Map the buffer so that we can use direct calls and branches. */
515 /* We have a +- 4GB range on the branches; leave some slop. */
516 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
517 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
519 start
= (void *)0x90000000UL
;
521 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
522 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
524 if (code_gen_buffer
== MAP_FAILED
) {
525 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
529 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
530 || defined(__DragonFly__) || defined(__OpenBSD__)
534 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
535 #if defined(__x86_64__)
536 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
537 * 0x40000000 is free */
539 addr
= (void *)0x40000000;
540 /* Cannot map more than that */
541 if (code_gen_buffer_size
> (800 * 1024 * 1024))
542 code_gen_buffer_size
= (800 * 1024 * 1024);
543 #elif defined(__sparc_v9__)
544 // Map the buffer below 2G, so we can use direct calls and branches
546 addr
= (void *) 0x60000000UL
;
547 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
548 code_gen_buffer_size
= (512 * 1024 * 1024);
551 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
552 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
554 if (code_gen_buffer
== MAP_FAILED
) {
555 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
560 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
561 map_exec(code_gen_buffer
, code_gen_buffer_size
);
563 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
564 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
565 code_gen_buffer_max_size
= code_gen_buffer_size
-
566 (TCG_MAX_OP_SIZE
* OPC_MAX_SIZE
);
567 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
568 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
571 /* Must be called before using the QEMU cpus. 'tb_size' is the size
572 (in bytes) allocated to the translation buffer. Zero means default
574 void cpu_exec_init_all(unsigned long tb_size
)
577 code_gen_alloc(tb_size
);
578 code_gen_ptr
= code_gen_buffer
;
580 #if !defined(CONFIG_USER_ONLY)
583 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
584 /* There's no guest base to take into account, so go ahead and
585 initialize the prologue now. */
586 tcg_prologue_init(&tcg_ctx
);
590 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
592 static int cpu_common_post_load(void *opaque
, int version_id
)
594 CPUState
*env
= opaque
;
596 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
597 version_id is increased. */
598 env
->interrupt_request
&= ~0x01;
604 static const VMStateDescription vmstate_cpu_common
= {
605 .name
= "cpu_common",
607 .minimum_version_id
= 1,
608 .minimum_version_id_old
= 1,
609 .post_load
= cpu_common_post_load
,
610 .fields
= (VMStateField
[]) {
611 VMSTATE_UINT32(halted
, CPUState
),
612 VMSTATE_UINT32(interrupt_request
, CPUState
),
613 VMSTATE_END_OF_LIST()
618 CPUState
*qemu_get_cpu(int cpu
)
620 CPUState
*env
= first_cpu
;
623 if (env
->cpu_index
== cpu
)
631 void cpu_exec_init(CPUState
*env
)
636 #if defined(CONFIG_USER_ONLY)
639 env
->next_cpu
= NULL
;
642 while (*penv
!= NULL
) {
643 penv
= &(*penv
)->next_cpu
;
646 env
->cpu_index
= cpu_index
;
648 QTAILQ_INIT(&env
->breakpoints
);
649 QTAILQ_INIT(&env
->watchpoints
);
651 env
->thread_id
= GetCurrentProcessId();
653 env
->thread_id
= getpid();
656 #if defined(CONFIG_USER_ONLY)
659 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
660 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
661 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
662 cpu_save
, cpu_load
, env
);
666 /* Allocate a new translation block. Flush the translation buffer if
667 too many translation blocks or too much generated code. */
668 static TranslationBlock
*tb_alloc(target_ulong pc
)
670 TranslationBlock
*tb
;
672 if (nb_tbs
>= code_gen_max_blocks
||
673 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
681 void tb_free(TranslationBlock
*tb
)
683 /* In practice this is mostly used for single use temporary TB
684 Ignore the hard cases and just back up if this TB happens to
685 be the last one generated. */
686 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
687 code_gen_ptr
= tb
->tc_ptr
;
692 static inline void invalidate_page_bitmap(PageDesc
*p
)
694 if (p
->code_bitmap
) {
695 qemu_free(p
->code_bitmap
);
696 p
->code_bitmap
= NULL
;
698 p
->code_write_count
= 0;
701 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
703 static void page_flush_tb_1 (int level
, void **lp
)
712 for (i
= 0; i
< L2_SIZE
; ++i
) {
713 pd
[i
].first_tb
= NULL
;
714 invalidate_page_bitmap(pd
+ i
);
718 for (i
= 0; i
< L2_SIZE
; ++i
) {
719 page_flush_tb_1 (level
- 1, pp
+ i
);
724 static void page_flush_tb(void)
727 for (i
= 0; i
< V_L1_SIZE
; i
++) {
728 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
732 /* flush all the translation blocks */
733 /* XXX: tb_flush is currently not thread safe */
734 void tb_flush(CPUState
*env1
)
737 #if defined(DEBUG_FLUSH)
738 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
739 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
741 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
743 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
744 cpu_abort(env1
, "Internal error: code buffer overflow\n");
748 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
749 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
752 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
755 code_gen_ptr
= code_gen_buffer
;
756 /* XXX: flush processor icache at this point if cache flush is
761 #ifdef DEBUG_TB_CHECK
763 static void tb_invalidate_check(target_ulong address
)
765 TranslationBlock
*tb
;
767 address
&= TARGET_PAGE_MASK
;
768 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
769 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
770 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
771 address
>= tb
->pc
+ tb
->size
)) {
772 printf("ERROR invalidate: address=" TARGET_FMT_lx
773 " PC=%08lx size=%04x\n",
774 address
, (long)tb
->pc
, tb
->size
);
780 /* verify that all the pages have correct rights for code */
781 static void tb_page_check(void)
783 TranslationBlock
*tb
;
784 int i
, flags1
, flags2
;
786 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
787 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
788 flags1
= page_get_flags(tb
->pc
);
789 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
790 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
791 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
792 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
800 /* invalidate one TB */
801 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
804 TranslationBlock
*tb1
;
808 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
811 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
815 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
817 TranslationBlock
*tb1
;
823 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
825 *ptb
= tb1
->page_next
[n1
];
828 ptb
= &tb1
->page_next
[n1
];
832 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
834 TranslationBlock
*tb1
, **ptb
;
837 ptb
= &tb
->jmp_next
[n
];
840 /* find tb(n) in circular list */
844 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
845 if (n1
== n
&& tb1
== tb
)
848 ptb
= &tb1
->jmp_first
;
850 ptb
= &tb1
->jmp_next
[n1
];
853 /* now we can suppress tb(n) from the list */
854 *ptb
= tb
->jmp_next
[n
];
856 tb
->jmp_next
[n
] = NULL
;
860 /* reset the jump entry 'n' of a TB so that it is not chained to
862 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
864 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
867 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
872 tb_page_addr_t phys_pc
;
873 TranslationBlock
*tb1
, *tb2
;
875 /* remove the TB from the hash list */
876 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
877 h
= tb_phys_hash_func(phys_pc
);
878 tb_remove(&tb_phys_hash
[h
], tb
,
879 offsetof(TranslationBlock
, phys_hash_next
));
881 /* remove the TB from the page list */
882 if (tb
->page_addr
[0] != page_addr
) {
883 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
884 tb_page_remove(&p
->first_tb
, tb
);
885 invalidate_page_bitmap(p
);
887 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
888 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
889 tb_page_remove(&p
->first_tb
, tb
);
890 invalidate_page_bitmap(p
);
893 tb_invalidated_flag
= 1;
895 /* remove the TB from the hash list */
896 h
= tb_jmp_cache_hash_func(tb
->pc
);
897 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
898 if (env
->tb_jmp_cache
[h
] == tb
)
899 env
->tb_jmp_cache
[h
] = NULL
;
902 /* suppress this TB from the two jump lists */
903 tb_jmp_remove(tb
, 0);
904 tb_jmp_remove(tb
, 1);
906 /* suppress any remaining jumps to this TB */
912 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
913 tb2
= tb1
->jmp_next
[n1
];
914 tb_reset_jump(tb1
, n1
);
915 tb1
->jmp_next
[n1
] = NULL
;
918 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
920 tb_phys_invalidate_count
++;
923 static inline void set_bits(uint8_t *tab
, int start
, int len
)
929 mask
= 0xff << (start
& 7);
930 if ((start
& ~7) == (end
& ~7)) {
932 mask
&= ~(0xff << (end
& 7));
937 start
= (start
+ 8) & ~7;
939 while (start
< end1
) {
944 mask
= ~(0xff << (end
& 7));
950 static void build_page_bitmap(PageDesc
*p
)
952 int n
, tb_start
, tb_end
;
953 TranslationBlock
*tb
;
955 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
960 tb
= (TranslationBlock
*)((long)tb
& ~3);
961 /* NOTE: this is subtle as a TB may span two physical pages */
963 /* NOTE: tb_end may be after the end of the page, but
964 it is not a problem */
965 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
966 tb_end
= tb_start
+ tb
->size
;
967 if (tb_end
> TARGET_PAGE_SIZE
)
968 tb_end
= TARGET_PAGE_SIZE
;
971 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
973 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
974 tb
= tb
->page_next
[n
];
978 TranslationBlock
*tb_gen_code(CPUState
*env
,
979 target_ulong pc
, target_ulong cs_base
,
980 int flags
, int cflags
)
982 TranslationBlock
*tb
;
984 tb_page_addr_t phys_pc
, phys_page2
;
985 target_ulong virt_page2
;
988 phys_pc
= get_page_addr_code(env
, pc
);
991 /* flush must be done */
993 /* cannot fail at this point */
995 /* Don't forget to invalidate previous TB info. */
996 tb_invalidated_flag
= 1;
998 tc_ptr
= code_gen_ptr
;
1000 tb
->cs_base
= cs_base
;
1002 tb
->cflags
= cflags
;
1003 cpu_gen_code(env
, tb
, &code_gen_size
);
1004 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1006 /* check next page if needed */
1007 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1009 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1010 phys_page2
= get_page_addr_code(env
, virt_page2
);
1012 tb_link_page(tb
, phys_pc
, phys_page2
);
1016 /* invalidate all TBs which intersect with the target physical page
1017 starting in range [start;end[. NOTE: start and end must refer to
1018 the same physical page. 'is_cpu_write_access' should be true if called
1019 from a real cpu write access: the virtual CPU will exit the current
1020 TB if code is modified inside this TB. */
1021 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1022 int is_cpu_write_access
)
1024 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1025 CPUState
*env
= cpu_single_env
;
1026 tb_page_addr_t tb_start
, tb_end
;
1029 #ifdef TARGET_HAS_PRECISE_SMC
1030 int current_tb_not_found
= is_cpu_write_access
;
1031 TranslationBlock
*current_tb
= NULL
;
1032 int current_tb_modified
= 0;
1033 target_ulong current_pc
= 0;
1034 target_ulong current_cs_base
= 0;
1035 int current_flags
= 0;
1036 #endif /* TARGET_HAS_PRECISE_SMC */
1038 p
= page_find(start
>> TARGET_PAGE_BITS
);
1041 if (!p
->code_bitmap
&&
1042 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1043 is_cpu_write_access
) {
1044 /* build code bitmap */
1045 build_page_bitmap(p
);
1048 /* we remove all the TBs in the range [start, end[ */
1049 /* XXX: see if in some cases it could be faster to invalidate all the code */
1051 while (tb
!= NULL
) {
1053 tb
= (TranslationBlock
*)((long)tb
& ~3);
1054 tb_next
= tb
->page_next
[n
];
1055 /* NOTE: this is subtle as a TB may span two physical pages */
1057 /* NOTE: tb_end may be after the end of the page, but
1058 it is not a problem */
1059 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1060 tb_end
= tb_start
+ tb
->size
;
1062 tb_start
= tb
->page_addr
[1];
1063 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1065 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1066 #ifdef TARGET_HAS_PRECISE_SMC
1067 if (current_tb_not_found
) {
1068 current_tb_not_found
= 0;
1070 if (env
->mem_io_pc
) {
1071 /* now we have a real cpu fault */
1072 current_tb
= tb_find_pc(env
->mem_io_pc
);
1075 if (current_tb
== tb
&&
1076 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1077 /* If we are modifying the current TB, we must stop
1078 its execution. We could be more precise by checking
1079 that the modification is after the current PC, but it
1080 would require a specialized function to partially
1081 restore the CPU state */
1083 current_tb_modified
= 1;
1084 cpu_restore_state(current_tb
, env
,
1085 env
->mem_io_pc
, NULL
);
1086 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1089 #endif /* TARGET_HAS_PRECISE_SMC */
1090 /* we need to do that to handle the case where a signal
1091 occurs while doing tb_phys_invalidate() */
1094 saved_tb
= env
->current_tb
;
1095 env
->current_tb
= NULL
;
1097 tb_phys_invalidate(tb
, -1);
1099 env
->current_tb
= saved_tb
;
1100 if (env
->interrupt_request
&& env
->current_tb
)
1101 cpu_interrupt(env
, env
->interrupt_request
);
1106 #if !defined(CONFIG_USER_ONLY)
1107 /* if no code remaining, no need to continue to use slow writes */
1109 invalidate_page_bitmap(p
);
1110 if (is_cpu_write_access
) {
1111 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1115 #ifdef TARGET_HAS_PRECISE_SMC
1116 if (current_tb_modified
) {
1117 /* we generate a block containing just the instruction
1118 modifying the memory. It will ensure that it cannot modify
1120 env
->current_tb
= NULL
;
1121 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1122 cpu_resume_from_signal(env
, NULL
);
1127 /* len must be <= 8 and start must be a multiple of len */
1128 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1134 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1135 cpu_single_env
->mem_io_vaddr
, len
,
1136 cpu_single_env
->eip
,
1137 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1140 p
= page_find(start
>> TARGET_PAGE_BITS
);
1143 if (p
->code_bitmap
) {
1144 offset
= start
& ~TARGET_PAGE_MASK
;
1145 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1146 if (b
& ((1 << len
) - 1))
1150 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1154 #if !defined(CONFIG_SOFTMMU)
1155 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1156 unsigned long pc
, void *puc
)
1158 TranslationBlock
*tb
;
1161 #ifdef TARGET_HAS_PRECISE_SMC
1162 TranslationBlock
*current_tb
= NULL
;
1163 CPUState
*env
= cpu_single_env
;
1164 int current_tb_modified
= 0;
1165 target_ulong current_pc
= 0;
1166 target_ulong current_cs_base
= 0;
1167 int current_flags
= 0;
1170 addr
&= TARGET_PAGE_MASK
;
1171 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1175 #ifdef TARGET_HAS_PRECISE_SMC
1176 if (tb
&& pc
!= 0) {
1177 current_tb
= tb_find_pc(pc
);
1180 while (tb
!= NULL
) {
1182 tb
= (TranslationBlock
*)((long)tb
& ~3);
1183 #ifdef TARGET_HAS_PRECISE_SMC
1184 if (current_tb
== tb
&&
1185 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1186 /* If we are modifying the current TB, we must stop
1187 its execution. We could be more precise by checking
1188 that the modification is after the current PC, but it
1189 would require a specialized function to partially
1190 restore the CPU state */
1192 current_tb_modified
= 1;
1193 cpu_restore_state(current_tb
, env
, pc
, puc
);
1194 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1197 #endif /* TARGET_HAS_PRECISE_SMC */
1198 tb_phys_invalidate(tb
, addr
);
1199 tb
= tb
->page_next
[n
];
1202 #ifdef TARGET_HAS_PRECISE_SMC
1203 if (current_tb_modified
) {
1204 /* we generate a block containing just the instruction
1205 modifying the memory. It will ensure that it cannot modify
1207 env
->current_tb
= NULL
;
1208 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1209 cpu_resume_from_signal(env
, puc
);
1215 /* add the tb in the target page and protect it if necessary */
1216 static inline void tb_alloc_page(TranslationBlock
*tb
,
1217 unsigned int n
, tb_page_addr_t page_addr
)
1220 TranslationBlock
*last_first_tb
;
1222 tb
->page_addr
[n
] = page_addr
;
1223 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1224 tb
->page_next
[n
] = p
->first_tb
;
1225 last_first_tb
= p
->first_tb
;
1226 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1227 invalidate_page_bitmap(p
);
1229 #if defined(TARGET_HAS_SMC) || 1
1231 #if defined(CONFIG_USER_ONLY)
1232 if (p
->flags
& PAGE_WRITE
) {
1237 /* force the host page as non writable (writes will have a
1238 page fault + mprotect overhead) */
1239 page_addr
&= qemu_host_page_mask
;
1241 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1242 addr
+= TARGET_PAGE_SIZE
) {
1244 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1248 p2
->flags
&= ~PAGE_WRITE
;
1250 mprotect(g2h(page_addr
), qemu_host_page_size
,
1251 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1252 #ifdef DEBUG_TB_INVALIDATE
1253 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1258 /* if some code is already present, then the pages are already
1259 protected. So we handle the case where only the first TB is
1260 allocated in a physical page */
1261 if (!last_first_tb
) {
1262 tlb_protect_code(page_addr
);
1266 #endif /* TARGET_HAS_SMC */
1269 /* add a new TB and link it to the physical page tables. phys_page2 is
1270 (-1) to indicate that only one page contains the TB. */
1271 void tb_link_page(TranslationBlock
*tb
,
1272 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1275 TranslationBlock
**ptb
;
1277 /* Grab the mmap lock to stop another thread invalidating this TB
1278 before we are done. */
1280 /* add in the physical hash table */
1281 h
= tb_phys_hash_func(phys_pc
);
1282 ptb
= &tb_phys_hash
[h
];
1283 tb
->phys_hash_next
= *ptb
;
1286 /* add in the page list */
1287 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1288 if (phys_page2
!= -1)
1289 tb_alloc_page(tb
, 1, phys_page2
);
1291 tb
->page_addr
[1] = -1;
1293 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1294 tb
->jmp_next
[0] = NULL
;
1295 tb
->jmp_next
[1] = NULL
;
1297 /* init original jump addresses */
1298 if (tb
->tb_next_offset
[0] != 0xffff)
1299 tb_reset_jump(tb
, 0);
1300 if (tb
->tb_next_offset
[1] != 0xffff)
1301 tb_reset_jump(tb
, 1);
1303 #ifdef DEBUG_TB_CHECK
1309 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1310 tb[1].tc_ptr. Return NULL if not found */
1311 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1313 int m_min
, m_max
, m
;
1315 TranslationBlock
*tb
;
1319 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1320 tc_ptr
>= (unsigned long)code_gen_ptr
)
1322 /* binary search (cf Knuth) */
1325 while (m_min
<= m_max
) {
1326 m
= (m_min
+ m_max
) >> 1;
1328 v
= (unsigned long)tb
->tc_ptr
;
1331 else if (tc_ptr
< v
) {
1340 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1342 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1344 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1347 tb1
= tb
->jmp_next
[n
];
1349 /* find head of list */
1352 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1355 tb1
= tb1
->jmp_next
[n1
];
1357 /* we are now sure that tb jumps to tb1 */
1360 /* remove tb from the jmp_first list */
1361 ptb
= &tb_next
->jmp_first
;
1365 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1366 if (n1
== n
&& tb1
== tb
)
1368 ptb
= &tb1
->jmp_next
[n1
];
1370 *ptb
= tb
->jmp_next
[n
];
1371 tb
->jmp_next
[n
] = NULL
;
1373 /* suppress the jump to next tb in generated code */
1374 tb_reset_jump(tb
, n
);
1376 /* suppress jumps in the tb on which we could have jumped */
1377 tb_reset_jump_recursive(tb_next
);
1381 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1383 tb_reset_jump_recursive2(tb
, 0);
1384 tb_reset_jump_recursive2(tb
, 1);
1387 #if defined(TARGET_HAS_ICE)
1388 #if defined(CONFIG_USER_ONLY)
1389 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1391 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1394 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1396 target_phys_addr_t addr
;
1398 ram_addr_t ram_addr
;
1401 addr
= cpu_get_phys_page_debug(env
, pc
);
1402 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1404 pd
= IO_MEM_UNASSIGNED
;
1406 pd
= p
->phys_offset
;
1408 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1409 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1412 #endif /* TARGET_HAS_ICE */
1414 #if defined(CONFIG_USER_ONLY)
1415 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1420 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1421 int flags
, CPUWatchpoint
**watchpoint
)
1426 /* Add a watchpoint. */
1427 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1428 int flags
, CPUWatchpoint
**watchpoint
)
1430 target_ulong len_mask
= ~(len
- 1);
1433 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1434 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1435 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1436 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1439 wp
= qemu_malloc(sizeof(*wp
));
1442 wp
->len_mask
= len_mask
;
1445 /* keep all GDB-injected watchpoints in front */
1447 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1449 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1451 tlb_flush_page(env
, addr
);
1458 /* Remove a specific watchpoint. */
1459 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1462 target_ulong len_mask
= ~(len
- 1);
1465 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1466 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1467 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1468 cpu_watchpoint_remove_by_ref(env
, wp
);
1475 /* Remove a specific watchpoint by reference. */
1476 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1478 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1480 tlb_flush_page(env
, watchpoint
->vaddr
);
1482 qemu_free(watchpoint
);
1485 /* Remove all matching watchpoints. */
1486 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1488 CPUWatchpoint
*wp
, *next
;
1490 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1491 if (wp
->flags
& mask
)
1492 cpu_watchpoint_remove_by_ref(env
, wp
);
1497 /* Add a breakpoint. */
1498 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1499 CPUBreakpoint
**breakpoint
)
1501 #if defined(TARGET_HAS_ICE)
1504 bp
= qemu_malloc(sizeof(*bp
));
1509 /* keep all GDB-injected breakpoints in front */
1511 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1513 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1515 breakpoint_invalidate(env
, pc
);
1525 /* Remove a specific breakpoint. */
1526 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1528 #if defined(TARGET_HAS_ICE)
1531 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1532 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1533 cpu_breakpoint_remove_by_ref(env
, bp
);
1543 /* Remove a specific breakpoint by reference. */
1544 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1546 #if defined(TARGET_HAS_ICE)
1547 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1549 breakpoint_invalidate(env
, breakpoint
->pc
);
1551 qemu_free(breakpoint
);
1555 /* Remove all matching breakpoints. */
1556 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1558 #if defined(TARGET_HAS_ICE)
1559 CPUBreakpoint
*bp
, *next
;
1561 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1562 if (bp
->flags
& mask
)
1563 cpu_breakpoint_remove_by_ref(env
, bp
);
1568 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1569 CPU loop after each instruction */
1570 void cpu_single_step(CPUState
*env
, int enabled
)
1572 #if defined(TARGET_HAS_ICE)
1573 if (env
->singlestep_enabled
!= enabled
) {
1574 env
->singlestep_enabled
= enabled
;
1576 kvm_update_guest_debug(env
, 0);
1578 /* must flush all the translated code to avoid inconsistencies */
1579 /* XXX: only flush what is necessary */
1586 /* enable or disable low levels log */
1587 void cpu_set_log(int log_flags
)
1589 loglevel
= log_flags
;
1590 if (loglevel
&& !logfile
) {
1591 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1593 perror(logfilename
);
1596 #if !defined(CONFIG_SOFTMMU)
1597 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1599 static char logfile_buf
[4096];
1600 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1602 #elif !defined(_WIN32)
1603 /* Win32 doesn't support line-buffering and requires size >= 2 */
1604 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1608 if (!loglevel
&& logfile
) {
1614 void cpu_set_log_filename(const char *filename
)
1616 logfilename
= strdup(filename
);
1621 cpu_set_log(loglevel
);
1624 static void cpu_unlink_tb(CPUState
*env
)
1626 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1627 problem and hope the cpu will stop of its own accord. For userspace
1628 emulation this often isn't actually as bad as it sounds. Often
1629 signals are used primarily to interrupt blocking syscalls. */
1630 TranslationBlock
*tb
;
1631 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1633 spin_lock(&interrupt_lock
);
1634 tb
= env
->current_tb
;
1635 /* if the cpu is currently executing code, we must unlink it and
1636 all the potentially executing TB */
1638 env
->current_tb
= NULL
;
1639 tb_reset_jump_recursive(tb
);
1641 spin_unlock(&interrupt_lock
);
1644 /* mask must never be zero, except for A20 change call */
1645 void cpu_interrupt(CPUState
*env
, int mask
)
1649 old_mask
= env
->interrupt_request
;
1650 env
->interrupt_request
|= mask
;
1651 if (kvm_enabled() && !kvm_irqchip_in_kernel())
1652 kvm_update_interrupt_request(env
);
1654 #ifndef CONFIG_USER_ONLY
1656 * If called from iothread context, wake the target cpu in
1659 if (!qemu_cpu_self(env
)) {
1666 env
->icount_decr
.u16
.high
= 0xffff;
1667 #ifndef CONFIG_USER_ONLY
1669 && (mask
& ~old_mask
) != 0) {
1670 cpu_abort(env
, "Raised interrupt while not in I/O function");
1678 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1680 env
->interrupt_request
&= ~mask
;
1683 void cpu_exit(CPUState
*env
)
1685 env
->exit_request
= 1;
1689 const CPULogItem cpu_log_items
[] = {
1690 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1691 "show generated host assembly code for each compiled TB" },
1692 { CPU_LOG_TB_IN_ASM
, "in_asm",
1693 "show target assembly code for each compiled TB" },
1694 { CPU_LOG_TB_OP
, "op",
1695 "show micro ops for each compiled TB" },
1696 { CPU_LOG_TB_OP_OPT
, "op_opt",
1699 "before eflags optimization and "
1701 "after liveness analysis" },
1702 { CPU_LOG_INT
, "int",
1703 "show interrupts/exceptions in short format" },
1704 { CPU_LOG_EXEC
, "exec",
1705 "show trace before each executed TB (lots of logs)" },
1706 { CPU_LOG_TB_CPU
, "cpu",
1707 "show CPU state before block translation" },
1709 { CPU_LOG_PCALL
, "pcall",
1710 "show protected mode far calls/returns/exceptions" },
1711 { CPU_LOG_RESET
, "cpu_reset",
1712 "show CPU state before CPU resets" },
1715 { CPU_LOG_IOPORT
, "ioport",
1716 "show all i/o ports accesses" },
1721 #ifndef CONFIG_USER_ONLY
1722 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1723 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1725 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1727 ram_addr_t phys_offset
)
1729 CPUPhysMemoryClient
*client
;
1730 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1731 client
->set_memory(client
, start_addr
, size
, phys_offset
);
1735 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1736 target_phys_addr_t end
)
1738 CPUPhysMemoryClient
*client
;
1739 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1740 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1747 static int cpu_notify_migration_log(int enable
)
1749 CPUPhysMemoryClient
*client
;
1750 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1751 int r
= client
->migration_log(client
, enable
);
1758 static void phys_page_for_each_1(CPUPhysMemoryClient
*client
,
1759 int level
, void **lp
)
1767 PhysPageDesc
*pd
= *lp
;
1768 for (i
= 0; i
< L2_SIZE
; ++i
) {
1769 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1770 client
->set_memory(client
, pd
[i
].region_offset
,
1771 TARGET_PAGE_SIZE
, pd
[i
].phys_offset
);
1776 for (i
= 0; i
< L2_SIZE
; ++i
) {
1777 phys_page_for_each_1(client
, level
- 1, pp
+ i
);
1782 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1785 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1786 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1791 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1793 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1794 phys_page_for_each(client
);
1797 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1799 QLIST_REMOVE(client
, list
);
1803 static int cmp1(const char *s1
, int n
, const char *s2
)
1805 if (strlen(s2
) != n
)
1807 return memcmp(s1
, s2
, n
) == 0;
1810 /* takes a comma separated list of log masks. Return 0 if error. */
1811 int cpu_str_to_log_mask(const char *str
)
1813 const CPULogItem
*item
;
1820 p1
= strchr(p
, ',');
1823 if(cmp1(p
,p1
-p
,"all")) {
1824 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1828 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1829 if (cmp1(p
, p1
- p
, item
->name
))
1843 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1850 fprintf(stderr
, "qemu: fatal: ");
1851 vfprintf(stderr
, fmt
, ap
);
1852 fprintf(stderr
, "\n");
1854 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1856 cpu_dump_state(env
, stderr
, fprintf
, 0);
1858 if (qemu_log_enabled()) {
1859 qemu_log("qemu: fatal: ");
1860 qemu_log_vprintf(fmt
, ap2
);
1863 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1865 log_cpu_state(env
, 0);
1872 #if defined(CONFIG_USER_ONLY)
1874 struct sigaction act
;
1875 sigfillset(&act
.sa_mask
);
1876 act
.sa_handler
= SIG_DFL
;
1877 sigaction(SIGABRT
, &act
, NULL
);
1883 CPUState
*cpu_copy(CPUState
*env
)
1885 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1886 CPUState
*next_cpu
= new_env
->next_cpu
;
1887 int cpu_index
= new_env
->cpu_index
;
1888 #if defined(TARGET_HAS_ICE)
1893 memcpy(new_env
, env
, sizeof(CPUState
));
1895 /* Preserve chaining and index. */
1896 new_env
->next_cpu
= next_cpu
;
1897 new_env
->cpu_index
= cpu_index
;
1899 /* Clone all break/watchpoints.
1900 Note: Once we support ptrace with hw-debug register access, make sure
1901 BP_CPU break/watchpoints are handled correctly on clone. */
1902 QTAILQ_INIT(&env
->breakpoints
);
1903 QTAILQ_INIT(&env
->watchpoints
);
1904 #if defined(TARGET_HAS_ICE)
1905 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1906 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1908 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1909 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1917 #if !defined(CONFIG_USER_ONLY)
1919 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1923 /* Discard jump cache entries for any tb which might potentially
1924 overlap the flushed page. */
1925 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1926 memset (&env
->tb_jmp_cache
[i
], 0,
1927 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1929 i
= tb_jmp_cache_hash_page(addr
);
1930 memset (&env
->tb_jmp_cache
[i
], 0,
1931 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1934 static CPUTLBEntry s_cputlb_empty_entry
= {
1941 /* NOTE: if flush_global is true, also flush global entries (not
1943 void tlb_flush(CPUState
*env
, int flush_global
)
1947 #if defined(DEBUG_TLB)
1948 printf("tlb_flush:\n");
1950 /* must reset current TB so that interrupts cannot modify the
1951 links while we are modifying them */
1952 env
->current_tb
= NULL
;
1954 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1956 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1957 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1961 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1963 env
->tlb_flush_addr
= -1;
1964 env
->tlb_flush_mask
= 0;
1968 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1970 if (addr
== (tlb_entry
->addr_read
&
1971 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1972 addr
== (tlb_entry
->addr_write
&
1973 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1974 addr
== (tlb_entry
->addr_code
&
1975 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1976 *tlb_entry
= s_cputlb_empty_entry
;
1980 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1985 #if defined(DEBUG_TLB)
1986 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1988 /* Check if we need to flush due to large pages. */
1989 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1990 #if defined(DEBUG_TLB)
1991 printf("tlb_flush_page: forced full flush ("
1992 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1993 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1998 /* must reset current TB so that interrupts cannot modify the
1999 links while we are modifying them */
2000 env
->current_tb
= NULL
;
2002 addr
&= TARGET_PAGE_MASK
;
2003 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2004 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2005 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
2007 tlb_flush_jmp_cache(env
, addr
);
2010 /* update the TLBs so that writes to code in the virtual page 'addr'
2012 static void tlb_protect_code(ram_addr_t ram_addr
)
2014 cpu_physical_memory_reset_dirty(ram_addr
,
2015 ram_addr
+ TARGET_PAGE_SIZE
,
2019 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2020 tested for self modifying code */
2021 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
2024 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
2027 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
2028 unsigned long start
, unsigned long length
)
2031 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2032 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
2033 if ((addr
- start
) < length
) {
2034 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
2039 /* Note: start and end must be within the same ram block. */
2040 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
2044 unsigned long length
, start1
;
2047 start
&= TARGET_PAGE_MASK
;
2048 end
= TARGET_PAGE_ALIGN(end
);
2050 length
= end
- start
;
2053 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
2055 /* we modify the TLB cache so that the dirty bit will be set again
2056 when accessing the range */
2057 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
2058 /* Check that we don't span multiple blocks - this breaks the
2059 address comparisons below. */
2060 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
2061 != (end
- 1) - start
) {
2065 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2067 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2068 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2069 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2075 int cpu_physical_memory_set_dirty_tracking(int enable
)
2078 in_migration
= enable
;
2079 ret
= cpu_notify_migration_log(!!enable
);
2083 int cpu_physical_memory_get_dirty_tracking(void)
2085 return in_migration
;
2088 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2089 target_phys_addr_t end_addr
)
2093 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2097 int cpu_physical_log_start(target_phys_addr_t start_addr
,
2100 CPUPhysMemoryClient
*client
;
2101 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2102 if (client
->log_start
) {
2103 int r
= client
->log_start(client
, start_addr
, size
);
2112 int cpu_physical_log_stop(target_phys_addr_t start_addr
,
2115 CPUPhysMemoryClient
*client
;
2116 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2117 if (client
->log_stop
) {
2118 int r
= client
->log_stop(client
, start_addr
, size
);
2127 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2129 ram_addr_t ram_addr
;
2132 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2133 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2134 + tlb_entry
->addend
);
2135 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2136 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2137 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2142 /* update the TLB according to the current state of the dirty bits */
2143 void cpu_tlb_update_dirty(CPUState
*env
)
2147 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2148 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2149 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2153 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2155 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2156 tlb_entry
->addr_write
= vaddr
;
2159 /* update the TLB corresponding to virtual page vaddr
2160 so that it is no longer dirty */
2161 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2166 vaddr
&= TARGET_PAGE_MASK
;
2167 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2168 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2169 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2172 /* Our TLB does not support large pages, so remember the area covered by
2173 large pages and trigger a full TLB flush if these are invalidated. */
2174 static void tlb_add_large_page(CPUState
*env
, target_ulong vaddr
,
2177 target_ulong mask
= ~(size
- 1);
2179 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
2180 env
->tlb_flush_addr
= vaddr
& mask
;
2181 env
->tlb_flush_mask
= mask
;
2184 /* Extend the existing region to include the new page.
2185 This is a compromise between unnecessary flushes and the cost
2186 of maintaining a full variable size TLB. */
2187 mask
&= env
->tlb_flush_mask
;
2188 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
2191 env
->tlb_flush_addr
&= mask
;
2192 env
->tlb_flush_mask
= mask
;
2195 /* Add a new TLB entry. At most one entry for a given virtual address
2196 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2197 supplied size is only used by tlb_flush_page. */
2198 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2199 target_phys_addr_t paddr
, int prot
,
2200 int mmu_idx
, target_ulong size
)
2205 target_ulong address
;
2206 target_ulong code_address
;
2207 unsigned long addend
;
2210 target_phys_addr_t iotlb
;
2212 assert(size
>= TARGET_PAGE_SIZE
);
2213 if (size
!= TARGET_PAGE_SIZE
) {
2214 tlb_add_large_page(env
, vaddr
, size
);
2216 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2218 pd
= IO_MEM_UNASSIGNED
;
2220 pd
= p
->phys_offset
;
2222 #if defined(DEBUG_TLB)
2223 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2224 " prot=%x idx=%d pd=0x%08lx\n",
2225 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2229 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2230 /* IO memory case (romd handled later) */
2231 address
|= TLB_MMIO
;
2233 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2234 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2236 iotlb
= pd
& TARGET_PAGE_MASK
;
2237 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2238 iotlb
|= IO_MEM_NOTDIRTY
;
2240 iotlb
|= IO_MEM_ROM
;
2242 /* IO handlers are currently passed a physical address.
2243 It would be nice to pass an offset from the base address
2244 of that region. This would avoid having to special case RAM,
2245 and avoid full address decoding in every device.
2246 We can't use the high bits of pd for this because
2247 IO_MEM_ROMD uses these as a ram address. */
2248 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2250 iotlb
+= p
->region_offset
;
2256 code_address
= address
;
2257 /* Make accesses to pages with watchpoints go via the
2258 watchpoint trap routines. */
2259 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2260 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2261 /* Avoid trapping reads of pages with a write breakpoint. */
2262 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2263 iotlb
= io_mem_watch
+ paddr
;
2264 address
|= TLB_MMIO
;
2270 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2271 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2272 te
= &env
->tlb_table
[mmu_idx
][index
];
2273 te
->addend
= addend
- vaddr
;
2274 if (prot
& PAGE_READ
) {
2275 te
->addr_read
= address
;
2280 if (prot
& PAGE_EXEC
) {
2281 te
->addr_code
= code_address
;
2285 if (prot
& PAGE_WRITE
) {
2286 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2287 (pd
& IO_MEM_ROMD
)) {
2288 /* Write access calls the I/O callback. */
2289 te
->addr_write
= address
| TLB_MMIO
;
2290 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2291 !cpu_physical_memory_is_dirty(pd
)) {
2292 te
->addr_write
= address
| TLB_NOTDIRTY
;
2294 te
->addr_write
= address
;
2297 te
->addr_write
= -1;
2303 void tlb_flush(CPUState
*env
, int flush_global
)
2307 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2312 * Walks guest process memory "regions" one by one
2313 * and calls callback function 'fn' for each region.
2316 struct walk_memory_regions_data
2318 walk_memory_regions_fn fn
;
2320 unsigned long start
;
2324 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2325 abi_ulong end
, int new_prot
)
2327 if (data
->start
!= -1ul) {
2328 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2334 data
->start
= (new_prot
? end
: -1ul);
2335 data
->prot
= new_prot
;
2340 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2341 abi_ulong base
, int level
, void **lp
)
2347 return walk_memory_regions_end(data
, base
, 0);
2352 for (i
= 0; i
< L2_SIZE
; ++i
) {
2353 int prot
= pd
[i
].flags
;
2355 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2356 if (prot
!= data
->prot
) {
2357 rc
= walk_memory_regions_end(data
, pa
, prot
);
2365 for (i
= 0; i
< L2_SIZE
; ++i
) {
2366 pa
= base
| ((abi_ulong
)i
<<
2367 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2368 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2378 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2380 struct walk_memory_regions_data data
;
2388 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2389 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2390 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2396 return walk_memory_regions_end(&data
, 0, 0);
2399 static int dump_region(void *priv
, abi_ulong start
,
2400 abi_ulong end
, unsigned long prot
)
2402 FILE *f
= (FILE *)priv
;
2404 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2405 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2406 start
, end
, end
- start
,
2407 ((prot
& PAGE_READ
) ? 'r' : '-'),
2408 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2409 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2414 /* dump memory mappings */
2415 void page_dump(FILE *f
)
2417 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2418 "start", "end", "size", "prot");
2419 walk_memory_regions(f
, dump_region
);
2422 int page_get_flags(target_ulong address
)
2426 p
= page_find(address
>> TARGET_PAGE_BITS
);
2432 /* Modify the flags of a page and invalidate the code if necessary.
2433 The flag PAGE_WRITE_ORG is positioned automatically depending
2434 on PAGE_WRITE. The mmap_lock should already be held. */
2435 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2437 target_ulong addr
, len
;
2439 /* This function should never be called with addresses outside the
2440 guest address space. If this assert fires, it probably indicates
2441 a missing call to h2g_valid. */
2442 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2443 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2445 assert(start
< end
);
2447 start
= start
& TARGET_PAGE_MASK
;
2448 end
= TARGET_PAGE_ALIGN(end
);
2450 if (flags
& PAGE_WRITE
) {
2451 flags
|= PAGE_WRITE_ORG
;
2454 for (addr
= start
, len
= end
- start
;
2456 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2457 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2459 /* If the write protection bit is set, then we invalidate
2461 if (!(p
->flags
& PAGE_WRITE
) &&
2462 (flags
& PAGE_WRITE
) &&
2464 tb_invalidate_phys_page(addr
, 0, NULL
);
2470 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2476 /* This function should never be called with addresses outside the
2477 guest address space. If this assert fires, it probably indicates
2478 a missing call to h2g_valid. */
2479 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2480 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2486 if (start
+ len
- 1 < start
) {
2487 /* We've wrapped around. */
2491 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2492 start
= start
& TARGET_PAGE_MASK
;
2494 for (addr
= start
, len
= end
- start
;
2496 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2497 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2500 if( !(p
->flags
& PAGE_VALID
) )
2503 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2505 if (flags
& PAGE_WRITE
) {
2506 if (!(p
->flags
& PAGE_WRITE_ORG
))
2508 /* unprotect the page if it was put read-only because it
2509 contains translated code */
2510 if (!(p
->flags
& PAGE_WRITE
)) {
2511 if (!page_unprotect(addr
, 0, NULL
))
2520 /* called from signal handler: invalidate the code and unprotect the
2521 page. Return TRUE if the fault was successfully handled. */
2522 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2526 target_ulong host_start
, host_end
, addr
;
2528 /* Technically this isn't safe inside a signal handler. However we
2529 know this only ever happens in a synchronous SEGV handler, so in
2530 practice it seems to be ok. */
2533 p
= page_find(address
>> TARGET_PAGE_BITS
);
2539 /* if the page was really writable, then we change its
2540 protection back to writable */
2541 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
2542 host_start
= address
& qemu_host_page_mask
;
2543 host_end
= host_start
+ qemu_host_page_size
;
2546 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2547 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2548 p
->flags
|= PAGE_WRITE
;
2551 /* and since the content will be modified, we must invalidate
2552 the corresponding translated code. */
2553 tb_invalidate_phys_page(addr
, pc
, puc
);
2554 #ifdef DEBUG_TB_CHECK
2555 tb_invalidate_check(addr
);
2558 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2568 static inline void tlb_set_dirty(CPUState
*env
,
2569 unsigned long addr
, target_ulong vaddr
)
2572 #endif /* defined(CONFIG_USER_ONLY) */
2574 #if !defined(CONFIG_USER_ONLY)
2576 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2577 typedef struct subpage_t
{
2578 target_phys_addr_t base
;
2579 ram_addr_t sub_io_index
[TARGET_PAGE_SIZE
];
2580 ram_addr_t region_offset
[TARGET_PAGE_SIZE
];
2583 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2584 ram_addr_t memory
, ram_addr_t region_offset
);
2585 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2586 ram_addr_t orig_memory
,
2587 ram_addr_t region_offset
);
2588 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2591 if (addr > start_addr) \
2594 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2595 if (start_addr2 > 0) \
2599 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2600 end_addr2 = TARGET_PAGE_SIZE - 1; \
2602 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2603 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2608 /* register physical memory.
2609 For RAM, 'size' must be a multiple of the target page size.
2610 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2611 io memory page. The address used when calling the IO function is
2612 the offset from the start of the region, plus region_offset. Both
2613 start_addr and region_offset are rounded down to a page boundary
2614 before calculating this offset. This should not be a problem unless
2615 the low bits of start_addr and region_offset differ. */
2616 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2618 ram_addr_t phys_offset
,
2619 ram_addr_t region_offset
)
2621 target_phys_addr_t addr
, end_addr
;
2624 ram_addr_t orig_size
= size
;
2627 cpu_notify_set_memory(start_addr
, size
, phys_offset
);
2629 if (phys_offset
== IO_MEM_UNASSIGNED
) {
2630 region_offset
= start_addr
;
2632 region_offset
&= TARGET_PAGE_MASK
;
2633 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2634 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2635 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2636 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2637 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2638 ram_addr_t orig_memory
= p
->phys_offset
;
2639 target_phys_addr_t start_addr2
, end_addr2
;
2640 int need_subpage
= 0;
2642 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2645 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2646 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2647 &p
->phys_offset
, orig_memory
,
2650 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2653 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2655 p
->region_offset
= 0;
2657 p
->phys_offset
= phys_offset
;
2658 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2659 (phys_offset
& IO_MEM_ROMD
))
2660 phys_offset
+= TARGET_PAGE_SIZE
;
2663 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2664 p
->phys_offset
= phys_offset
;
2665 p
->region_offset
= region_offset
;
2666 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2667 (phys_offset
& IO_MEM_ROMD
)) {
2668 phys_offset
+= TARGET_PAGE_SIZE
;
2670 target_phys_addr_t start_addr2
, end_addr2
;
2671 int need_subpage
= 0;
2673 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2674 end_addr2
, need_subpage
);
2677 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2678 &p
->phys_offset
, IO_MEM_UNASSIGNED
,
2679 addr
& TARGET_PAGE_MASK
);
2680 subpage_register(subpage
, start_addr2
, end_addr2
,
2681 phys_offset
, region_offset
);
2682 p
->region_offset
= 0;
2686 region_offset
+= TARGET_PAGE_SIZE
;
2689 /* since each CPU stores ram addresses in its TLB cache, we must
2690 reset the modified entries */
2692 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2697 /* XXX: temporary until new memory mapping API */
2698 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2702 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2704 return IO_MEM_UNASSIGNED
;
2705 return p
->phys_offset
;
2708 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2711 kvm_coalesce_mmio_region(addr
, size
);
2714 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2717 kvm_uncoalesce_mmio_region(addr
, size
);
2720 void qemu_flush_coalesced_mmio_buffer(void)
2723 kvm_flush_coalesced_mmio_buffer();
2726 #if defined(__linux__) && !defined(TARGET_S390X)
2728 #include <sys/vfs.h>
2730 #define HUGETLBFS_MAGIC 0x958458f6
2732 static long gethugepagesize(const char *path
)
2738 ret
= statfs(path
, &fs
);
2739 } while (ret
!= 0 && errno
== EINTR
);
2746 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2747 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2752 static void *file_ram_alloc(RAMBlock
*block
,
2762 unsigned long hpagesize
;
2764 hpagesize
= gethugepagesize(path
);
2769 if (memory
< hpagesize
) {
2773 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2774 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2778 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2782 fd
= mkstemp(filename
);
2784 perror("unable to create backing store for hugepages");
2791 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2794 * ftruncate is not supported by hugetlbfs in older
2795 * hosts, so don't bother bailing out on errors.
2796 * If anything goes wrong with it under other filesystems,
2799 if (ftruncate(fd
, memory
))
2800 perror("ftruncate");
2803 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2804 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2805 * to sidestep this quirk.
2807 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2808 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2810 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2812 if (area
== MAP_FAILED
) {
2813 perror("file_ram_alloc: can't mmap RAM pages");
2822 static ram_addr_t
find_ram_offset(ram_addr_t size
)
2824 RAMBlock
*block
, *next_block
;
2825 ram_addr_t offset
= 0, mingap
= ULONG_MAX
;
2827 if (QLIST_EMPTY(&ram_list
.blocks
))
2830 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2831 ram_addr_t end
, next
= ULONG_MAX
;
2833 end
= block
->offset
+ block
->length
;
2835 QLIST_FOREACH(next_block
, &ram_list
.blocks
, next
) {
2836 if (next_block
->offset
>= end
) {
2837 next
= MIN(next
, next_block
->offset
);
2840 if (next
- end
>= size
&& next
- end
< mingap
) {
2842 mingap
= next
- end
;
2848 static ram_addr_t
last_ram_offset(void)
2851 ram_addr_t last
= 0;
2853 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2854 last
= MAX(last
, block
->offset
+ block
->length
);
2859 ram_addr_t
qemu_ram_alloc_from_ptr(DeviceState
*dev
, const char *name
,
2860 ram_addr_t size
, void *host
)
2862 RAMBlock
*new_block
, *block
;
2864 size
= TARGET_PAGE_ALIGN(size
);
2865 new_block
= qemu_mallocz(sizeof(*new_block
));
2867 if (dev
&& dev
->parent_bus
&& dev
->parent_bus
->info
->get_dev_path
) {
2868 char *id
= dev
->parent_bus
->info
->get_dev_path(dev
);
2870 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
2874 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
2876 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2877 if (!strcmp(block
->idstr
, new_block
->idstr
)) {
2878 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
2885 new_block
->host
= host
;
2888 #if defined (__linux__) && !defined(TARGET_S390X)
2889 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2890 if (!new_block
->host
) {
2891 new_block
->host
= qemu_vmalloc(size
);
2892 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2895 fprintf(stderr
, "-mem-path option unsupported\n");
2899 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2900 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2901 new_block
->host
= mmap((void*)0x1000000, size
,
2902 PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2903 MAP_SHARED
| MAP_ANONYMOUS
, -1, 0);
2905 new_block
->host
= qemu_vmalloc(size
);
2907 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2911 new_block
->offset
= find_ram_offset(size
);
2912 new_block
->length
= size
;
2914 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2916 ram_list
.phys_dirty
= qemu_realloc(ram_list
.phys_dirty
,
2917 last_ram_offset() >> TARGET_PAGE_BITS
);
2918 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2919 0xff, size
>> TARGET_PAGE_BITS
);
2922 kvm_setup_guest_memory(new_block
->host
, size
);
2924 return new_block
->offset
;
2927 void qemu_ram_unmap(ram_addr_t addr
)
2931 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2932 if (addr
== block
->offset
) {
2933 QLIST_REMOVE(block
, next
);
2940 ram_addr_t
qemu_ram_alloc(DeviceState
*dev
, const char *name
, ram_addr_t size
)
2942 return qemu_ram_alloc_from_ptr(dev
, name
, size
, NULL
);
2945 void qemu_ram_free(ram_addr_t addr
)
2949 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2950 if (addr
== block
->offset
) {
2951 QLIST_REMOVE(block
, next
);
2953 #if defined (__linux__) && !defined(TARGET_S390X)
2955 munmap(block
->host
, block
->length
);
2958 qemu_vfree(block
->host
);
2962 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2963 munmap(block
->host
, block
->length
);
2965 qemu_vfree(block
->host
);
2975 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2976 With the exception of the softmmu code in this file, this should
2977 only be used for local memory (e.g. video ram) that the device owns,
2978 and knows it isn't going to access beyond the end of the block.
2980 It should not be used for general purpose DMA.
2981 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2983 void *qemu_get_ram_ptr(ram_addr_t addr
)
2987 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2988 if (addr
- block
->offset
< block
->length
) {
2989 QLIST_REMOVE(block
, next
);
2990 QLIST_INSERT_HEAD(&ram_list
.blocks
, block
, next
);
2991 return block
->host
+ (addr
- block
->offset
);
2995 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3001 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3002 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3004 void *qemu_safe_ram_ptr(ram_addr_t addr
)
3008 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3009 if (addr
- block
->offset
< block
->length
) {
3010 return block
->host
+ (addr
- block
->offset
);
3014 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3020 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
3023 uint8_t *host
= ptr
;
3025 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3026 if (host
- block
->host
< block
->length
) {
3027 *ram_addr
= block
->offset
+ (host
- block
->host
);
3034 /* Some of the softmmu routines need to translate from a host pointer
3035 (typically a TLB entry) back to a ram offset. */
3036 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
3038 ram_addr_t ram_addr
;
3040 if (qemu_ram_addr_from_host(ptr
, &ram_addr
)) {
3041 fprintf(stderr
, "Bad ram pointer %p\n", ptr
);
3047 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
3049 #ifdef DEBUG_UNASSIGNED
3050 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3052 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3053 do_unassigned_access(addr
, 0, 0, 0, 1);
3058 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
3060 #ifdef DEBUG_UNASSIGNED
3061 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3063 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3064 do_unassigned_access(addr
, 0, 0, 0, 2);
3069 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
3071 #ifdef DEBUG_UNASSIGNED
3072 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3074 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3075 do_unassigned_access(addr
, 0, 0, 0, 4);
3080 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3082 #ifdef DEBUG_UNASSIGNED
3083 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3085 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3086 do_unassigned_access(addr
, 1, 0, 0, 1);
3090 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3092 #ifdef DEBUG_UNASSIGNED
3093 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3095 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3096 do_unassigned_access(addr
, 1, 0, 0, 2);
3100 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3102 #ifdef DEBUG_UNASSIGNED
3103 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3105 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3106 do_unassigned_access(addr
, 1, 0, 0, 4);
3110 static CPUReadMemoryFunc
* const unassigned_mem_read
[3] = {
3111 unassigned_mem_readb
,
3112 unassigned_mem_readw
,
3113 unassigned_mem_readl
,
3116 static CPUWriteMemoryFunc
* const unassigned_mem_write
[3] = {
3117 unassigned_mem_writeb
,
3118 unassigned_mem_writew
,
3119 unassigned_mem_writel
,
3122 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
3126 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3127 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3128 #if !defined(CONFIG_USER_ONLY)
3129 tb_invalidate_phys_page_fast(ram_addr
, 1);
3130 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3133 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
3134 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3135 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3136 /* we remove the notdirty callback only if the code has been
3138 if (dirty_flags
== 0xff)
3139 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3142 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
3146 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3147 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3148 #if !defined(CONFIG_USER_ONLY)
3149 tb_invalidate_phys_page_fast(ram_addr
, 2);
3150 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3153 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
3154 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3155 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3156 /* we remove the notdirty callback only if the code has been
3158 if (dirty_flags
== 0xff)
3159 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3162 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
3166 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3167 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3168 #if !defined(CONFIG_USER_ONLY)
3169 tb_invalidate_phys_page_fast(ram_addr
, 4);
3170 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3173 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
3174 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3175 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3176 /* we remove the notdirty callback only if the code has been
3178 if (dirty_flags
== 0xff)
3179 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3182 static CPUReadMemoryFunc
* const error_mem_read
[3] = {
3183 NULL
, /* never used */
3184 NULL
, /* never used */
3185 NULL
, /* never used */
3188 static CPUWriteMemoryFunc
* const notdirty_mem_write
[3] = {
3189 notdirty_mem_writeb
,
3190 notdirty_mem_writew
,
3191 notdirty_mem_writel
,
3194 /* Generate a debug exception if a watchpoint has been hit. */
3195 static void check_watchpoint(int offset
, int len_mask
, int flags
)
3197 CPUState
*env
= cpu_single_env
;
3198 target_ulong pc
, cs_base
;
3199 TranslationBlock
*tb
;
3204 if (env
->watchpoint_hit
) {
3205 /* We re-entered the check after replacing the TB. Now raise
3206 * the debug interrupt so that is will trigger after the
3207 * current instruction. */
3208 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
3211 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
3212 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
3213 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
3214 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
3215 wp
->flags
|= BP_WATCHPOINT_HIT
;
3216 if (!env
->watchpoint_hit
) {
3217 env
->watchpoint_hit
= wp
;
3218 tb
= tb_find_pc(env
->mem_io_pc
);
3220 cpu_abort(env
, "check_watchpoint: could not find TB for "
3221 "pc=%p", (void *)env
->mem_io_pc
);
3223 cpu_restore_state(tb
, env
, env
->mem_io_pc
, NULL
);
3224 tb_phys_invalidate(tb
, -1);
3225 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
3226 env
->exception_index
= EXCP_DEBUG
;
3228 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
3229 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
3231 cpu_resume_from_signal(env
, NULL
);
3234 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
3239 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3240 so these check for a hit then pass through to the normal out-of-line
3242 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
3244 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
3245 return ldub_phys(addr
);
3248 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
3250 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
3251 return lduw_phys(addr
);
3254 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
3256 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
3257 return ldl_phys(addr
);
3260 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
3263 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
3264 stb_phys(addr
, val
);
3267 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
3270 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
3271 stw_phys(addr
, val
);
3274 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
3277 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
3278 stl_phys(addr
, val
);
3281 static CPUReadMemoryFunc
* const watch_mem_read
[3] = {
3287 static CPUWriteMemoryFunc
* const watch_mem_write
[3] = {
3293 static inline uint32_t subpage_readlen (subpage_t
*mmio
,
3294 target_phys_addr_t addr
,
3297 unsigned int idx
= SUBPAGE_IDX(addr
);
3298 #if defined(DEBUG_SUBPAGE)
3299 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3300 mmio
, len
, addr
, idx
);
3303 addr
+= mmio
->region_offset
[idx
];
3304 idx
= mmio
->sub_io_index
[idx
];
3305 return io_mem_read
[idx
][len
](io_mem_opaque
[idx
], addr
);
3308 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
3309 uint32_t value
, unsigned int len
)
3311 unsigned int idx
= SUBPAGE_IDX(addr
);
3312 #if defined(DEBUG_SUBPAGE)
3313 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n",
3314 __func__
, mmio
, len
, addr
, idx
, value
);
3317 addr
+= mmio
->region_offset
[idx
];
3318 idx
= mmio
->sub_io_index
[idx
];
3319 io_mem_write
[idx
][len
](io_mem_opaque
[idx
], addr
, value
);
3322 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
3324 return subpage_readlen(opaque
, addr
, 0);
3327 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
3330 subpage_writelen(opaque
, addr
, value
, 0);
3333 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
3335 return subpage_readlen(opaque
, addr
, 1);
3338 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
3341 subpage_writelen(opaque
, addr
, value
, 1);
3344 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
3346 return subpage_readlen(opaque
, addr
, 2);
3349 static void subpage_writel (void *opaque
, target_phys_addr_t addr
,
3352 subpage_writelen(opaque
, addr
, value
, 2);
3355 static CPUReadMemoryFunc
* const subpage_read
[] = {
3361 static CPUWriteMemoryFunc
* const subpage_write
[] = {
3367 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3368 ram_addr_t memory
, ram_addr_t region_offset
)
3372 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3374 idx
= SUBPAGE_IDX(start
);
3375 eidx
= SUBPAGE_IDX(end
);
3376 #if defined(DEBUG_SUBPAGE)
3377 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3378 mmio
, start
, end
, idx
, eidx
, memory
);
3380 if ((memory
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
3381 memory
= IO_MEM_UNASSIGNED
;
3382 memory
= (memory
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3383 for (; idx
<= eidx
; idx
++) {
3384 mmio
->sub_io_index
[idx
] = memory
;
3385 mmio
->region_offset
[idx
] = region_offset
;
3391 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
3392 ram_addr_t orig_memory
,
3393 ram_addr_t region_offset
)
3398 mmio
= qemu_mallocz(sizeof(subpage_t
));
3401 subpage_memory
= cpu_register_io_memory(subpage_read
, subpage_write
, mmio
,
3402 DEVICE_NATIVE_ENDIAN
);
3403 #if defined(DEBUG_SUBPAGE)
3404 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3405 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3407 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
3408 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, orig_memory
, region_offset
);
3413 static int get_free_io_mem_idx(void)
3417 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
3418 if (!io_mem_used
[i
]) {
3422 fprintf(stderr
, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES
);
3427 * Usually, devices operate in little endian mode. There are devices out
3428 * there that operate in big endian too. Each device gets byte swapped
3429 * mmio if plugged onto a CPU that does the other endianness.
3439 typedef struct SwapEndianContainer
{
3440 CPUReadMemoryFunc
*read
[3];
3441 CPUWriteMemoryFunc
*write
[3];
3443 } SwapEndianContainer
;
3445 static uint32_t swapendian_mem_readb (void *opaque
, target_phys_addr_t addr
)
3448 SwapEndianContainer
*c
= opaque
;
3449 val
= c
->read
[0](c
->opaque
, addr
);
3453 static uint32_t swapendian_mem_readw(void *opaque
, target_phys_addr_t addr
)
3456 SwapEndianContainer
*c
= opaque
;
3457 val
= bswap16(c
->read
[1](c
->opaque
, addr
));
3461 static uint32_t swapendian_mem_readl(void *opaque
, target_phys_addr_t addr
)
3464 SwapEndianContainer
*c
= opaque
;
3465 val
= bswap32(c
->read
[2](c
->opaque
, addr
));
3469 static CPUReadMemoryFunc
* const swapendian_readfn
[3]={
3470 swapendian_mem_readb
,
3471 swapendian_mem_readw
,
3472 swapendian_mem_readl
3475 static void swapendian_mem_writeb(void *opaque
, target_phys_addr_t addr
,
3478 SwapEndianContainer
*c
= opaque
;
3479 c
->write
[0](c
->opaque
, addr
, val
);
3482 static void swapendian_mem_writew(void *opaque
, target_phys_addr_t addr
,
3485 SwapEndianContainer
*c
= opaque
;
3486 c
->write
[1](c
->opaque
, addr
, bswap16(val
));
3489 static void swapendian_mem_writel(void *opaque
, target_phys_addr_t addr
,
3492 SwapEndianContainer
*c
= opaque
;
3493 c
->write
[2](c
->opaque
, addr
, bswap32(val
));
3496 static CPUWriteMemoryFunc
* const swapendian_writefn
[3]={
3497 swapendian_mem_writeb
,
3498 swapendian_mem_writew
,
3499 swapendian_mem_writel
3502 static void swapendian_init(int io_index
)
3504 SwapEndianContainer
*c
= qemu_malloc(sizeof(SwapEndianContainer
));
3507 /* Swap mmio for big endian targets */
3508 c
->opaque
= io_mem_opaque
[io_index
];
3509 for (i
= 0; i
< 3; i
++) {
3510 c
->read
[i
] = io_mem_read
[io_index
][i
];
3511 c
->write
[i
] = io_mem_write
[io_index
][i
];
3513 io_mem_read
[io_index
][i
] = swapendian_readfn
[i
];
3514 io_mem_write
[io_index
][i
] = swapendian_writefn
[i
];
3516 io_mem_opaque
[io_index
] = c
;
3519 static void swapendian_del(int io_index
)
3521 if (io_mem_read
[io_index
][0] == swapendian_readfn
[0]) {
3522 qemu_free(io_mem_opaque
[io_index
]);
3526 /* mem_read and mem_write are arrays of functions containing the
3527 function to access byte (index 0), word (index 1) and dword (index
3528 2). Functions can be omitted with a NULL function pointer.
3529 If io_index is non zero, the corresponding io zone is
3530 modified. If it is zero, a new io zone is allocated. The return
3531 value can be used with cpu_register_physical_memory(). (-1) is
3532 returned if error. */
3533 static int cpu_register_io_memory_fixed(int io_index
,
3534 CPUReadMemoryFunc
* const *mem_read
,
3535 CPUWriteMemoryFunc
* const *mem_write
,
3536 void *opaque
, enum device_endian endian
)
3540 if (io_index
<= 0) {
3541 io_index
= get_free_io_mem_idx();
3545 io_index
>>= IO_MEM_SHIFT
;
3546 if (io_index
>= IO_MEM_NB_ENTRIES
)
3550 for (i
= 0; i
< 3; ++i
) {
3551 io_mem_read
[io_index
][i
]
3552 = (mem_read
[i
] ? mem_read
[i
] : unassigned_mem_read
[i
]);
3554 for (i
= 0; i
< 3; ++i
) {
3555 io_mem_write
[io_index
][i
]
3556 = (mem_write
[i
] ? mem_write
[i
] : unassigned_mem_write
[i
]);
3558 io_mem_opaque
[io_index
] = opaque
;
3561 case DEVICE_BIG_ENDIAN
:
3562 #ifndef TARGET_WORDS_BIGENDIAN
3563 swapendian_init(io_index
);
3566 case DEVICE_LITTLE_ENDIAN
:
3567 #ifdef TARGET_WORDS_BIGENDIAN
3568 swapendian_init(io_index
);
3571 case DEVICE_NATIVE_ENDIAN
:
3576 return (io_index
<< IO_MEM_SHIFT
);
3579 int cpu_register_io_memory(CPUReadMemoryFunc
* const *mem_read
,
3580 CPUWriteMemoryFunc
* const *mem_write
,
3581 void *opaque
, enum device_endian endian
)
3583 return cpu_register_io_memory_fixed(0, mem_read
, mem_write
, opaque
, endian
);
3586 void cpu_unregister_io_memory(int io_table_address
)
3589 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
3591 swapendian_del(io_index
);
3593 for (i
=0;i
< 3; i
++) {
3594 io_mem_read
[io_index
][i
] = unassigned_mem_read
[i
];
3595 io_mem_write
[io_index
][i
] = unassigned_mem_write
[i
];
3597 io_mem_opaque
[io_index
] = NULL
;
3598 io_mem_used
[io_index
] = 0;
3601 static void io_mem_init(void)
3605 cpu_register_io_memory_fixed(IO_MEM_ROM
, error_mem_read
,
3606 unassigned_mem_write
, NULL
,
3607 DEVICE_NATIVE_ENDIAN
);
3608 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED
, unassigned_mem_read
,
3609 unassigned_mem_write
, NULL
,
3610 DEVICE_NATIVE_ENDIAN
);
3611 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY
, error_mem_read
,
3612 notdirty_mem_write
, NULL
,
3613 DEVICE_NATIVE_ENDIAN
);
3617 io_mem_watch
= cpu_register_io_memory(watch_mem_read
,
3618 watch_mem_write
, NULL
,
3619 DEVICE_NATIVE_ENDIAN
);
3622 #endif /* !defined(CONFIG_USER_ONLY) */
3624 /* physical memory access (slow version, mainly for debug) */
3625 #if defined(CONFIG_USER_ONLY)
3626 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3627 uint8_t *buf
, int len
, int is_write
)
3634 page
= addr
& TARGET_PAGE_MASK
;
3635 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3638 flags
= page_get_flags(page
);
3639 if (!(flags
& PAGE_VALID
))
3642 if (!(flags
& PAGE_WRITE
))
3644 /* XXX: this code should not depend on lock_user */
3645 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3648 unlock_user(p
, addr
, l
);
3650 if (!(flags
& PAGE_READ
))
3652 /* XXX: this code should not depend on lock_user */
3653 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3656 unlock_user(p
, addr
, 0);
3666 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
3667 int len
, int is_write
)
3672 target_phys_addr_t page
;
3677 page
= addr
& TARGET_PAGE_MASK
;
3678 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3681 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3683 pd
= IO_MEM_UNASSIGNED
;
3685 pd
= p
->phys_offset
;
3689 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3690 target_phys_addr_t addr1
= addr
;
3691 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3693 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3694 /* XXX: could force cpu_single_env to NULL to avoid
3696 if (l
>= 4 && ((addr1
& 3) == 0)) {
3697 /* 32 bit write access */
3699 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr1
, val
);
3701 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3702 /* 16 bit write access */
3704 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr1
, val
);
3707 /* 8 bit write access */
3709 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr1
, val
);
3713 unsigned long addr1
;
3714 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3716 ptr
= qemu_get_ram_ptr(addr1
);
3717 memcpy(ptr
, buf
, l
);
3718 if (!cpu_physical_memory_is_dirty(addr1
)) {
3719 /* invalidate code */
3720 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3722 cpu_physical_memory_set_dirty_flags(
3723 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3725 /* qemu doesn't execute guest code directly, but kvm does
3726 therefore flush instruction caches */
3728 flush_icache_range((unsigned long)ptr
,
3729 ((unsigned long)ptr
)+l
);
3732 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3733 !(pd
& IO_MEM_ROMD
)) {
3734 target_phys_addr_t addr1
= addr
;
3736 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3738 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3739 if (l
>= 4 && ((addr1
& 3) == 0)) {
3740 /* 32 bit read access */
3741 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr1
);
3744 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3745 /* 16 bit read access */
3746 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr1
);
3750 /* 8 bit read access */
3751 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr1
);
3757 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3758 (addr
& ~TARGET_PAGE_MASK
);
3759 memcpy(buf
, ptr
, l
);
3768 /* used for ROM loading : can write in RAM and ROM */
3769 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3770 const uint8_t *buf
, int len
)
3774 target_phys_addr_t page
;
3779 page
= addr
& TARGET_PAGE_MASK
;
3780 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3783 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3785 pd
= IO_MEM_UNASSIGNED
;
3787 pd
= p
->phys_offset
;
3790 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
3791 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
3792 !(pd
& IO_MEM_ROMD
)) {
3795 unsigned long addr1
;
3796 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3798 ptr
= qemu_get_ram_ptr(addr1
);
3799 memcpy(ptr
, buf
, l
);
3809 target_phys_addr_t addr
;
3810 target_phys_addr_t len
;
3813 static BounceBuffer bounce
;
3815 typedef struct MapClient
{
3817 void (*callback
)(void *opaque
);
3818 QLIST_ENTRY(MapClient
) link
;
3821 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
3822 = QLIST_HEAD_INITIALIZER(map_client_list
);
3824 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3826 MapClient
*client
= qemu_malloc(sizeof(*client
));
3828 client
->opaque
= opaque
;
3829 client
->callback
= callback
;
3830 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3834 void cpu_unregister_map_client(void *_client
)
3836 MapClient
*client
= (MapClient
*)_client
;
3838 QLIST_REMOVE(client
, link
);
3842 static void cpu_notify_map_clients(void)
3846 while (!QLIST_EMPTY(&map_client_list
)) {
3847 client
= QLIST_FIRST(&map_client_list
);
3848 client
->callback(client
->opaque
);
3849 cpu_unregister_map_client(client
);
3853 /* Map a physical memory region into a host virtual address.
3854 * May map a subset of the requested range, given by and returned in *plen.
3855 * May return NULL if resources needed to perform the mapping are exhausted.
3856 * Use only for reads OR writes - not for read-modify-write operations.
3857 * Use cpu_register_map_client() to know when retrying the map operation is
3858 * likely to succeed.
3860 void *cpu_physical_memory_map(target_phys_addr_t addr
,
3861 target_phys_addr_t
*plen
,
3864 target_phys_addr_t len
= *plen
;
3865 target_phys_addr_t done
= 0;
3867 uint8_t *ret
= NULL
;
3869 target_phys_addr_t page
;
3872 unsigned long addr1
;
3875 page
= addr
& TARGET_PAGE_MASK
;
3876 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3879 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3881 pd
= IO_MEM_UNASSIGNED
;
3883 pd
= p
->phys_offset
;
3886 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3887 if (done
|| bounce
.buffer
) {
3890 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3894 cpu_physical_memory_rw(addr
, bounce
.buffer
, l
, 0);
3896 ptr
= bounce
.buffer
;
3898 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3899 ptr
= qemu_get_ram_ptr(addr1
);
3903 } else if (ret
+ done
!= ptr
) {
3915 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3916 * Will also mark the memory as dirty if is_write == 1. access_len gives
3917 * the amount of memory that was actually read or written by the caller.
3919 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
3920 int is_write
, target_phys_addr_t access_len
)
3922 unsigned long flush_len
= (unsigned long)access_len
;
3924 if (buffer
!= bounce
.buffer
) {
3926 ram_addr_t addr1
= qemu_ram_addr_from_host_nofail(buffer
);
3927 while (access_len
) {
3929 l
= TARGET_PAGE_SIZE
;
3932 if (!cpu_physical_memory_is_dirty(addr1
)) {
3933 /* invalidate code */
3934 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3936 cpu_physical_memory_set_dirty_flags(
3937 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3942 dma_flush_range((unsigned long)buffer
,
3943 (unsigned long)buffer
+ flush_len
);
3948 cpu_physical_memory_write(bounce
.addr
, bounce
.buffer
, access_len
);
3950 qemu_vfree(bounce
.buffer
);
3951 bounce
.buffer
= NULL
;
3952 cpu_notify_map_clients();
3955 /* warning: addr must be aligned */
3956 uint32_t ldl_phys(target_phys_addr_t addr
)
3964 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3966 pd
= IO_MEM_UNASSIGNED
;
3968 pd
= p
->phys_offset
;
3971 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3972 !(pd
& IO_MEM_ROMD
)) {
3974 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3976 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3977 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3980 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3981 (addr
& ~TARGET_PAGE_MASK
);
3987 /* warning: addr must be aligned */
3988 uint64_t ldq_phys(target_phys_addr_t addr
)
3996 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3998 pd
= IO_MEM_UNASSIGNED
;
4000 pd
= p
->phys_offset
;
4003 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4004 !(pd
& IO_MEM_ROMD
)) {
4006 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4008 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4009 #ifdef TARGET_WORDS_BIGENDIAN
4010 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
4011 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
4013 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
4014 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
4018 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4019 (addr
& ~TARGET_PAGE_MASK
);
4026 uint32_t ldub_phys(target_phys_addr_t addr
)
4029 cpu_physical_memory_read(addr
, &val
, 1);
4033 /* warning: addr must be aligned */
4034 uint32_t lduw_phys(target_phys_addr_t addr
)
4042 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4044 pd
= IO_MEM_UNASSIGNED
;
4046 pd
= p
->phys_offset
;
4049 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4050 !(pd
& IO_MEM_ROMD
)) {
4052 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4054 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4055 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
4058 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4059 (addr
& ~TARGET_PAGE_MASK
);
4065 /* warning: addr must be aligned. The ram page is not masked as dirty
4066 and the code inside is not invalidated. It is useful if the dirty
4067 bits are used to track modified PTEs */
4068 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
4075 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4077 pd
= IO_MEM_UNASSIGNED
;
4079 pd
= p
->phys_offset
;
4082 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4083 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4085 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4086 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4088 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4089 ptr
= qemu_get_ram_ptr(addr1
);
4092 if (unlikely(in_migration
)) {
4093 if (!cpu_physical_memory_is_dirty(addr1
)) {
4094 /* invalidate code */
4095 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4097 cpu_physical_memory_set_dirty_flags(
4098 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4104 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
4111 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4113 pd
= IO_MEM_UNASSIGNED
;
4115 pd
= p
->phys_offset
;
4118 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4119 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4121 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4122 #ifdef TARGET_WORDS_BIGENDIAN
4123 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
4124 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
4126 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4127 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
4130 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4131 (addr
& ~TARGET_PAGE_MASK
);
4136 /* warning: addr must be aligned */
4137 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4144 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4146 pd
= IO_MEM_UNASSIGNED
;
4148 pd
= p
->phys_offset
;
4151 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4152 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4154 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4155 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4157 unsigned long addr1
;
4158 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4160 ptr
= qemu_get_ram_ptr(addr1
);
4162 if (!cpu_physical_memory_is_dirty(addr1
)) {
4163 /* invalidate code */
4164 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4166 cpu_physical_memory_set_dirty_flags(addr1
,
4167 (0xff & ~CODE_DIRTY_FLAG
));
4173 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4176 cpu_physical_memory_write(addr
, &v
, 1);
4179 /* warning: addr must be aligned */
4180 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4187 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4189 pd
= IO_MEM_UNASSIGNED
;
4191 pd
= p
->phys_offset
;
4194 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4195 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4197 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4198 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
4200 unsigned long addr1
;
4201 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4203 ptr
= qemu_get_ram_ptr(addr1
);
4205 if (!cpu_physical_memory_is_dirty(addr1
)) {
4206 /* invalidate code */
4207 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4209 cpu_physical_memory_set_dirty_flags(addr1
,
4210 (0xff & ~CODE_DIRTY_FLAG
));
4216 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
4219 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
4222 /* virtual memory access for debug (includes writing to ROM) */
4223 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
4224 uint8_t *buf
, int len
, int is_write
)
4227 target_phys_addr_t phys_addr
;
4231 page
= addr
& TARGET_PAGE_MASK
;
4232 phys_addr
= cpu_get_phys_page_debug(env
, page
);
4233 /* if no physical page mapped, return an error */
4234 if (phys_addr
== -1)
4236 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4239 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
4241 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
4243 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
4252 /* in deterministic execution mode, instructions doing device I/Os
4253 must be at the end of the TB */
4254 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
4256 TranslationBlock
*tb
;
4258 target_ulong pc
, cs_base
;
4261 tb
= tb_find_pc((unsigned long)retaddr
);
4263 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4266 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4267 cpu_restore_state(tb
, env
, (unsigned long)retaddr
, NULL
);
4268 /* Calculate how many instructions had been executed before the fault
4270 n
= n
- env
->icount_decr
.u16
.low
;
4271 /* Generate a new TB ending on the I/O insn. */
4273 /* On MIPS and SH, delay slot instructions can only be restarted if
4274 they were already the first instruction in the TB. If this is not
4275 the first instruction in a TB then re-execute the preceding
4277 #if defined(TARGET_MIPS)
4278 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4279 env
->active_tc
.PC
-= 4;
4280 env
->icount_decr
.u16
.low
++;
4281 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4283 #elif defined(TARGET_SH4)
4284 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4287 env
->icount_decr
.u16
.low
++;
4288 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4291 /* This should never happen. */
4292 if (n
> CF_COUNT_MASK
)
4293 cpu_abort(env
, "TB too big during recompile");
4295 cflags
= n
| CF_LAST_IO
;
4297 cs_base
= tb
->cs_base
;
4299 tb_phys_invalidate(tb
, -1);
4300 /* FIXME: In theory this could raise an exception. In practice
4301 we have already translated the block once so it's probably ok. */
4302 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4303 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4304 the first in the TB) then we end up generating a whole new TB and
4305 repeating the fault, which is horribly inefficient.
4306 Better would be to execute just this insn uncached, or generate a
4308 cpu_resume_from_signal(env
, NULL
);
4311 #if !defined(CONFIG_USER_ONLY)
4313 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4315 int i
, target_code_size
, max_target_code_size
;
4316 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4317 TranslationBlock
*tb
;
4319 target_code_size
= 0;
4320 max_target_code_size
= 0;
4322 direct_jmp_count
= 0;
4323 direct_jmp2_count
= 0;
4324 for(i
= 0; i
< nb_tbs
; i
++) {
4326 target_code_size
+= tb
->size
;
4327 if (tb
->size
> max_target_code_size
)
4328 max_target_code_size
= tb
->size
;
4329 if (tb
->page_addr
[1] != -1)
4331 if (tb
->tb_next_offset
[0] != 0xffff) {
4333 if (tb
->tb_next_offset
[1] != 0xffff) {
4334 direct_jmp2_count
++;
4338 /* XXX: avoid using doubles ? */
4339 cpu_fprintf(f
, "Translation buffer state:\n");
4340 cpu_fprintf(f
, "gen code size %td/%ld\n",
4341 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4342 cpu_fprintf(f
, "TB count %d/%d\n",
4343 nb_tbs
, code_gen_max_blocks
);
4344 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4345 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4346 max_target_code_size
);
4347 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4348 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4349 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4350 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4352 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4353 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4355 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4357 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4358 cpu_fprintf(f
, "\nStatistics:\n");
4359 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4360 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4361 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4362 #ifdef CONFIG_PROFILER
4363 tcg_dump_info(f
, cpu_fprintf
);
4367 #define MMUSUFFIX _cmmu
4368 #define GETPC() NULL
4369 #define env cpu_single_env
4370 #define SOFTMMU_CODE_ACCESS
4373 #include "softmmu_template.h"
4376 #include "softmmu_template.h"
4379 #include "softmmu_template.h"
4382 #include "softmmu_template.h"