/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>

#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
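/* Worked example of the split above (illustrative values, not from the
   original source): assuming a 4 KB target page (TARGET_PAGE_BITS = 12)
   and L2_BITS = 10, a 32-bit address gives L1_BITS = 32 - 10 - 12 = 10,
   i.e. a 1024-entry L1 table whose slots each point to a 1024-entry L2
   table of PageDescs:

       page_index = addr >> TARGET_PAGE_BITS;
       l1_slot    = page_index >> L2_BITS;
       l2_slot    = page_index & (L2_SIZE - 1);
*/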
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
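/* map_exec below makes a buffer executable.  On win32 this is a single
   VirtualProtect call; on POSIX hosts mprotect() requires page-aligned
   arguments, hence the rounding of start down and end up to host page
   boundaries before the call. */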
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
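/* The helpers below navigate the two-level l1_map table: page_l1_map
   returns the L1 slot for a target page index, page_find_alloc creates
   the L2 table on first touch, and page_find is the read-only variant
   that returns NULL instead of allocating. */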
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
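/* code_gen_alloc picks the translation-cache storage: the static BSS
   buffer when USE_STATIC_CODE_GEN_BUFFER is set, otherwise an mmap'd
   (or malloc'd) region sized from tb_size or phys_ram_size.  The
   per-host branches below only constrain *where* the buffer lands so
   that generated direct jumps can reach it (e.g. below 2G on sparc64
   hosts). */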
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
626 static void tb_page_check(void)
628 TranslationBlock
*tb
;
629 int i
, flags1
, flags2
;
631 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
632 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
633 flags1
= page_get_flags(tb
->pc
);
634 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
635 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
636 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
637 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
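/* The TB lists use tagged pointers: the low two bits of a
   TranslationBlock link encode which of the two jump slots (n = 0 or 1)
   the link came from, and the value 2 marks the list head (tb->jmp_first
   is initialized to (long)tb | 2).  Hence the recurring
   "n1 = (long)tb1 & 3" and "(TranslationBlock *)((long)tb1 & ~3)"
   idioms above and below. */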
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
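/* The SMC bitmap built below tracks, per page, which bytes are covered
   by translated code: one bit per byte of the target page.  A worked
   example (illustrative values, not from the original source):
   set_bits(tab, 5, 7) must set bits 5..11, i.e. the top 3 bits of
   tab[0] and the low 4 bits of tab[1], which is why the head byte, the
   whole middle bytes, and the tail byte of the range are handled
   separately. */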
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_io_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
*tb
);
1237 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1239 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1242 tb1
= tb
->jmp_next
[n
];
1244 /* find head of list */
1247 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1250 tb1
= tb1
->jmp_next
[n1
];
1252 /* we are now sure now that tb jumps to tb1 */
1255 /* remove tb from the jmp_first list */
1256 ptb
= &tb_next
->jmp_first
;
1260 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1261 if (n1
== n
&& tb1
== tb
)
1263 ptb
= &tb1
->jmp_next
[n1
];
1265 *ptb
= tb
->jmp_next
[n
];
1266 tb
->jmp_next
[n
] = NULL
;
1268 /* suppress the jump to next tb in generated code */
1269 tb_reset_jump(tb
, n
);
1271 /* suppress jumps in the tb on which we could have jumped */
1272 tb_reset_jump_recursive(tb_next
);
1276 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1278 tb_reset_jump_recursive2(tb
, 0);
1279 tb_reset_jump_recursive2(tb
, 1);
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));
    if (!wp)
        return -ENOMEM;

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));
    if (!bp)
        return -ENOMEM;

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
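/* cpu_physical_memory_reset_dirty below clears the given dirty bits for
   a physical range and then rewrites every CPU's TLB write entries so
   the next store to those pages takes the TLB_NOTDIRTY slow path and
   sets the bits again. */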
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    if (kvm_enabled())
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
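/* Note on the fast path set up below: te->addend is stored as
   (host_page_address - vaddr), so a TLB hit computes the host address
   as simply guest_vaddr + addend with no masking.  iotlb is biased by
   -vaddr the same way for the I/O slow path. */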
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
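/* CHECK_SUBPAGE above decides whether a registration covers only part
   of the target page containing 'addr': start_addr2/end_addr2 receive
   the offsets of the region within that page, and need_subpage is set
   when either end is not page-aligned, forcing the page to be split
   into a subpage_t. */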
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
2372 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2374 #ifdef DEBUG_UNASSIGNED
2375 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2377 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2378 do_unassigned_access(addr
, 1, 0, 0, 1);
2382 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2384 #ifdef DEBUG_UNASSIGNED
2385 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2387 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2388 do_unassigned_access(addr
, 1, 0, 0, 2);
2392 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2394 #ifdef DEBUG_UNASSIGNED
2395 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2397 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2398 do_unassigned_access(addr
, 1, 0, 0, 4);
2402 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2403 unassigned_mem_readb
,
2404 unassigned_mem_readw
,
2405 unassigned_mem_readl
,
2408 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2409 unassigned_mem_writeb
,
2410 unassigned_mem_writew
,
2411 unassigned_mem_writel
,
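
/* The notdirty handlers catch writes to RAM pages whose dirty bits are not
   all set (typically pages that still contain translated code): the write
   first invalidates any TBs on the page, then updates RAM and the dirty
   bitmap, and finally drops the slow-path TLB entry once no code is left. */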
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
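
/* Subpage support: normally a whole target page maps to a single region,
   but when a registered region starts or ends in the middle of a page, the
   page is split into subpages with per-range handlers. A subpage_t records
   the handlers for each offset within the page and dispatches accordingly. */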
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
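
/* Illustration (hypothetical addresses and 'mmio_index', for example only):
   registering a small MMIO region at an offset inside a page, e.g.

       cpu_register_physical_memory(0x10000100, 0x100, mmio_index);

   makes CHECK_SUBPAGE flag the enclosing page, so cpu_register_physical_memory()
   routes it through subpage_init()/subpage_register() and the rest of the
   page keeps its previous handlers. */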
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
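
/* A minimal registration sketch (hypothetical device 'mydev'; none of these
   symbols exist in this file):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);

   A NULL entry for one access width sets IO_MEM_SUBWIDTH in the returned
   value, so accesses of that width go through the subpage machinery. */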
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
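
/* Device models and loaders normally reach this function through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers for
   DMA-style transfers; the loop above splits unaligned and multi-page
   accesses page by page. */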
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
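
/* The *_notdirty stores are presumably most useful to target MMU code that
   sets accessed/dirty bits in guest page tables: the PTE update must not
   disturb the dirty bitmap that the VM itself uses to track modified PTEs
   (migration being the exception handled above). */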
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
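
/* This is the access path used by the gdb stub: 'addr' is a guest virtual
   address, translated page by page with cpu_get_phys_page_debug(), so the
   access does not go through the TLB and does not trigger watchpoints. */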
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
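
/* Compile the softmmu templates once more with the _cmmu suffix and
   SOFTMMU_CODE_ACCESS defined: these variants (ldub_code, ldl_code, ...)
   are used by the translator itself to fetch target instructions through
   the software TLB. */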
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif