2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 static TranslationBlock
*tbs
;
88 int code_gen_max_blocks
;
89 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
102 #define code_gen_section \
103 __attribute__((aligned (32)))
106 uint8_t code_gen_prologue
[1024] code_gen_section
;
107 static uint8_t *code_gen_buffer
;
108 static unsigned long code_gen_buffer_size
;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size
;
111 uint8_t *code_gen_ptr
;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size
;
116 uint8_t *phys_ram_base
;
117 uint8_t *phys_ram_dirty
;
118 static int in_migration
;
119 static ram_addr_t phys_ram_alloc_offset
= 0;
123 /* current CPU in the current thread. It is only valid inside
125 CPUState
*cpu_single_env
;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
146 typedef struct PhysPageDesc
{
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset
;
152 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153 /* XXX: this is a temporary hack for alpha target.
154 * In the future, this is to be replaced by a multi-level table
155 * to actually be able to handle the complete 64 bits address space.
157 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
162 #define L1_SIZE (1 << L1_BITS)
163 #define L2_SIZE (1 << L2_BITS)
165 unsigned long qemu_real_host_page_size
;
166 unsigned long qemu_host_page_bits
;
167 unsigned long qemu_host_page_size
;
168 unsigned long qemu_host_page_mask
;
170 /* XXX: for system emulation, it could just be an array */
171 static PageDesc
*l1_map
[L1_SIZE
];
172 static PhysPageDesc
**l1_phys_map
;
174 #if !defined(CONFIG_USER_ONLY)
175 static void io_mem_init(void);
177 /* io memory support */
178 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
179 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
180 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
181 static int io_mem_nb
;
182 static int io_mem_watch
;
186 static const char *logfilename
= "/tmp/qemu.log";
189 static int log_append
= 0;
192 static int tlb_flush_count
;
193 static int tb_flush_count
;
194 static int tb_phys_invalidate_count
;
196 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197 typedef struct subpage_t
{
198 target_phys_addr_t base
;
199 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
200 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
201 void *opaque
[TARGET_PAGE_SIZE
][2][4];
205 static void map_exec(void *addr
, long size
)
208 VirtualProtect(addr
, size
,
209 PAGE_EXECUTE_READWRITE
, &old_protect
);
213 static void map_exec(void *addr
, long size
)
215 unsigned long start
, end
, page_size
;
217 page_size
= getpagesize();
218 start
= (unsigned long)addr
;
219 start
&= ~(page_size
- 1);
221 end
= (unsigned long)addr
+ size
;
222 end
+= page_size
- 1;
223 end
&= ~(page_size
- 1);
225 mprotect((void *)start
, end
- start
,
226 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
230 static void page_init(void)
232 /* NOTE: we can always suppose that qemu_host_page_size >=
236 SYSTEM_INFO system_info
;
238 GetSystemInfo(&system_info
);
239 qemu_real_host_page_size
= system_info
.dwPageSize
;
242 qemu_real_host_page_size
= getpagesize();
244 if (qemu_host_page_size
== 0)
245 qemu_host_page_size
= qemu_real_host_page_size
;
246 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
247 qemu_host_page_size
= TARGET_PAGE_SIZE
;
248 qemu_host_page_bits
= 0;
249 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
250 qemu_host_page_bits
++;
251 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
252 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
253 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
255 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
257 long long startaddr
, endaddr
;
262 last_brk
= (unsigned long)sbrk(0);
263 f
= fopen("/proc/self/maps", "r");
266 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
268 startaddr
= MIN(startaddr
,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
270 endaddr
= MIN(endaddr
,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
272 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
273 TARGET_PAGE_ALIGN(endaddr
),
284 static inline PageDesc
**page_l1_map(target_ulong index
)
286 #if TARGET_LONG_BITS > 32
287 /* Host memory outside guest VM. For 32-bit targets we have already
288 excluded high addresses. */
289 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
292 return &l1_map
[index
>> L2_BITS
];
295 static inline PageDesc
*page_find_alloc(target_ulong index
)
298 lp
= page_l1_map(index
);
304 /* allocate if not found */
305 #if defined(CONFIG_USER_ONLY)
307 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
308 /* Don't use qemu_malloc because it may recurse. */
309 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
310 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
313 if (addr
== (target_ulong
)addr
) {
314 page_set_flags(addr
& TARGET_PAGE_MASK
,
315 TARGET_PAGE_ALIGN(addr
+ len
),
319 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
323 return p
+ (index
& (L2_SIZE
- 1));
326 static inline PageDesc
*page_find(target_ulong index
)
329 lp
= page_l1_map(index
);
336 return p
+ (index
& (L2_SIZE
- 1));
339 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
344 p
= (void **)l1_phys_map
;
345 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
347 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
353 /* allocate if not found */
356 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
357 memset(p
, 0, sizeof(void *) * L1_SIZE
);
361 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
365 /* allocate if not found */
368 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
370 for (i
= 0; i
< L2_SIZE
; i
++)
371 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
373 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
376 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
378 return phys_page_find_alloc(index
, 0);
381 #if !defined(CONFIG_USER_ONLY)
382 static void tlb_protect_code(ram_addr_t ram_addr
);
383 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
385 #define mmap_lock() do { } while(0)
386 #define mmap_unlock() do { } while(0)
389 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
391 #if defined(CONFIG_USER_ONLY)
392 /* Currently it is not recommanded to allocate big chunks of data in
393 user mode. It will change when a dedicated libc will be used */
394 #define USE_STATIC_CODE_GEN_BUFFER
397 #ifdef USE_STATIC_CODE_GEN_BUFFER
398 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
401 static void code_gen_alloc(unsigned long tb_size
)
403 #ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer
= static_code_gen_buffer
;
405 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
406 map_exec(code_gen_buffer
, code_gen_buffer_size
);
408 code_gen_buffer_size
= tb_size
;
409 if (code_gen_buffer_size
== 0) {
410 #if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
414 /* XXX: needs ajustments */
415 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
418 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
419 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422 #if defined(__linux__)
427 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
428 #if defined(__x86_64__)
430 /* Cannot map more than that */
431 if (code_gen_buffer_size
> (800 * 1024 * 1024))
432 code_gen_buffer_size
= (800 * 1024 * 1024);
433 #elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
436 start
= (void *) 0x60000000UL
;
437 if (code_gen_buffer_size
> (512 * 1024 * 1024))
438 code_gen_buffer_size
= (512 * 1024 * 1024);
440 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
441 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
443 if (code_gen_buffer
== MAP_FAILED
) {
444 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
448 #elif defined(__FreeBSD__)
452 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
453 #if defined(__x86_64__)
454 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
455 * 0x40000000 is free */
457 addr
= (void *)0x40000000;
458 /* Cannot map more than that */
459 if (code_gen_buffer_size
> (800 * 1024 * 1024))
460 code_gen_buffer_size
= (800 * 1024 * 1024);
462 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
463 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
465 if (code_gen_buffer
== MAP_FAILED
) {
466 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
471 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
472 if (!code_gen_buffer
) {
473 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
476 map_exec(code_gen_buffer
, code_gen_buffer_size
);
478 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
479 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
480 code_gen_buffer_max_size
= code_gen_buffer_size
-
481 code_gen_max_block_size();
482 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
483 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
486 /* Must be called before using the QEMU cpus. 'tb_size' is the size
487 (in bytes) allocated to the translation buffer. Zero means default
489 void cpu_exec_init_all(unsigned long tb_size
)
492 code_gen_alloc(tb_size
);
493 code_gen_ptr
= code_gen_buffer
;
495 #if !defined(CONFIG_USER_ONLY)
500 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
502 #define CPU_COMMON_SAVE_VERSION 1
504 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
506 CPUState
*env
= opaque
;
508 qemu_put_be32s(f
, &env
->halted
);
509 qemu_put_be32s(f
, &env
->interrupt_request
);
512 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
514 CPUState
*env
= opaque
;
516 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
519 qemu_get_be32s(f
, &env
->halted
);
520 qemu_get_be32s(f
, &env
->interrupt_request
);
527 void cpu_exec_init(CPUState
*env
)
532 env
->next_cpu
= NULL
;
535 while (*penv
!= NULL
) {
536 penv
= (CPUState
**)&(*penv
)->next_cpu
;
539 env
->cpu_index
= cpu_index
;
541 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
542 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
543 cpu_common_save
, cpu_common_load
, env
);
544 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
545 cpu_save
, cpu_load
, env
);
549 static inline void invalidate_page_bitmap(PageDesc
*p
)
551 if (p
->code_bitmap
) {
552 qemu_free(p
->code_bitmap
);
553 p
->code_bitmap
= NULL
;
555 p
->code_write_count
= 0;
558 /* set to NULL all the 'first_tb' fields in all PageDescs */
559 static void page_flush_tb(void)
564 for(i
= 0; i
< L1_SIZE
; i
++) {
567 for(j
= 0; j
< L2_SIZE
; j
++) {
569 invalidate_page_bitmap(p
);
576 /* flush all the translation blocks */
577 /* XXX: tb_flush is currently not thread safe */
578 void tb_flush(CPUState
*env1
)
581 #if defined(DEBUG_FLUSH)
582 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
583 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
585 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
587 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
588 cpu_abort(env1
, "Internal error: code buffer overflow\n");
592 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
593 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
596 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
599 code_gen_ptr
= code_gen_buffer
;
600 /* XXX: flush processor icache at this point if cache flush is
605 #ifdef DEBUG_TB_CHECK
607 static void tb_invalidate_check(target_ulong address
)
609 TranslationBlock
*tb
;
611 address
&= TARGET_PAGE_MASK
;
612 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
613 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
614 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
615 address
>= tb
->pc
+ tb
->size
)) {
616 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
617 address
, (long)tb
->pc
, tb
->size
);
623 /* verify that all the pages have correct rights for code */
624 static void tb_page_check(void)
626 TranslationBlock
*tb
;
627 int i
, flags1
, flags2
;
629 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
630 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
631 flags1
= page_get_flags(tb
->pc
);
632 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
633 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
634 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
635 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
641 static void tb_jmp_check(TranslationBlock
*tb
)
643 TranslationBlock
*tb1
;
646 /* suppress any remaining jumps to this TB */
650 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
653 tb1
= tb1
->jmp_next
[n1
];
655 /* check end of list */
657 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
663 /* invalidate one TB */
664 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
667 TranslationBlock
*tb1
;
671 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
674 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
678 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
680 TranslationBlock
*tb1
;
686 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
688 *ptb
= tb1
->page_next
[n1
];
691 ptb
= &tb1
->page_next
[n1
];
695 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
697 TranslationBlock
*tb1
, **ptb
;
700 ptb
= &tb
->jmp_next
[n
];
703 /* find tb(n) in circular list */
707 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
708 if (n1
== n
&& tb1
== tb
)
711 ptb
= &tb1
->jmp_first
;
713 ptb
= &tb1
->jmp_next
[n1
];
716 /* now we can suppress tb(n) from the list */
717 *ptb
= tb
->jmp_next
[n
];
719 tb
->jmp_next
[n
] = NULL
;
723 /* reset the jump entry 'n' of a TB so that it is not chained to
725 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
727 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
730 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
735 target_phys_addr_t phys_pc
;
736 TranslationBlock
*tb1
, *tb2
;
738 /* remove the TB from the hash list */
739 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
740 h
= tb_phys_hash_func(phys_pc
);
741 tb_remove(&tb_phys_hash
[h
], tb
,
742 offsetof(TranslationBlock
, phys_hash_next
));
744 /* remove the TB from the page list */
745 if (tb
->page_addr
[0] != page_addr
) {
746 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
747 tb_page_remove(&p
->first_tb
, tb
);
748 invalidate_page_bitmap(p
);
750 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
751 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
752 tb_page_remove(&p
->first_tb
, tb
);
753 invalidate_page_bitmap(p
);
756 tb_invalidated_flag
= 1;
758 /* remove the TB from the hash list */
759 h
= tb_jmp_cache_hash_func(tb
->pc
);
760 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
761 if (env
->tb_jmp_cache
[h
] == tb
)
762 env
->tb_jmp_cache
[h
] = NULL
;
765 /* suppress this TB from the two jump lists */
766 tb_jmp_remove(tb
, 0);
767 tb_jmp_remove(tb
, 1);
769 /* suppress any remaining jumps to this TB */
775 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
776 tb2
= tb1
->jmp_next
[n1
];
777 tb_reset_jump(tb1
, n1
);
778 tb1
->jmp_next
[n1
] = NULL
;
781 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
783 tb_phys_invalidate_count
++;
786 static inline void set_bits(uint8_t *tab
, int start
, int len
)
792 mask
= 0xff << (start
& 7);
793 if ((start
& ~7) == (end
& ~7)) {
795 mask
&= ~(0xff << (end
& 7));
800 start
= (start
+ 8) & ~7;
802 while (start
< end1
) {
807 mask
= ~(0xff << (end
& 7));
813 static void build_page_bitmap(PageDesc
*p
)
815 int n
, tb_start
, tb_end
;
816 TranslationBlock
*tb
;
818 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
825 tb
= (TranslationBlock
*)((long)tb
& ~3);
826 /* NOTE: this is subtle as a TB may span two physical pages */
828 /* NOTE: tb_end may be after the end of the page, but
829 it is not a problem */
830 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
831 tb_end
= tb_start
+ tb
->size
;
832 if (tb_end
> TARGET_PAGE_SIZE
)
833 tb_end
= TARGET_PAGE_SIZE
;
836 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
838 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
839 tb
= tb
->page_next
[n
];
843 TranslationBlock
*tb_gen_code(CPUState
*env
,
844 target_ulong pc
, target_ulong cs_base
,
845 int flags
, int cflags
)
847 TranslationBlock
*tb
;
849 target_ulong phys_pc
, phys_page2
, virt_page2
;
852 phys_pc
= get_phys_addr_code(env
, pc
);
855 /* flush must be done */
857 /* cannot fail at this point */
859 /* Don't forget to invalidate previous TB info. */
860 tb_invalidated_flag
= 1;
862 tc_ptr
= code_gen_ptr
;
864 tb
->cs_base
= cs_base
;
867 cpu_gen_code(env
, tb
, &code_gen_size
);
868 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
870 /* check next page if needed */
871 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
873 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
874 phys_page2
= get_phys_addr_code(env
, virt_page2
);
876 tb_link_phys(tb
, phys_pc
, phys_page2
);
880 /* invalidate all TBs which intersect with the target physical page
881 starting in range [start;end[. NOTE: start and end must refer to
882 the same physical page. 'is_cpu_write_access' should be true if called
883 from a real cpu write access: the virtual CPU will exit the current
884 TB if code is modified inside this TB. */
885 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
886 int is_cpu_write_access
)
888 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
889 CPUState
*env
= cpu_single_env
;
890 target_ulong tb_start
, tb_end
;
893 #ifdef TARGET_HAS_PRECISE_SMC
894 int current_tb_not_found
= is_cpu_write_access
;
895 TranslationBlock
*current_tb
= NULL
;
896 int current_tb_modified
= 0;
897 target_ulong current_pc
= 0;
898 target_ulong current_cs_base
= 0;
899 int current_flags
= 0;
900 #endif /* TARGET_HAS_PRECISE_SMC */
902 p
= page_find(start
>> TARGET_PAGE_BITS
);
905 if (!p
->code_bitmap
&&
906 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
907 is_cpu_write_access
) {
908 /* build code bitmap */
909 build_page_bitmap(p
);
912 /* we remove all the TBs in the range [start, end[ */
913 /* XXX: see if in some cases it could be faster to invalidate all the code */
917 tb
= (TranslationBlock
*)((long)tb
& ~3);
918 tb_next
= tb
->page_next
[n
];
919 /* NOTE: this is subtle as a TB may span two physical pages */
921 /* NOTE: tb_end may be after the end of the page, but
922 it is not a problem */
923 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
924 tb_end
= tb_start
+ tb
->size
;
926 tb_start
= tb
->page_addr
[1];
927 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
929 if (!(tb_end
<= start
|| tb_start
>= end
)) {
930 #ifdef TARGET_HAS_PRECISE_SMC
931 if (current_tb_not_found
) {
932 current_tb_not_found
= 0;
934 if (env
->mem_io_pc
) {
935 /* now we have a real cpu fault */
936 current_tb
= tb_find_pc(env
->mem_io_pc
);
939 if (current_tb
== tb
&&
940 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
941 /* If we are modifying the current TB, we must stop
942 its execution. We could be more precise by checking
943 that the modification is after the current PC, but it
944 would require a specialized function to partially
945 restore the CPU state */
947 current_tb_modified
= 1;
948 cpu_restore_state(current_tb
, env
,
949 env
->mem_io_pc
, NULL
);
950 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
953 #endif /* TARGET_HAS_PRECISE_SMC */
954 /* we need to do that to handle the case where a signal
955 occurs while doing tb_phys_invalidate() */
958 saved_tb
= env
->current_tb
;
959 env
->current_tb
= NULL
;
961 tb_phys_invalidate(tb
, -1);
963 env
->current_tb
= saved_tb
;
964 if (env
->interrupt_request
&& env
->current_tb
)
965 cpu_interrupt(env
, env
->interrupt_request
);
970 #if !defined(CONFIG_USER_ONLY)
971 /* if no code remaining, no need to continue to use slow writes */
973 invalidate_page_bitmap(p
);
974 if (is_cpu_write_access
) {
975 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
979 #ifdef TARGET_HAS_PRECISE_SMC
980 if (current_tb_modified
) {
981 /* we generate a block containing just the instruction
982 modifying the memory. It will ensure that it cannot modify
984 env
->current_tb
= NULL
;
985 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
986 cpu_resume_from_signal(env
, NULL
);
991 /* len must be <= 8 and start must be a multiple of len */
992 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
999 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1000 cpu_single_env
->mem_io_vaddr
, len
,
1001 cpu_single_env
->eip
,
1002 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1006 p
= page_find(start
>> TARGET_PAGE_BITS
);
1009 if (p
->code_bitmap
) {
1010 offset
= start
& ~TARGET_PAGE_MASK
;
1011 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1012 if (b
& ((1 << len
) - 1))
1016 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1020 #if !defined(CONFIG_SOFTMMU)
1021 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1022 unsigned long pc
, void *puc
)
1024 TranslationBlock
*tb
;
1027 #ifdef TARGET_HAS_PRECISE_SMC
1028 TranslationBlock
*current_tb
= NULL
;
1029 CPUState
*env
= cpu_single_env
;
1030 int current_tb_modified
= 0;
1031 target_ulong current_pc
= 0;
1032 target_ulong current_cs_base
= 0;
1033 int current_flags
= 0;
1036 addr
&= TARGET_PAGE_MASK
;
1037 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1041 #ifdef TARGET_HAS_PRECISE_SMC
1042 if (tb
&& pc
!= 0) {
1043 current_tb
= tb_find_pc(pc
);
1046 while (tb
!= NULL
) {
1048 tb
= (TranslationBlock
*)((long)tb
& ~3);
1049 #ifdef TARGET_HAS_PRECISE_SMC
1050 if (current_tb
== tb
&&
1051 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1052 /* If we are modifying the current TB, we must stop
1053 its execution. We could be more precise by checking
1054 that the modification is after the current PC, but it
1055 would require a specialized function to partially
1056 restore the CPU state */
1058 current_tb_modified
= 1;
1059 cpu_restore_state(current_tb
, env
, pc
, puc
);
1060 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1063 #endif /* TARGET_HAS_PRECISE_SMC */
1064 tb_phys_invalidate(tb
, addr
);
1065 tb
= tb
->page_next
[n
];
1068 #ifdef TARGET_HAS_PRECISE_SMC
1069 if (current_tb_modified
) {
1070 /* we generate a block containing just the instruction
1071 modifying the memory. It will ensure that it cannot modify
1073 env
->current_tb
= NULL
;
1074 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1075 cpu_resume_from_signal(env
, puc
);
1081 /* add the tb in the target page and protect it if necessary */
1082 static inline void tb_alloc_page(TranslationBlock
*tb
,
1083 unsigned int n
, target_ulong page_addr
)
1086 TranslationBlock
*last_first_tb
;
1088 tb
->page_addr
[n
] = page_addr
;
1089 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1090 tb
->page_next
[n
] = p
->first_tb
;
1091 last_first_tb
= p
->first_tb
;
1092 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1093 invalidate_page_bitmap(p
);
1095 #if defined(TARGET_HAS_SMC) || 1
1097 #if defined(CONFIG_USER_ONLY)
1098 if (p
->flags
& PAGE_WRITE
) {
1103 /* force the host page as non writable (writes will have a
1104 page fault + mprotect overhead) */
1105 page_addr
&= qemu_host_page_mask
;
1107 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1108 addr
+= TARGET_PAGE_SIZE
) {
1110 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1114 p2
->flags
&= ~PAGE_WRITE
;
1115 page_get_flags(addr
);
1117 mprotect(g2h(page_addr
), qemu_host_page_size
,
1118 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1119 #ifdef DEBUG_TB_INVALIDATE
1120 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1125 /* if some code is already present, then the pages are already
1126 protected. So we handle the case where only the first TB is
1127 allocated in a physical page */
1128 if (!last_first_tb
) {
1129 tlb_protect_code(page_addr
);
1133 #endif /* TARGET_HAS_SMC */
1136 /* Allocate a new translation block. Flush the translation buffer if
1137 too many translation blocks or too much generated code. */
1138 TranslationBlock
*tb_alloc(target_ulong pc
)
1140 TranslationBlock
*tb
;
1142 if (nb_tbs
>= code_gen_max_blocks
||
1143 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1145 tb
= &tbs
[nb_tbs
++];
1151 void tb_free(TranslationBlock
*tb
)
1153 /* In practice this is mostly used for single use temporary TB
1154 Ignore the hard cases and just back up if this TB happens to
1155 be the last one generated. */
1156 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1157 code_gen_ptr
= tb
->tc_ptr
;
1162 /* add a new TB and link it to the physical page tables. phys_page2 is
1163 (-1) to indicate that only one page contains the TB. */
1164 void tb_link_phys(TranslationBlock
*tb
,
1165 target_ulong phys_pc
, target_ulong phys_page2
)
1168 TranslationBlock
**ptb
;
1170 /* Grab the mmap lock to stop another thread invalidating this TB
1171 before we are done. */
1173 /* add in the physical hash table */
1174 h
= tb_phys_hash_func(phys_pc
);
1175 ptb
= &tb_phys_hash
[h
];
1176 tb
->phys_hash_next
= *ptb
;
1179 /* add in the page list */
1180 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1181 if (phys_page2
!= -1)
1182 tb_alloc_page(tb
, 1, phys_page2
);
1184 tb
->page_addr
[1] = -1;
1186 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1187 tb
->jmp_next
[0] = NULL
;
1188 tb
->jmp_next
[1] = NULL
;
1190 /* init original jump addresses */
1191 if (tb
->tb_next_offset
[0] != 0xffff)
1192 tb_reset_jump(tb
, 0);
1193 if (tb
->tb_next_offset
[1] != 0xffff)
1194 tb_reset_jump(tb
, 1);
1196 #ifdef DEBUG_TB_CHECK
1202 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1203 tb[1].tc_ptr. Return NULL if not found */
1204 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1206 int m_min
, m_max
, m
;
1208 TranslationBlock
*tb
;
1212 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1213 tc_ptr
>= (unsigned long)code_gen_ptr
)
1215 /* binary search (cf Knuth) */
1218 while (m_min
<= m_max
) {
1219 m
= (m_min
+ m_max
) >> 1;
1221 v
= (unsigned long)tb
->tc_ptr
;
1224 else if (tc_ptr
< v
) {
1233 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1235 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1237 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1240 tb1
= tb
->jmp_next
[n
];
1242 /* find head of list */
1245 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1248 tb1
= tb1
->jmp_next
[n1
];
1250 /* we are now sure now that tb jumps to tb1 */
1253 /* remove tb from the jmp_first list */
1254 ptb
= &tb_next
->jmp_first
;
1258 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1259 if (n1
== n
&& tb1
== tb
)
1261 ptb
= &tb1
->jmp_next
[n1
];
1263 *ptb
= tb
->jmp_next
[n
];
1264 tb
->jmp_next
[n
] = NULL
;
1266 /* suppress the jump to next tb in generated code */
1267 tb_reset_jump(tb
, n
);
1269 /* suppress jumps in the tb on which we could have jumped */
1270 tb_reset_jump_recursive(tb_next
);
1274 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1276 tb_reset_jump_recursive2(tb
, 0);
1277 tb_reset_jump_recursive2(tb
, 1);
1280 #if defined(TARGET_HAS_ICE)
1281 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1283 target_phys_addr_t addr
;
1285 ram_addr_t ram_addr
;
1288 addr
= cpu_get_phys_page_debug(env
, pc
);
1289 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1291 pd
= IO_MEM_UNASSIGNED
;
1293 pd
= p
->phys_offset
;
1295 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1296 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1300 /* Add a watchpoint. */
1301 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1302 int flags
, CPUWatchpoint
**watchpoint
)
1304 target_ulong len_mask
= ~(len
- 1);
1305 CPUWatchpoint
*wp
, *prev_wp
;
1307 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1308 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1309 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1310 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1313 wp
= qemu_malloc(sizeof(*wp
));
1318 wp
->len_mask
= len_mask
;
1321 /* keep all GDB-injected watchpoints in front */
1322 if (!(flags
& BP_GDB
) && env
->watchpoints
) {
1323 prev_wp
= env
->watchpoints
;
1324 while (prev_wp
->next
!= NULL
&& (prev_wp
->next
->flags
& BP_GDB
))
1325 prev_wp
= prev_wp
->next
;
1330 /* Insert new watchpoint */
1332 wp
->next
= prev_wp
->next
;
1335 wp
->next
= env
->watchpoints
;
1336 env
->watchpoints
= wp
;
1339 wp
->next
->prev
= wp
;
1342 tlb_flush_page(env
, addr
);
1349 /* Remove a specific watchpoint. */
1350 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1353 target_ulong len_mask
= ~(len
- 1);
1356 for (wp
= env
->watchpoints
; wp
!= NULL
; wp
= wp
->next
) {
1357 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1358 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1359 cpu_watchpoint_remove_by_ref(env
, wp
);
1366 /* Remove a specific watchpoint by reference. */
1367 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1369 if (watchpoint
->next
)
1370 watchpoint
->next
->prev
= watchpoint
->prev
;
1371 if (watchpoint
->prev
)
1372 watchpoint
->prev
->next
= watchpoint
->next
;
1374 env
->watchpoints
= watchpoint
->next
;
1376 tlb_flush_page(env
, watchpoint
->vaddr
);
1378 qemu_free(watchpoint
);
1381 /* Remove all matching watchpoints. */
1382 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1386 for (wp
= env
->watchpoints
; wp
!= NULL
; wp
= wp
->next
)
1387 if (wp
->flags
& mask
)
1388 cpu_watchpoint_remove_by_ref(env
, wp
);
1391 /* Add a breakpoint. */
1392 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1393 CPUBreakpoint
**breakpoint
)
1395 #if defined(TARGET_HAS_ICE)
1396 CPUBreakpoint
*bp
, *prev_bp
;
1398 bp
= qemu_malloc(sizeof(*bp
));
1405 /* keep all GDB-injected breakpoints in front */
1406 if (!(flags
& BP_GDB
) && env
->breakpoints
) {
1407 prev_bp
= env
->breakpoints
;
1408 while (prev_bp
->next
!= NULL
&& (prev_bp
->next
->flags
& BP_GDB
))
1409 prev_bp
= prev_bp
->next
;
1414 /* Insert new breakpoint */
1416 bp
->next
= prev_bp
->next
;
1419 bp
->next
= env
->breakpoints
;
1420 env
->breakpoints
= bp
;
1423 bp
->next
->prev
= bp
;
1426 breakpoint_invalidate(env
, pc
);
1436 /* Remove a specific breakpoint. */
1437 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1439 #if defined(TARGET_HAS_ICE)
1442 for (bp
= env
->breakpoints
; bp
!= NULL
; bp
= bp
->next
) {
1443 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1444 cpu_breakpoint_remove_by_ref(env
, bp
);
1454 /* Remove a specific breakpoint by reference. */
1455 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1457 #if defined(TARGET_HAS_ICE)
1458 if (breakpoint
->next
)
1459 breakpoint
->next
->prev
= breakpoint
->prev
;
1460 if (breakpoint
->prev
)
1461 breakpoint
->prev
->next
= breakpoint
->next
;
1463 env
->breakpoints
= breakpoint
->next
;
1465 breakpoint_invalidate(env
, breakpoint
->pc
);
1467 qemu_free(breakpoint
);
1471 /* Remove all matching breakpoints. */
1472 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1474 #if defined(TARGET_HAS_ICE)
1477 for (bp
= env
->breakpoints
; bp
!= NULL
; bp
= bp
->next
)
1478 if (bp
->flags
& mask
)
1479 cpu_breakpoint_remove_by_ref(env
, bp
);
1483 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1484 CPU loop after each instruction */
1485 void cpu_single_step(CPUState
*env
, int enabled
)
1487 #if defined(TARGET_HAS_ICE)
1488 if (env
->singlestep_enabled
!= enabled
) {
1489 env
->singlestep_enabled
= enabled
;
1490 /* must flush all the translated code to avoid inconsistancies */
1491 /* XXX: only flush what is necessary */
1497 /* enable or disable low levels log */
1498 void cpu_set_log(int log_flags
)
1500 loglevel
= log_flags
;
1501 if (loglevel
&& !logfile
) {
1502 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1504 perror(logfilename
);
1507 #if !defined(CONFIG_SOFTMMU)
1508 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1510 static char logfile_buf
[4096];
1511 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1514 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1518 if (!loglevel
&& logfile
) {
1524 void cpu_set_log_filename(const char *filename
)
1526 logfilename
= strdup(filename
);
1531 cpu_set_log(loglevel
);
1534 /* mask must never be zero, except for A20 change call */
1535 void cpu_interrupt(CPUState
*env
, int mask
)
1537 #if !defined(USE_NPTL)
1538 TranslationBlock
*tb
;
1539 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1543 old_mask
= env
->interrupt_request
;
1544 /* FIXME: This is probably not threadsafe. A different thread could
1545 be in the middle of a read-modify-write operation. */
1546 env
->interrupt_request
|= mask
;
1547 #if defined(USE_NPTL)
1548 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1549 problem and hope the cpu will stop of its own accord. For userspace
1550 emulation this often isn't actually as bad as it sounds. Often
1551 signals are used primarily to interrupt blocking syscalls. */
1554 env
->icount_decr
.u16
.high
= 0xffff;
1555 #ifndef CONFIG_USER_ONLY
1556 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1557 an async event happened and we need to process it. */
1559 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1560 cpu_abort(env
, "Raised interrupt while not in I/O function");
1564 tb
= env
->current_tb
;
1565 /* if the cpu is currently executing code, we must unlink it and
1566 all the potentially executing TB */
1567 if (tb
&& !testandset(&interrupt_lock
)) {
1568 env
->current_tb
= NULL
;
1569 tb_reset_jump_recursive(tb
);
1570 resetlock(&interrupt_lock
);
1576 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1578 env
->interrupt_request
&= ~mask
;
1581 const CPULogItem cpu_log_items
[] = {
1582 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1583 "show generated host assembly code for each compiled TB" },
1584 { CPU_LOG_TB_IN_ASM
, "in_asm",
1585 "show target assembly code for each compiled TB" },
1586 { CPU_LOG_TB_OP
, "op",
1587 "show micro ops for each compiled TB" },
1588 { CPU_LOG_TB_OP_OPT
, "op_opt",
1591 "before eflags optimization and "
1593 "after liveness analysis" },
1594 { CPU_LOG_INT
, "int",
1595 "show interrupts/exceptions in short format" },
1596 { CPU_LOG_EXEC
, "exec",
1597 "show trace before each executed TB (lots of logs)" },
1598 { CPU_LOG_TB_CPU
, "cpu",
1599 "show CPU state before block translation" },
1601 { CPU_LOG_PCALL
, "pcall",
1602 "show protected mode far calls/returns/exceptions" },
1605 { CPU_LOG_IOPORT
, "ioport",
1606 "show all i/o ports accesses" },
1611 static int cmp1(const char *s1
, int n
, const char *s2
)
1613 if (strlen(s2
) != n
)
1615 return memcmp(s1
, s2
, n
) == 0;
1618 /* takes a comma separated list of log masks. Return 0 if error. */
1619 int cpu_str_to_log_mask(const char *str
)
1621 const CPULogItem
*item
;
1628 p1
= strchr(p
, ',');
1631 if(cmp1(p
,p1
-p
,"all")) {
1632 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1636 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1637 if (cmp1(p
, p1
- p
, item
->name
))
1651 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1658 fprintf(stderr
, "qemu: fatal: ");
1659 vfprintf(stderr
, fmt
, ap
);
1660 fprintf(stderr
, "\n");
1662 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1664 cpu_dump_state(env
, stderr
, fprintf
, 0);
1667 fprintf(logfile
, "qemu: fatal: ");
1668 vfprintf(logfile
, fmt
, ap2
);
1669 fprintf(logfile
, "\n");
1671 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1673 cpu_dump_state(env
, logfile
, fprintf
, 0);
1683 CPUState
*cpu_copy(CPUState
*env
)
1685 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1686 /* preserve chaining and index */
1687 CPUState
*next_cpu
= new_env
->next_cpu
;
1688 int cpu_index
= new_env
->cpu_index
;
1689 memcpy(new_env
, env
, sizeof(CPUState
));
1690 new_env
->next_cpu
= next_cpu
;
1691 new_env
->cpu_index
= cpu_index
;
1695 #if !defined(CONFIG_USER_ONLY)
1697 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1701 /* Discard jump cache entries for any tb which might potentially
1702 overlap the flushed page. */
1703 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1704 memset (&env
->tb_jmp_cache
[i
], 0,
1705 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1707 i
= tb_jmp_cache_hash_page(addr
);
1708 memset (&env
->tb_jmp_cache
[i
], 0,
1709 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1712 /* NOTE: if flush_global is true, also flush global entries (not
1714 void tlb_flush(CPUState
*env
, int flush_global
)
1718 #if defined(DEBUG_TLB)
1719 printf("tlb_flush:\n");
1721 /* must reset current TB so that interrupts cannot modify the
1722 links while we are modifying them */
1723 env
->current_tb
= NULL
;
1725 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1726 env
->tlb_table
[0][i
].addr_read
= -1;
1727 env
->tlb_table
[0][i
].addr_write
= -1;
1728 env
->tlb_table
[0][i
].addr_code
= -1;
1729 env
->tlb_table
[1][i
].addr_read
= -1;
1730 env
->tlb_table
[1][i
].addr_write
= -1;
1731 env
->tlb_table
[1][i
].addr_code
= -1;
1732 #if (NB_MMU_MODES >= 3)
1733 env
->tlb_table
[2][i
].addr_read
= -1;
1734 env
->tlb_table
[2][i
].addr_write
= -1;
1735 env
->tlb_table
[2][i
].addr_code
= -1;
1736 #if (NB_MMU_MODES == 4)
1737 env
->tlb_table
[3][i
].addr_read
= -1;
1738 env
->tlb_table
[3][i
].addr_write
= -1;
1739 env
->tlb_table
[3][i
].addr_code
= -1;
1744 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1747 if (env
->kqemu_enabled
) {
1748 kqemu_flush(env
, flush_global
);
1754 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1756 if (addr
== (tlb_entry
->addr_read
&
1757 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1758 addr
== (tlb_entry
->addr_write
&
1759 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1760 addr
== (tlb_entry
->addr_code
&
1761 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1762 tlb_entry
->addr_read
= -1;
1763 tlb_entry
->addr_write
= -1;
1764 tlb_entry
->addr_code
= -1;
1768 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1772 #if defined(DEBUG_TLB)
1773 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1775 /* must reset current TB so that interrupts cannot modify the
1776 links while we are modifying them */
1777 env
->current_tb
= NULL
;
1779 addr
&= TARGET_PAGE_MASK
;
1780 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1781 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1782 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1783 #if (NB_MMU_MODES >= 3)
1784 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1785 #if (NB_MMU_MODES == 4)
1786 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1790 tlb_flush_jmp_cache(env
, addr
);
1793 if (env
->kqemu_enabled
) {
1794 kqemu_flush_page(env
, addr
);
1799 /* update the TLBs so that writes to code in the virtual page 'addr'
1801 static void tlb_protect_code(ram_addr_t ram_addr
)
1803 cpu_physical_memory_reset_dirty(ram_addr
,
1804 ram_addr
+ TARGET_PAGE_SIZE
,
1808 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1809 tested for self modifying code */
1810 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1813 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1816 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1817 unsigned long start
, unsigned long length
)
1820 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1821 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1822 if ((addr
- start
) < length
) {
1823 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1828 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1832 unsigned long length
, start1
;
1836 start
&= TARGET_PAGE_MASK
;
1837 end
= TARGET_PAGE_ALIGN(end
);
1839 length
= end
- start
;
1842 len
= length
>> TARGET_PAGE_BITS
;
1844 /* XXX: should not depend on cpu context */
1846 if (env
->kqemu_enabled
) {
1849 for(i
= 0; i
< len
; i
++) {
1850 kqemu_set_notdirty(env
, addr
);
1851 addr
+= TARGET_PAGE_SIZE
;
1855 mask
= ~dirty_flags
;
1856 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1857 for(i
= 0; i
< len
; i
++)
1860 /* we modify the TLB cache so that the dirty bit will be set again
1861 when accessing the range */
1862 start1
= start
+ (unsigned long)phys_ram_base
;
1863 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1864 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1865 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1866 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1867 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1868 #if (NB_MMU_MODES >= 3)
1869 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1870 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1871 #if (NB_MMU_MODES == 4)
1872 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1873 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1879 int cpu_physical_memory_set_dirty_tracking(int enable
)
1881 in_migration
= enable
;
1885 int cpu_physical_memory_get_dirty_tracking(void)
1887 return in_migration
;
1890 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1893 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1896 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1898 ram_addr_t ram_addr
;
1900 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1901 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1902 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1903 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1904 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1909 /* update the TLB according to the current state of the dirty bits */
1910 void cpu_tlb_update_dirty(CPUState
*env
)
1913 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1914 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1915 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1916 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1917 #if (NB_MMU_MODES >= 3)
1918 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1919 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1920 #if (NB_MMU_MODES == 4)
1921 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1922 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1927 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1929 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1930 tlb_entry
->addr_write
= vaddr
;
1933 /* update the TLB corresponding to virtual page vaddr
1934 so that it is no longer dirty */
1935 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1939 vaddr
&= TARGET_PAGE_MASK
;
1940 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1941 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1942 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1943 #if (NB_MMU_MODES >= 3)
1944 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1945 #if (NB_MMU_MODES == 4)
1946 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1951 /* add a new TLB entry. At most one entry for a given virtual address
1952 is permitted. Return 0 if OK or 2 if the page could not be mapped
1953 (can only happen in non SOFTMMU mode for I/O pages or pages
1954 conflicting with the host address space). */
1955 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1956 target_phys_addr_t paddr
, int prot
,
1957 int mmu_idx
, int is_softmmu
)
1962 target_ulong address
;
1963 target_ulong code_address
;
1964 target_phys_addr_t addend
;
1968 target_phys_addr_t iotlb
;
1970 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1972 pd
= IO_MEM_UNASSIGNED
;
1974 pd
= p
->phys_offset
;
1976 #if defined(DEBUG_TLB)
1977 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1978 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1983 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1984 /* IO memory case (romd handled later) */
1985 address
|= TLB_MMIO
;
1987 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1988 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1990 iotlb
= pd
& TARGET_PAGE_MASK
;
1991 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1992 iotlb
|= IO_MEM_NOTDIRTY
;
1994 iotlb
|= IO_MEM_ROM
;
1996 /* IO handlers are currently passed a phsical address.
1997 It would be nice to pass an offset from the base address
1998 of that region. This would avoid having to special case RAM,
1999 and avoid full address decoding in every device.
2000 We can't use the high bits of pd for this because
2001 IO_MEM_ROMD uses these as a ram address. */
2002 iotlb
= (pd
& ~TARGET_PAGE_MASK
) + paddr
;
2005 code_address
= address
;
2006 /* Make accesses to pages with watchpoints go via the
2007 watchpoint trap routines. */
2008 for (wp
= env
->watchpoints
; wp
!= NULL
; wp
= wp
->next
) {
2009 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2010 iotlb
= io_mem_watch
+ paddr
;
2011 /* TODO: The memory case can be optimized by not trapping
2012 reads of pages with a write breakpoint. */
2013 address
|= TLB_MMIO
;
2017 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2018 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2019 te
= &env
->tlb_table
[mmu_idx
][index
];
2020 te
->addend
= addend
- vaddr
;
2021 if (prot
& PAGE_READ
) {
2022 te
->addr_read
= address
;
2027 if (prot
& PAGE_EXEC
) {
2028 te
->addr_code
= code_address
;
2032 if (prot
& PAGE_WRITE
) {
2033 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2034 (pd
& IO_MEM_ROMD
)) {
2035 /* Write access calls the I/O callback. */
2036 te
->addr_write
= address
| TLB_MMIO
;
2037 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2038 !cpu_physical_memory_is_dirty(pd
)) {
2039 te
->addr_write
= address
| TLB_NOTDIRTY
;
2041 te
->addr_write
= address
;
2044 te
->addr_write
= -1;
2051 void tlb_flush(CPUState
*env
, int flush_global
)
2055 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2059 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2060 target_phys_addr_t paddr
, int prot
,
2061 int mmu_idx
, int is_softmmu
)
2066 /* dump memory mappings */
2067 void page_dump(FILE *f
)
2069 unsigned long start
, end
;
2070 int i
, j
, prot
, prot1
;
2073 fprintf(f
, "%-8s %-8s %-8s %s\n",
2074 "start", "end", "size", "prot");
2078 for(i
= 0; i
<= L1_SIZE
; i
++) {
2083 for(j
= 0;j
< L2_SIZE
; j
++) {
2088 if (prot1
!= prot
) {
2089 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2091 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2092 start
, end
, end
- start
,
2093 prot
& PAGE_READ
? 'r' : '-',
2094 prot
& PAGE_WRITE
? 'w' : '-',
2095 prot
& PAGE_EXEC
? 'x' : '-');
2109 int page_get_flags(target_ulong address
)
2113 p
= page_find(address
>> TARGET_PAGE_BITS
);
2119 /* modify the flags of a page and invalidate the code if
2120 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2121 depending on PAGE_WRITE */
2122 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2127 /* mmap_lock should already be held. */
2128 start
= start
& TARGET_PAGE_MASK
;
2129 end
= TARGET_PAGE_ALIGN(end
);
2130 if (flags
& PAGE_WRITE
)
2131 flags
|= PAGE_WRITE_ORG
;
2132 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2133 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2134 /* We may be called for host regions that are outside guest
2138 /* if the write protection is set, then we invalidate the code
2140 if (!(p
->flags
& PAGE_WRITE
) &&
2141 (flags
& PAGE_WRITE
) &&
2143 tb_invalidate_phys_page(addr
, 0, NULL
);
2149 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2155 if (start
+ len
< start
)
2156 /* we've wrapped around */
2159 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2160 start
= start
& TARGET_PAGE_MASK
;
2162 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2163 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2166 if( !(p
->flags
& PAGE_VALID
) )
2169 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2171 if (flags
& PAGE_WRITE
) {
2172 if (!(p
->flags
& PAGE_WRITE_ORG
))
2174 /* unprotect the page if it was put read-only because it
2175 contains translated code */
2176 if (!(p
->flags
& PAGE_WRITE
)) {
2177 if (!page_unprotect(addr
, 0, NULL
))
2186 /* called from signal handler: invalidate the code and unprotect the
2187 page. Return TRUE if the fault was succesfully handled. */
2188 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2190 unsigned int page_index
, prot
, pindex
;
2192 target_ulong host_start
, host_end
, addr
;
2194 /* Technically this isn't safe inside a signal handler. However we
2195 know this only ever happens in a synchronous SEGV handler, so in
2196 practice it seems to be ok. */
2199 host_start
= address
& qemu_host_page_mask
;
2200 page_index
= host_start
>> TARGET_PAGE_BITS
;
2201 p1
= page_find(page_index
);
2206 host_end
= host_start
+ qemu_host_page_size
;
2209 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2213 /* if the page was really writable, then we change its
2214 protection back to writable */
2215 if (prot
& PAGE_WRITE_ORG
) {
2216 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2217 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2218 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2219 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2220 p1
[pindex
].flags
|= PAGE_WRITE
;
2221 /* and since the content will be modified, we must invalidate
2222 the corresponding translated code. */
2223 tb_invalidate_phys_page(address
, pc
, puc
);
2224 #ifdef DEBUG_TB_CHECK
2225 tb_invalidate_check(address
);
2235 static inline void tlb_set_dirty(CPUState
*env
,
2236 unsigned long addr
, target_ulong vaddr
)
2239 #endif /* defined(CONFIG_USER_ONLY) */
2241 #if !defined(CONFIG_USER_ONLY)
2242 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2244 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2245 ram_addr_t orig_memory
);
2246 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2249 if (addr > start_addr) \
2252 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2253 if (start_addr2 > 0) \
2257 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2258 end_addr2 = TARGET_PAGE_SIZE - 1; \
2260 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2261 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2266 /* register physical memory. 'size' must be a multiple of the target
2267 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2269 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2271 ram_addr_t phys_offset
)
2273 target_phys_addr_t addr
, end_addr
;
2276 ram_addr_t orig_size
= size
;
2280 /* XXX: should not depend on cpu context */
2282 if (env
->kqemu_enabled
) {
2283 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2287 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2289 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2290 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2291 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2292 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2293 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2294 ram_addr_t orig_memory
= p
->phys_offset
;
2295 target_phys_addr_t start_addr2
, end_addr2
;
2296 int need_subpage
= 0;
2298 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2300 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2301 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2302 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2303 &p
->phys_offset
, orig_memory
);
2305 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2308 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2310 p
->phys_offset
= phys_offset
;
2311 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2312 (phys_offset
& IO_MEM_ROMD
))
2313 phys_offset
+= TARGET_PAGE_SIZE
;
2316 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2317 p
->phys_offset
= phys_offset
;
2318 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2319 (phys_offset
& IO_MEM_ROMD
))
2320 phys_offset
+= TARGET_PAGE_SIZE
;
2322 target_phys_addr_t start_addr2
, end_addr2
;
2323 int need_subpage
= 0;
2325 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2326 end_addr2
, need_subpage
);
2328 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2329 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2330 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2331 subpage_register(subpage
, start_addr2
, end_addr2
,
2338 /* since each CPU stores ram addresses in its TLB cache, we must
2339 reset the modified entries */
2341 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2346 /* XXX: temporary until new memory mapping API */
2347 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2351 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2353 return IO_MEM_UNASSIGNED
;
2354 return p
->phys_offset
;
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
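
/* Note (added): this is a simple bump allocator over the single
   phys_ram_base block: successive calls return monotonically increasing,
   page-aligned offsets and qemu_ram_free() below is a no-op.  E.g. on a
   4 KiB-page target, two qemu_ram_alloc(0x2000) calls return 0x0 and
   0x2000 respectively. */
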
void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
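
/* Sketch (added; assumes the cpu_physical_memory_get_dirty()/
   cpu_physical_memory_reset_dirty() helpers and VGA_DIRTY_FLAG from
   cpu-all.h): how a display device typically consumes the per-page dirty
   bytes maintained by the notdirty handlers above. */
#if 0
static void example_update_display(ram_addr_t vram_offset, int vram_size)
{
    ram_addr_t page;

    for (page = vram_offset; page < vram_offset + vram_size;
         page += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG)) {
            /* redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(vram_offset, vram_offset + vram_size,
                                    VGA_DIRTY_FLAG);
}
#endif
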
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
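
/* Example (added sketch; assumes the cpu_watchpoint_insert() helper from
   this tree -- its exact signature may differ between versions): arming a
   4-byte write watchpoint remaps the page's TLB entries through
   io_mem_watch, so the handlers above run on every access to that page. */
#if 0
static void example_arm_watchpoint(CPUState *env, target_ulong addr)
{
    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, NULL);
}
#endif
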
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
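
/* Note (added): subpages arise when cpu_register_physical_memory() is asked
   to map a region smaller than TARGET_PAGE_SIZE, e.g. a 0x100-byte device
   window.  subpage_init() takes over the whole page (initially forwarding
   every offset to the previous handler) and subpage_register() then rebinds
   just offsets 0x000..0x0ff to the device. */
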
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
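
/* Usage sketch (added; the device name and register value are hypothetical,
   and the block is guarded out of the build): registering an MMIO region
   with byte/word/dword handlers, then mapping it.  A NULL entry in either
   table would mark the region IO_MEM_SUBWIDTH, as implemented above. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678; /* hypothetical status register */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* latch val into hypothetical device state */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl, mydev_readl, mydev_readl,
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel, mydev_writel, mydev_writel,
};

static void mydev_init(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif
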
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
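
/* Usage note (added): the cpu_physical_memory_read()/write() helpers in
   cpu-all.h are thin wrappers around this function with is_write = 0/1: */
#if 0
static void example_copy_guest_word(target_phys_addr_t src,
                                    target_phys_addr_t dst)
{
    uint8_t buf[4];

    cpu_physical_memory_read(src, buf, 4);   /* rw(..., is_write = 0) */
    cpu_physical_memory_write(dst, buf, 4);  /* rw(..., is_write = 1) */
}
#endif
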
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
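
/* Example (added, illustrative): firmware loaders use this on purpose to
   write through ROM protection, e.g. copying a BIOS image into a region
   registered with IO_MEM_ROM, where cpu_physical_memory_write() would skip
   the ROM pages: */
#if 0
static void example_load_firmware(target_phys_addr_t base,
                                  const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(base, image, size);
}
#endif
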
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
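
/* Sketch (added; the PTE bit used here is hypothetical): target MMU code
   uses the *_notdirty stores when updating accessed/dirty bits in guest
   page tables, so the bookkeeping write neither marks the page dirty nor
   invalidates translated code: */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20); /* hypothetical accessed bit */
}
#endif
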
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
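
/* Example (added, illustrative): this is the path the gdb stub takes to
   read guest *virtual* memory, translating one page at a time through
   cpu_get_phys_page_debug(): */
#if 0
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0);
}
#endif
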
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
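
/* Usage note (added): plain fprintf() matches the cpu_fprintf callback type,
   so the statistics can be dumped to a stdio stream directly: */
#if 0
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
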
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif