/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>
#include "qemu-common.h"

#if !defined(TARGET_IA64)
#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;

char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    long long startaddr, endaddr;

    last_brk = (unsigned long)sbrk(0);
    f = fopen("/proc/self/maps", "r");
    n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
    startaddr = MIN(startaddr,
                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    endaddr = MIN(endaddr,
                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    page_set_flags(startaddr & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc *page_find_alloc(target_ulong index)
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
    lp = &l1_map[index >> L2_BITS];
    /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
static inline PageDesc *page_find(target_ulong index)
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
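/*
 * Illustrative sketch added by the editor (not in the original source):
 * the two-level lookup above splits a linear target page index as
 *
 *     PageDesc *chunk = l1_map[index >> L2_BITS];          // first level
 *     PageDesc *pd    = chunk + (index & (L2_SIZE - 1));   // second level
 *
 * page_find_alloc() lazily allocates a second-level chunk the first time
 * any page inside it is touched; page_find() only reads the tables.
 */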
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    /* allocate if not found */
    p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
    memset(p, 0, sizeof(void *) * L1_SIZE);
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    /* allocate if not found */
    pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++)
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
    code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
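/*
 * Editor's note (illustrative, not from the original source): assuming the
 * 32 MB DEFAULT_CODE_GEN_BUFFER_SIZE and, say, a CODE_GEN_AVG_BLOCK_SIZE of
 * 128 bytes, the sizing above would give roughly
 *
 *     code_gen_max_blocks = (32 * 1024 * 1024) / 128 = 262144 TBs
 *
 * while code_gen_buffer_max_size keeps one maximum-sized block of headroom
 * so a translation started near the end of the buffer cannot overflow it
 * before tb_flush() is triggered.
 */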
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
void cpu_exec_init(CPUState *env)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    env->thread_id = GetCurrentProcessId();
    env->thread_id = getpid();
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
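/*
 * Editor's sketch (not in the original source): the jmp_first/jmp_next
 * chains walked above store the jump-slot number in the two low bits of
 * each TranslationBlock pointer, which is why list walkers mask with ~3:
 *
 *     n1  = (long)tb1 & 3;                         // which jump slot
 *     tb1 = (TranslationBlock *)((long)tb1 & ~3);  // the real pointer
 *
 * The value ((long)tb | 2) stored in jmp_first marks the end of the
 * circular list for this TB.
 */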
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
        mask = ~(0xff << (end & 7));
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
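/*
 * Editor's sketch (not in the original source): code_bitmap holds one bit
 * per byte of the guest page (TARGET_PAGE_SIZE / 8 bytes in total), set for
 * every byte covered by a TB.  A write of 'len' bytes at page offset 'o'
 * can then be screened cheaply, as tb_invalidate_phys_page_fast() does
 * further down:
 *
 *     b = p->code_bitmap[o >> 3] >> (o & 7);
 *     if (b & ((1 << len) - 1))
 *         ...   // the write overlaps translated code, so invalidate
 */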
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
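/*
 * Editor's note (illustrative, not from the original source): the
 * code_gen_ptr update above is the usual round-up-to-alignment idiom.
 * For example, with CODE_GEN_ALIGN == 16 and code_gen_size == 52:
 *
 *     (ptr + 52 + 15) & ~15    // advances ptr by 64 bytes
 *
 * so the next TB always starts on a CODE_GEN_ALIGN boundary.
 */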
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_io_vaddr, len,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
    tb_invalidate_phys_page_range(start, start + len, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    current_tb_modified = 0;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
#endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
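/*
 * Editor's note (illustrative, not from the original source): the binary
 * search works because tbs[] is filled in the same order as the generated
 * code is laid out in code_gen_buffer, so tc_ptr values grow monotonically.
 * A typical use is mapping a host PC captured in a fault handler back to
 * the executing TB:
 *
 *     TranslationBlock *tb = tb_find_pc(host_pc);
 *     if (tb)
 *         cpu_restore_state(tb, env, host_pc, puc);
 */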
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    env->nb_watchpoints = 0;
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
    env->breakpoints[env->nb_breakpoints++] = pc;
        kvm_update_debugger(env);

    breakpoint_invalidate(env, pc);

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    env->nb_breakpoints = 0;

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
        kvm_update_debugger(env);

    breakpoint_invalidate(env, pc);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        kvm_update_debugger(env);
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
            setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
    if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
        p1 = strchr(p, ',');
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
        cpu_dump_state(env, logfile, fprintf, 0);

CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);

    tlb_flush_jmp_cache(env, addr);

    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
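/*
 * Editor's sketch (not in the original source): phys_ram_dirty keeps one
 * byte of dirty flags per target page, so a flag for a given ram address
 * is tested or updated with:
 *
 *     uint8_t *flags = &phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 *     if (!(*flags & CODE_DIRTY_FLAG)) { ... }   // as in the notdirty handlers
 *
 * The TLB walk above re-arms TLB_NOTDIRTY in every CPU's TLB so that the
 * next write to the range takes the slow path and can update these flags.
 */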
int cpu_physical_memory_set_dirty_tracking(int enable)
    r = kvm_physical_memory_set_dirty_tracking(enable);
    in_migration = enable;

int cpu_physical_memory_get_dirty_tracking(void)
    return in_migration;
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
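/*
 * Editor's sketch (not in the original source): for a plain RAM page the
 * 'addend' stored above lets the fast path turn a guest virtual address
 * into a host pointer with a single addition:
 *
 *     host_ptr = (uint8_t *)(unsigned long)(vaddr + te->addend);
 *
 * For I/O, ROM/ROMD and not-dirty pages the low bits of addr_read,
 * addr_write and addr_code carry TLB_MMIO or TLB_NOTDIRTY instead, which
 * forces those accesses through the out-of-line helpers.
 */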
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');

int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space. */
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
        /* we've wrapped around */
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!(p->flags & PAGE_VALID))
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        /* if the page was really writable, then we change its
           protection back to writable */
        if (prot & PAGE_WRITE_ORG) {
            pindex = (address - host_start) >> TARGET_PAGE_BITS;
            if (!(p1[pindex].flags & PAGE_WRITE)) {
                mprotect((void *)g2h(host_start), qemu_host_page_size,
                         (prot & PAGE_BITS) | PAGE_WRITE);
                p1[pindex].flags |= PAGE_WRITE;
                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
                tb_invalidate_check(address);
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr)                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t phys_offset)
    target_phys_addr_t addr, end_addr;
    ram_addr_t orig_size = size;

    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);

void qemu_ram_free(ram_addr_t addr)
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
    do_unassigned_access(addr, 0, 0, 0);
    do_unassigned_access(addr, 0, 0, 0);

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
    do_unassigned_access(addr, 1, 0, 0);
    do_unassigned_access(addr, 1, 0, 0);

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stb_p(phys_ram_base + ram_addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stw_p(phys_ram_base + ram_addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stl_p(phys_ram_base + ram_addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
            && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   handlers.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
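
/* Illustrative sketch (not part of the original exec.c): how a device model
   typically plugs into the table-driven dispatch above.  It registers byte,
   word and dword callbacks with cpu_register_io_memory() (io_index 0 means
   "allocate a new slot") and then maps the returned token into the guest
   physical address space.  All "mydev" names and the 0xfe000000 address are
   invented for the example; kept under "#if 0" so it does not affect the
   build. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    /* a real device would decode "addr - region base" here */
    return 0;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device would latch the register value here */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    NULL,            /* no byte access: NULL entries set IO_MEM_SUBWIDTH */
    NULL,            /* no word access */
    mydev_readl,
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    NULL,
    NULL,
    mydev_writel,
};

static void mydev_map(void *opaque)
{
    int io;

    io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
}
#endif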
#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does,
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
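
/* Illustrative sketch (not part of the original exec.c): using the slow-path
   accessor above, through the cpu_physical_memory_read() wrapper, to copy a
   guest data structure into a host buffer the way device emulation or
   DMA-style code commonly does.  The helper name and buffer size are invented
   for the example; kept under "#if 0" so it does not affect the build. */
#if 0
static void example_fetch_descriptor(target_phys_addr_t desc_gpa)
{
    uint8_t raw[16];

    /* crosses I/O and RAM transparently, one page fragment at a time */
    cpu_physical_memory_read(desc_gpa, raw, sizeof(raw));

    /* ... decode "raw" with ldl_p()/lduw_p() as appropriate ... */
}
#endif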
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
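
/* Illustrative sketch (not part of the original exec.c): why the ROM variant
   above exists.  A firmware loader must be able to store into pages that were
   registered as IO_MEM_ROM, which the ordinary write path refuses to touch;
   cpu_physical_memory_write_rom() copies straight into phys_ram_base for RAM,
   ROM and ROMD pages alike.  The function name and the load address are
   assumptions for the example; kept under "#if 0" so it does not affect the
   build. */
#if 0
static void example_load_firmware(const uint8_t *image, int image_size)
{
    /* copy the image into a region previously registered as ROM */
    cpu_physical_memory_write_rom(0xfffc0000, image, image_size);
}
#endif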
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
#ifdef __GNUC__
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
#else
#define unlikely(x)     x
#endif
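
/* Illustrative sketch (not part of the original exec.c): the point of the
   unlikely() macro defined above is branch-prediction hinting on hot paths,
   as used a few lines below for the in_migration test.  The example function
   is invented for illustration; kept under "#if 0" so it does not affect the
   build. */
#if 0
static int example_checked_div(int a, int b)
{
    if (unlikely(b == 0))   /* tell the compiler the error path is cold */
        return 0;
    return a / b;
}
#endif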
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
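
/* Illustrative sketch (not part of the original exec.c): the difference
   between stl_phys() and stl_phys_notdirty().  A softmmu helper that updates
   an accessed/dirty bit inside a guest page table entry uses the _notdirty
   variant so the write itself does not mark the page dirty or invalidate
   translated code; ordinary guest-visible stores use stl_phys().  The helper
   name is an assumption for the example; kept under "#if 0" so it does not
   affect the build. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    /* bit 5 is the x86 PTE "accessed" bit; purely illustrative here */
    stl_phys_notdirty(pte_addr, pte | (1 << 5));
}
#endif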
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
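
/* Illustrative sketch (not part of the original exec.c): cpu_memory_rw_debug()
   is the routine a debugger stub would use, because it takes a *virtual*
   address and walks the guest page tables via cpu_get_phys_page_debug()
   instead of relying on the TLB.  The wrapper name is invented for the
   example; kept under "#if 0" so it does not affect the build. */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif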
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"