/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>

#include "qemu-common.h"

#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
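/* Illustrative arithmetic (not from the original source): on a 32-bit
   target with 4 KiB pages (TARGET_PAGE_BITS = 12) and, say, L2_BITS = 10,
   L1_BITS comes out as 32 - 10 - 12 = 10, so l1_map has 1024 level-1 slots,
   each pointing to a 1024-entry block of PageDescs, and the two levels
   together cover all 2^20 target pages.  page_find()/page_find_alloc()
   below index level 1 with (index >> L2_BITS) and level 2 with
   (index & (L2_SIZE - 1)). */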
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
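/* Illustrative arithmetic (not from the original source): SUBPAGE_IDX keeps
   only the offset of an address inside its target page; with 4 KiB pages,
   SUBPAGE_IDX(0x1234abcd) = 0xbcd.  That per-byte offset indexes the
   mem_read/mem_write/opaque tables of subpage_t above, which is how a single
   target page can be shared between several I/O handlers. */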
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
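/* Illustrative arithmetic (not from the original source): the rounding in
   map_exec() widens [addr, addr + size) to whole host pages before calling
   mprotect().  With a 4 KiB page size, addr = 0x1234 and size = 0x100 give
   start = 0x1234 & ~0xfff = 0x1000 and end = (0x1334 + 0xfff) & ~0xfff =
   0x2000, so exactly the one page containing the range becomes RWX. */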
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
        return NULL;
#endif
    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif

    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
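/* Illustrative arithmetic (not from the original source, and the constants
   are only assumed for the example): with the default 32 MiB code buffer and
   a CODE_GEN_AVG_BLOCK_SIZE of 128 bytes, code_gen_max_blocks would be
   32 * 1024 * 1024 / 128 = 262144 TranslationBlock slots, while
   code_gen_buffer_max_size keeps one maximum-sized block of headroom so
   translation never runs past the end of the buffer. */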
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
#ifdef _WIN32
    env->thread_id = GetCurrentProcessId();
#else
    env->thread_id = getpid();
#endif
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
584 #ifdef DEBUG_TB_CHECK
586 static void tb_invalidate_check(target_ulong address
)
588 TranslationBlock
*tb
;
590 address
&= TARGET_PAGE_MASK
;
591 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
592 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
593 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
594 address
>= tb
->pc
+ tb
->size
)) {
595 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
596 address
, (long)tb
->pc
, tb
->size
);
602 /* verify that all the pages have correct rights for code */
603 static void tb_page_check(void)
605 TranslationBlock
*tb
;
606 int i
, flags1
, flags2
;
608 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
609 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
610 flags1
= page_get_flags(tb
->pc
);
611 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
612 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
613 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
614 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
620 void tb_jmp_check(TranslationBlock
*tb
)
622 TranslationBlock
*tb1
;
625 /* suppress any remaining jumps to this TB */
629 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
632 tb1
= tb1
->jmp_next
[n1
];
634 /* check end of list */
636 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
642 /* invalidate one TB */
643 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
646 TranslationBlock
*tb1
;
650 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
653 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
657 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
659 TranslationBlock
*tb1
;
665 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
667 *ptb
= tb1
->page_next
[n1
];
670 ptb
= &tb1
->page_next
[n1
];
674 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
676 TranslationBlock
*tb1
, **ptb
;
679 ptb
= &tb
->jmp_next
[n
];
682 /* find tb(n) in circular list */
686 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
687 if (n1
== n
&& tb1
== tb
)
690 ptb
= &tb1
->jmp_first
;
692 ptb
= &tb1
->jmp_next
[n1
];
695 /* now we can suppress tb(n) from the list */
696 *ptb
= tb
->jmp_next
[n
];
698 tb
->jmp_next
[n
] = NULL
;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (len > 0) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
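/* Example usage (not part of the original code): a minimal sketch showing
   which bits set_bits() touches for a small range. */
#if 0
static void set_bits_example(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8] = { 0 };

    /* mark page offsets 10..18 as containing translated code */
    set_bits(bitmap, 10, 9);
    /* now bitmap[1] == 0xfc (bits 10..15) and bitmap[2] == 0x07 (bits 16..18) */
}
#endif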
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
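/* Illustrative example (not from the original source): with 4 KiB target
   pages, a block translated from pc = 0x1ff0 with size 0x30 ends at 0x201f,
   so virt_page2 = 0x2000 differs from (pc & TARGET_PAGE_MASK) = 0x1000 and
   the TB is linked into both physical pages; a block that stays within one
   page keeps phys_page2 = -1. */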
859 /* invalidate all TBs which intersect with the target physical page
860 starting in range [start;end[. NOTE: start and end must refer to
861 the same physical page. 'is_cpu_write_access' should be true if called
862 from a real cpu write access: the virtual CPU will exit the current
863 TB if code is modified inside this TB. */
864 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
865 int is_cpu_write_access
)
867 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
868 CPUState
*env
= cpu_single_env
;
870 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
871 target_ulong tb_start
, tb_end
;
872 target_ulong current_pc
, current_cs_base
;
874 p
= page_find(start
>> TARGET_PAGE_BITS
);
877 if (!p
->code_bitmap
&&
878 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
879 is_cpu_write_access
) {
880 /* build code bitmap */
881 build_page_bitmap(p
);
884 /* we remove all the TBs in the range [start, end[ */
885 /* XXX: see if in some cases it could be faster to invalidate all the code */
886 current_tb_not_found
= is_cpu_write_access
;
887 current_tb_modified
= 0;
888 current_tb
= NULL
; /* avoid warning */
889 current_pc
= 0; /* avoid warning */
890 current_cs_base
= 0; /* avoid warning */
891 current_flags
= 0; /* avoid warning */
895 tb
= (TranslationBlock
*)((long)tb
& ~3);
896 tb_next
= tb
->page_next
[n
];
897 /* NOTE: this is subtle as a TB may span two physical pages */
899 /* NOTE: tb_end may be after the end of the page, but
900 it is not a problem */
901 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
902 tb_end
= tb_start
+ tb
->size
;
904 tb_start
= tb
->page_addr
[1];
905 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
907 if (!(tb_end
<= start
|| tb_start
>= end
)) {
908 #ifdef TARGET_HAS_PRECISE_SMC
909 if (current_tb_not_found
) {
910 current_tb_not_found
= 0;
912 if (env
->mem_io_pc
) {
913 /* now we have a real cpu fault */
914 current_tb
= tb_find_pc(env
->mem_io_pc
);
917 if (current_tb
== tb
&&
918 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
919 /* If we are modifying the current TB, we must stop
920 its execution. We could be more precise by checking
921 that the modification is after the current PC, but it
922 would require a specialized function to partially
923 restore the CPU state */
925 current_tb_modified
= 1;
926 cpu_restore_state(current_tb
, env
,
927 env
->mem_io_pc
, NULL
);
928 #if defined(TARGET_I386)
929 current_flags
= env
->hflags
;
930 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
931 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
932 current_pc
= current_cs_base
+ env
->eip
;
934 #error unsupported CPU
937 #endif /* TARGET_HAS_PRECISE_SMC */
938 /* we need to do that to handle the case where a signal
939 occurs while doing tb_phys_invalidate() */
942 saved_tb
= env
->current_tb
;
943 env
->current_tb
= NULL
;
945 tb_phys_invalidate(tb
, -1);
947 env
->current_tb
= saved_tb
;
948 if (env
->interrupt_request
&& env
->current_tb
)
949 cpu_interrupt(env
, env
->interrupt_request
);
954 #if !defined(CONFIG_USER_ONLY)
955 /* if no code remaining, no need to continue to use slow writes */
957 invalidate_page_bitmap(p
);
958 if (is_cpu_write_access
) {
959 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
963 #ifdef TARGET_HAS_PRECISE_SMC
964 if (current_tb_modified
) {
965 /* we generate a block containing just the instruction
966 modifying the memory. It will ensure that it cannot modify
968 env
->current_tb
= NULL
;
969 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
970 cpu_resume_from_signal(env
, NULL
);
975 /* len must be <= 8 and start must be a multiple of len */
976 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
983 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
984 cpu_single_env
->mem_io_vaddr
, len
,
986 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
990 p
= page_find(start
>> TARGET_PAGE_BITS
);
993 if (p
->code_bitmap
) {
994 offset
= start
& ~TARGET_PAGE_MASK
;
995 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
996 if (b
& ((1 << len
) - 1))
1000 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1004 #if !defined(CONFIG_SOFTMMU)
1005 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1006 unsigned long pc
, void *puc
)
1008 int n
, current_flags
, current_tb_modified
;
1009 target_ulong current_pc
, current_cs_base
;
1011 TranslationBlock
*tb
, *current_tb
;
1012 #ifdef TARGET_HAS_PRECISE_SMC
1013 CPUState
*env
= cpu_single_env
;
1016 addr
&= TARGET_PAGE_MASK
;
1017 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1021 current_tb_modified
= 0;
1023 current_pc
= 0; /* avoid warning */
1024 current_cs_base
= 0; /* avoid warning */
1025 current_flags
= 0; /* avoid warning */
1026 #ifdef TARGET_HAS_PRECISE_SMC
1027 if (tb
&& pc
!= 0) {
1028 current_tb
= tb_find_pc(pc
);
1031 while (tb
!= NULL
) {
1033 tb
= (TranslationBlock
*)((long)tb
& ~3);
1034 #ifdef TARGET_HAS_PRECISE_SMC
1035 if (current_tb
== tb
&&
1036 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1037 /* If we are modifying the current TB, we must stop
1038 its execution. We could be more precise by checking
1039 that the modification is after the current PC, but it
1040 would require a specialized function to partially
1041 restore the CPU state */
1043 current_tb_modified
= 1;
1044 cpu_restore_state(current_tb
, env
, pc
, puc
);
1045 #if defined(TARGET_I386)
1046 current_flags
= env
->hflags
;
1047 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
1048 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
1049 current_pc
= current_cs_base
+ env
->eip
;
1051 #error unsupported CPU
1054 #endif /* TARGET_HAS_PRECISE_SMC */
1055 tb_phys_invalidate(tb
, addr
);
1056 tb
= tb
->page_next
[n
];
1059 #ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb_modified
) {
1061 /* we generate a block containing just the instruction
1062 modifying the memory. It will ensure that it cannot modify
1064 env
->current_tb
= NULL
;
1065 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1066 cpu_resume_from_signal(env
, puc
);
1072 /* add the tb in the target page and protect it if necessary */
1073 static inline void tb_alloc_page(TranslationBlock
*tb
,
1074 unsigned int n
, target_ulong page_addr
)
1077 TranslationBlock
*last_first_tb
;
1079 tb
->page_addr
[n
] = page_addr
;
1080 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1081 tb
->page_next
[n
] = p
->first_tb
;
1082 last_first_tb
= p
->first_tb
;
1083 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1084 invalidate_page_bitmap(p
);
1086 #if defined(TARGET_HAS_SMC) || 1
1088 #if defined(CONFIG_USER_ONLY)
1089 if (p
->flags
& PAGE_WRITE
) {
1094 /* force the host page as non writable (writes will have a
1095 page fault + mprotect overhead) */
1096 page_addr
&= qemu_host_page_mask
;
1098 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1099 addr
+= TARGET_PAGE_SIZE
) {
1101 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1105 p2
->flags
&= ~PAGE_WRITE
;
1106 page_get_flags(addr
);
1108 mprotect(g2h(page_addr
), qemu_host_page_size
,
1109 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1110 #ifdef DEBUG_TB_INVALIDATE
1111 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1116 /* if some code is already present, then the pages are already
1117 protected. So we handle the case where only the first TB is
1118 allocated in a physical page */
1119 if (!last_first_tb
) {
1120 tlb_protect_code(page_addr
);
1124 #endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
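/* Illustrative note (not from the original source): the binary search works
   because TBs are handed out from tbs[] in the same order their code is
   emitted into code_gen_buffer, so tc_ptr grows monotonically with the
   index.  When the loop exits without an exact match, m_max indexes the last
   TB whose tc_ptr lies below the requested host PC, i.e. the block that
   contains it. */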
1224 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1226 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1228 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1231 tb1
= tb
->jmp_next
[n
];
1233 /* find head of list */
1236 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1239 tb1
= tb1
->jmp_next
[n1
];
1241 /* we are now sure now that tb jumps to tb1 */
1244 /* remove tb from the jmp_first list */
1245 ptb
= &tb_next
->jmp_first
;
1249 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1250 if (n1
== n
&& tb1
== tb
)
1252 ptb
= &tb1
->jmp_next
[n1
];
1254 *ptb
= tb
->jmp_next
[n
];
1255 tb
->jmp_next
[n
] = NULL
;
1257 /* suppress the jump to next tb in generated code */
1258 tb_reset_jump(tb
, n
);
1260 /* suppress jumps in the tb on which we could have jumped */
1261 tb_reset_jump_recursive(tb_next
);
1265 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1267 tb_reset_jump_recursive2(tb
, 0);
1268 tb_reset_jump_recursive2(tb
, 1);
1271 #if defined(TARGET_HAS_ICE)
1272 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1274 target_phys_addr_t addr
;
1276 ram_addr_t ram_addr
;
1279 addr
= cpu_get_phys_page_debug(env
, pc
);
1280 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1282 pd
= IO_MEM_UNASSIGNED
;
1284 pd
= p
->phys_offset
;
1286 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1287 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1291 /* Add a watchpoint. */
1292 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, int type
)
1296 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1297 if (addr
== env
->watchpoint
[i
].vaddr
)
1300 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1303 i
= env
->nb_watchpoints
++;
1304 env
->watchpoint
[i
].vaddr
= addr
;
1305 env
->watchpoint
[i
].type
= type
;
1306 tlb_flush_page(env
, addr
);
1307 /* FIXME: This flush is needed because of the hack to make memory ops
1308 terminate the TB. It can be removed once the proper IO trap and
1309 re-execute bits are in. */
1314 /* Remove a watchpoint. */
1315 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1319 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1320 if (addr
== env
->watchpoint
[i
].vaddr
) {
1321 env
->nb_watchpoints
--;
1322 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1323 tlb_flush_page(env
, addr
);
1330 /* Remove all watchpoints. */
1331 void cpu_watchpoint_remove_all(CPUState
*env
) {
1334 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1335 tlb_flush_page(env
, env
->watchpoint
[i
].vaddr
);
1337 env
->nb_watchpoints
= 0;
1340 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1341 breakpoint is reached */
1342 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1344 #if defined(TARGET_HAS_ICE)
1347 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1348 if (env
->breakpoints
[i
] == pc
)
1352 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1354 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1357 kvm_update_debugger(env
);
1359 breakpoint_invalidate(env
, pc
);
1366 /* remove all breakpoints */
1367 void cpu_breakpoint_remove_all(CPUState
*env
) {
1368 #if defined(TARGET_HAS_ICE)
1370 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1371 breakpoint_invalidate(env
, env
->breakpoints
[i
]);
1373 env
->nb_breakpoints
= 0;
1377 /* remove a breakpoint */
1378 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1380 #if defined(TARGET_HAS_ICE)
1382 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1383 if (env
->breakpoints
[i
] == pc
)
1388 env
->nb_breakpoints
--;
1389 if (i
< env
->nb_breakpoints
)
1390 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1393 kvm_update_debugger(env
);
1395 breakpoint_invalidate(env
, pc
);
1402 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1403 CPU loop after each instruction */
1404 void cpu_single_step(CPUState
*env
, int enabled
)
1406 #if defined(TARGET_HAS_ICE)
1407 if (env
->singlestep_enabled
!= enabled
) {
1408 env
->singlestep_enabled
= enabled
;
/* must flush all the translated code to avoid inconsistencies */
1410 /* XXX: only flush what is necessary */
1414 kvm_update_debugger(env
);
1418 /* enable or disable low levels log */
1419 void cpu_set_log(int log_flags
)
1421 loglevel
= log_flags
;
1422 if (loglevel
&& !logfile
) {
1423 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1425 perror(logfilename
);
1428 #if !defined(CONFIG_SOFTMMU)
1429 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1431 static uint8_t logfile_buf
[4096];
1432 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1435 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1439 if (!loglevel
&& logfile
) {
1445 void cpu_set_log_filename(const char *filename
)
1447 logfilename
= strdup(filename
);
1452 cpu_set_log(loglevel
);
1455 /* mask must never be zero, except for A20 change call */
1456 void cpu_interrupt(CPUState
*env
, int mask
)
1458 #if !defined(USE_NPTL)
1459 TranslationBlock
*tb
;
1460 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1464 old_mask
= env
->interrupt_request
;
1465 /* FIXME: This is probably not threadsafe. A different thread could
1466 be in the middle of a read-modify-write operation. */
1467 env
->interrupt_request
|= mask
;
1468 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1469 kvm_update_interrupt_request(env
);
1470 #if defined(USE_NPTL)
1471 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1472 problem and hope the cpu will stop of its own accord. For userspace
1473 emulation this often isn't actually as bad as it sounds. Often
1474 signals are used primarily to interrupt blocking syscalls. */
1477 env
->icount_decr
.u16
.high
= 0xffff;
1478 #ifndef CONFIG_USER_ONLY
1479 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1480 an async event happened and we need to process it. */
1482 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1483 cpu_abort(env
, "Raised interrupt while not in I/O function");
1487 tb
= env
->current_tb
;
1488 /* if the cpu is currently executing code, we must unlink it and
1489 all the potentially executing TB */
1490 if (tb
&& !testandset(&interrupt_lock
)) {
1491 env
->current_tb
= NULL
;
1492 tb_reset_jump_recursive(tb
);
1493 resetlock(&interrupt_lock
);
1499 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1501 env
->interrupt_request
&= ~mask
;
1504 CPULogItem cpu_log_items
[] = {
1505 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1506 "show generated host assembly code for each compiled TB" },
1507 { CPU_LOG_TB_IN_ASM
, "in_asm",
1508 "show target assembly code for each compiled TB" },
1509 { CPU_LOG_TB_OP
, "op",
1510 "show micro ops for each compiled TB" },
1511 { CPU_LOG_TB_OP_OPT
, "op_opt",
1514 "before eflags optimization and "
1516 "after liveness analysis" },
1517 { CPU_LOG_INT
, "int",
1518 "show interrupts/exceptions in short format" },
1519 { CPU_LOG_EXEC
, "exec",
1520 "show trace before each executed TB (lots of logs)" },
1521 { CPU_LOG_TB_CPU
, "cpu",
1522 "show CPU state before block translation" },
1524 { CPU_LOG_PCALL
, "pcall",
1525 "show protected mode far calls/returns/exceptions" },
1528 { CPU_LOG_IOPORT
, "ioport",
1529 "show all i/o ports accesses" },
1534 static int cmp1(const char *s1
, int n
, const char *s2
)
1536 if (strlen(s2
) != n
)
1538 return memcmp(s1
, s2
, n
) == 0;
1541 /* takes a comma separated list of log masks. Return 0 if error. */
1542 int cpu_str_to_log_mask(const char *str
)
1551 p1
= strchr(p
, ',');
1554 if(cmp1(p
,p1
-p
,"all")) {
1555 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1559 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1560 if (cmp1(p
, p1
- p
, item
->name
))
1574 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1581 fprintf(stderr
, "qemu: fatal: ");
1582 vfprintf(stderr
, fmt
, ap
);
1583 fprintf(stderr
, "\n");
1585 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1587 cpu_dump_state(env
, stderr
, fprintf
, 0);
1590 fprintf(logfile
, "qemu: fatal: ");
1591 vfprintf(logfile
, fmt
, ap2
);
1592 fprintf(logfile
, "\n");
1594 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1596 cpu_dump_state(env
, logfile
, fprintf
, 0);
1606 CPUState
*cpu_copy(CPUState
*env
)
1608 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1609 /* preserve chaining and index */
1610 CPUState
*next_cpu
= new_env
->next_cpu
;
1611 int cpu_index
= new_env
->cpu_index
;
1612 memcpy(new_env
, env
, sizeof(CPUState
));
1613 new_env
->next_cpu
= next_cpu
;
1614 new_env
->cpu_index
= cpu_index
;
1618 #if !defined(CONFIG_USER_ONLY)
1620 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1624 /* Discard jump cache entries for any tb which might potentially
1625 overlap the flushed page. */
1626 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1627 memset (&env
->tb_jmp_cache
[i
], 0,
1628 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1630 i
= tb_jmp_cache_hash_page(addr
);
1631 memset (&env
->tb_jmp_cache
[i
], 0,
1632 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1635 /* NOTE: if flush_global is true, also flush global entries (not
1637 void tlb_flush(CPUState
*env
, int flush_global
)
1641 #if defined(DEBUG_TLB)
1642 printf("tlb_flush:\n");
1644 /* must reset current TB so that interrupts cannot modify the
1645 links while we are modifying them */
1646 env
->current_tb
= NULL
;
1648 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1649 env
->tlb_table
[0][i
].addr_read
= -1;
1650 env
->tlb_table
[0][i
].addr_write
= -1;
1651 env
->tlb_table
[0][i
].addr_code
= -1;
1652 env
->tlb_table
[1][i
].addr_read
= -1;
1653 env
->tlb_table
[1][i
].addr_write
= -1;
1654 env
->tlb_table
[1][i
].addr_code
= -1;
1655 #if (NB_MMU_MODES >= 3)
1656 env
->tlb_table
[2][i
].addr_read
= -1;
1657 env
->tlb_table
[2][i
].addr_write
= -1;
1658 env
->tlb_table
[2][i
].addr_code
= -1;
1659 #if (NB_MMU_MODES == 4)
1660 env
->tlb_table
[3][i
].addr_read
= -1;
1661 env
->tlb_table
[3][i
].addr_write
= -1;
1662 env
->tlb_table
[3][i
].addr_code
= -1;
1667 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1670 if (env
->kqemu_enabled
) {
1671 kqemu_flush(env
, flush_global
);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
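/* Illustrative arithmetic (not from the original source): the TLB is a
   direct-mapped, per-MMU-mode table indexed by the page number modulo
   CPU_TLB_SIZE.  Assuming 4 KiB pages and a 256-entry TLB, addr = 0x12345678
   has page number 0x12345 and index 0x12345 & 0xff = 0x45, so only that one
   slot per mode needs to be compared and cleared above. */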
1722 /* update the TLBs so that writes to code in the virtual page 'addr'
1724 static void tlb_protect_code(ram_addr_t ram_addr
)
1726 cpu_physical_memory_reset_dirty(ram_addr
,
1727 ram_addr
+ TARGET_PAGE_SIZE
,
1731 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1732 tested for self modifying code */
1733 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1736 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1739 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1740 unsigned long start
, unsigned long length
)
1743 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1744 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1745 if ((addr
- start
) < length
) {
1746 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1751 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1755 unsigned long length
, start1
;
1759 start
&= TARGET_PAGE_MASK
;
1760 end
= TARGET_PAGE_ALIGN(end
);
1762 length
= end
- start
;
1765 len
= length
>> TARGET_PAGE_BITS
;
1767 /* XXX: should not depend on cpu context */
1769 if (env
->kqemu_enabled
) {
1772 for(i
= 0; i
< len
; i
++) {
1773 kqemu_set_notdirty(env
, addr
);
1774 addr
+= TARGET_PAGE_SIZE
;
1778 mask
= ~dirty_flags
;
1779 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1780 for(i
= 0; i
< len
; i
++)
1783 /* we modify the TLB cache so that the dirty bit will be set again
1784 when accessing the range */
1785 start1
= start
+ (unsigned long)phys_ram_base
;
1786 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1787 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1788 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1789 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1790 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1791 #if (NB_MMU_MODES >= 3)
1792 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1793 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1794 #if (NB_MMU_MODES == 4)
1795 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1796 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1802 int cpu_physical_memory_set_dirty_tracking(int enable
)
1807 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1808 in_migration
= enable
;
1812 int cpu_physical_memory_get_dirty_tracking(void)
1814 return in_migration
;
1817 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1819 ram_addr_t ram_addr
;
1821 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1822 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1823 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1824 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1825 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1830 /* update the TLB according to the current state of the dirty bits */
1831 void cpu_tlb_update_dirty(CPUState
*env
)
1834 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1835 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1836 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1837 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1838 #if (NB_MMU_MODES >= 3)
1839 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1840 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1841 #if (NB_MMU_MODES == 4)
1842 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1843 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1848 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1850 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1851 tlb_entry
->addr_write
= vaddr
;
1854 /* update the TLB corresponding to virtual page vaddr
1855 so that it is no longer dirty */
1856 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1860 vaddr
&= TARGET_PAGE_MASK
;
1861 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1862 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1863 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1864 #if (NB_MMU_MODES >= 3)
1865 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1866 #if (NB_MMU_MODES == 4)
1867 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1872 /* add a new TLB entry. At most one entry for a given virtual address
1873 is permitted. Return 0 if OK or 2 if the page could not be mapped
1874 (can only happen in non SOFTMMU mode for I/O pages or pages
1875 conflicting with the host address space). */
1876 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1877 target_phys_addr_t paddr
, int prot
,
1878 int mmu_idx
, int is_softmmu
)
1883 target_ulong address
;
1884 target_ulong code_address
;
1885 target_phys_addr_t addend
;
1889 target_phys_addr_t iotlb
;
1891 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1893 pd
= IO_MEM_UNASSIGNED
;
1895 pd
= p
->phys_offset
;
1897 #if defined(DEBUG_TLB)
1898 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1899 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1904 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1905 /* IO memory case (romd handled later) */
1906 address
|= TLB_MMIO
;
1908 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1909 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1911 iotlb
= pd
& TARGET_PAGE_MASK
;
1912 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1913 iotlb
|= IO_MEM_NOTDIRTY
;
1915 iotlb
|= IO_MEM_ROM
;
/* IO handlers are currently passed a physical address.
   It would be nice to pass an offset from the base address
   of that region.  This would avoid having to special case RAM,
   and avoid full address decoding in every device.
   We can't use the high bits of pd for this because
   IO_MEM_ROMD uses these as a ram address. */
1923 iotlb
= (pd
& ~TARGET_PAGE_MASK
) + paddr
;
1926 code_address
= address
;
1927 /* Make accesses to pages with watchpoints go via the
1928 watchpoint trap routines. */
1929 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1930 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1931 iotlb
= io_mem_watch
+ paddr
;
1932 /* TODO: The memory case can be optimized by not trapping
1933 reads of pages with a write breakpoint. */
1934 address
|= TLB_MMIO
;
1938 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1939 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
1940 te
= &env
->tlb_table
[mmu_idx
][index
];
1941 te
->addend
= addend
- vaddr
;
1942 if (prot
& PAGE_READ
) {
1943 te
->addr_read
= address
;
1948 if (prot
& PAGE_EXEC
) {
1949 te
->addr_code
= code_address
;
1953 if (prot
& PAGE_WRITE
) {
1954 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1955 (pd
& IO_MEM_ROMD
)) {
1956 /* Write access calls the I/O callback. */
1957 te
->addr_write
= address
| TLB_MMIO
;
1958 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1959 !cpu_physical_memory_is_dirty(pd
)) {
1960 te
->addr_write
= address
| TLB_NOTDIRTY
;
1962 te
->addr_write
= address
;
1965 te
->addr_write
= -1;
1972 void tlb_flush(CPUState
*env
, int flush_global
)
1976 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1980 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1981 target_phys_addr_t paddr
, int prot
,
1982 int mmu_idx
, int is_softmmu
)
1987 /* dump memory mappings */
1988 void page_dump(FILE *f
)
1990 unsigned long start
, end
;
1991 int i
, j
, prot
, prot1
;
1994 fprintf(f
, "%-8s %-8s %-8s %s\n",
1995 "start", "end", "size", "prot");
1999 for(i
= 0; i
<= L1_SIZE
; i
++) {
2004 for(j
= 0;j
< L2_SIZE
; j
++) {
2009 if (prot1
!= prot
) {
2010 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2012 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2013 start
, end
, end
- start
,
2014 prot
& PAGE_READ
? 'r' : '-',
2015 prot
& PAGE_WRITE
? 'w' : '-',
2016 prot
& PAGE_EXEC
? 'x' : '-');
2030 int page_get_flags(target_ulong address
)
2034 p
= page_find(address
>> TARGET_PAGE_BITS
);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
2043 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2048 /* mmap_lock should already be held. */
2049 start
= start
& TARGET_PAGE_MASK
;
2050 end
= TARGET_PAGE_ALIGN(end
);
2051 if (flags
& PAGE_WRITE
)
2052 flags
|= PAGE_WRITE_ORG
;
2053 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2054 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2055 /* We may be called for host regions that are outside guest
2059 /* if the write protection is set, then we invalidate the code
2061 if (!(p
->flags
& PAGE_WRITE
) &&
2062 (flags
& PAGE_WRITE
) &&
2064 tb_invalidate_phys_page(addr
, 0, NULL
);
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
2080 /* we've wrapped around */
2082 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2083 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2086 if( !(p
->flags
& PAGE_VALID
) )
2089 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2091 if (flags
& PAGE_WRITE
) {
2092 if (!(p
->flags
& PAGE_WRITE_ORG
))
2094 /* unprotect the page if it was put read-only because it
2095 contains translated code */
2096 if (!(p
->flags
& PAGE_WRITE
)) {
2097 if (!page_unprotect(addr
, 0, NULL
))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2108 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2110 unsigned int page_index
, prot
, pindex
;
2112 target_ulong host_start
, host_end
, addr
;
2114 /* Technically this isn't safe inside a signal handler. However we
2115 know this only ever happens in a synchronous SEGV handler, so in
2116 practice it seems to be ok. */
2119 host_start
= address
& qemu_host_page_mask
;
2120 page_index
= host_start
>> TARGET_PAGE_BITS
;
2121 p1
= page_find(page_index
);
2126 host_end
= host_start
+ qemu_host_page_size
;
2129 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2133 /* if the page was really writable, then we change its
2134 protection back to writable */
2135 if (prot
& PAGE_WRITE_ORG
) {
2136 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2137 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2138 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2139 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2140 p1
[pindex
].flags
|= PAGE_WRITE
;
2141 /* and since the content will be modified, we must invalidate
2142 the corresponding translated code. */
2143 tb_invalidate_phys_page(address
, pc
, puc
);
2144 #ifdef DEBUG_TB_CHECK
2145 tb_invalidate_check(address
);
2155 static inline void tlb_set_dirty(CPUState
*env
,
2156 unsigned long addr
, target_ulong vaddr
)
2159 #endif /* defined(CONFIG_USER_ONLY) */
2161 #if !defined(CONFIG_USER_ONLY)
2162 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2164 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2165 ram_addr_t orig_memory
);
2166 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2169 if (addr > start_addr) \
2172 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2173 if (start_addr2 > 0) \
2177 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2178 end_addr2 = TARGET_PAGE_SIZE - 1; \
2180 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2181 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2186 /* register physical memory. 'size' must be a multiple of the target
2187 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2189 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2191 ram_addr_t phys_offset
)
2193 target_phys_addr_t addr
, end_addr
;
2196 ram_addr_t orig_size
= size
;
2200 /* XXX: should not depend on cpu context */
2202 if (env
->kqemu_enabled
) {
2203 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2206 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2207 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2208 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2209 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2210 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2211 ram_addr_t orig_memory
= p
->phys_offset
;
2212 target_phys_addr_t start_addr2
, end_addr2
;
2213 int need_subpage
= 0;
2215 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2217 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2218 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2219 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2220 &p
->phys_offset
, orig_memory
);
2222 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2225 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2227 p
->phys_offset
= phys_offset
;
2228 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2229 (phys_offset
& IO_MEM_ROMD
))
2230 phys_offset
+= TARGET_PAGE_SIZE
;
2233 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2234 p
->phys_offset
= phys_offset
;
2235 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2236 (phys_offset
& IO_MEM_ROMD
))
2237 phys_offset
+= TARGET_PAGE_SIZE
;
2239 target_phys_addr_t start_addr2
, end_addr2
;
2240 int need_subpage
= 0;
2242 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2243 end_addr2
, need_subpage
);
2245 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2246 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2247 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2248 subpage_register(subpage
, start_addr2
, end_addr2
,
2255 /* since each CPU stores ram addresses in its TLB cache, we must
2256 reset the modified entries */
2258 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2263 /* XXX: temporary until new memory mapping API */
2264 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2268 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2270 return IO_MEM_UNASSIGNED
;
2271 return p
->phys_offset
;
2274 /* XXX: better than nothing */
2275 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2278 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2279 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
"\n",
2280 (uint64_t)size
, (uint64_t)phys_ram_size
);
2283 addr
= phys_ram_alloc_offset
;
2284 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2288 void qemu_ram_free(ram_addr_t addr
)
2292 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2294 #ifdef DEBUG_UNASSIGNED
2295 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2298 do_unassigned_access(addr
, 0, 0, 0);
2300 do_unassigned_access(addr
, 0, 0, 0);
2305 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2307 #ifdef DEBUG_UNASSIGNED
2308 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2311 do_unassigned_access(addr
, 1, 0, 0);
2313 do_unassigned_access(addr
, 1, 0, 0);
2317 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2318 unassigned_mem_readb
,
2319 unassigned_mem_readb
,
2320 unassigned_mem_readb
,
2323 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2324 unassigned_mem_writeb
,
2325 unassigned_mem_writeb
,
2326 unassigned_mem_writeb
,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
            && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
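
/* Illustrative sketch, not part of the original file: how a device model
   would typically consume the API documented above.  The callback names,
   ExampleDevState, and the base address are hypothetical; only the
   registration pattern is the point. */
#if 0
static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readb,          /* byte accesses  (index 0) */
    example_dev_readw,          /* word accesses  (index 1) */
    example_dev_readl,          /* dword accesses (index 2) */
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writeb,
    example_dev_writew,
    example_dev_writel,
};

static void example_dev_init(ExampleDevState *s, target_phys_addr_t base)
{
    /* io_index == 0 requests a fresh io zone; the return value is then
       usable as a phys_offset for cpu_register_physical_memory() */
    int io_index = cpu_register_io_memory(0, example_dev_read,
                                          example_dev_write, s);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
}
#endif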
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
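
/* Illustrative sketch, not part of the original file: a firmware loader
   would use cpu_physical_memory_write_rom() so that the image also lands in
   regions registered as ROM/ROMD, not only in plain RAM.  The wrapper name,
   buffer and load address are hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *image, int image_size,
                                  target_phys_addr_t load_addr)
{
    /* unlike a plain cpu_physical_memory_write(), this also patches ROM pages */
    cpu_physical_memory_write_rom(load_addr, image, image_size);
}
#endif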
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
#ifdef __GNUC__
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   x
#define unlikely(x) x
#endif
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
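
/* Illustrative sketch, not part of the original file: the kind of caller the
   comment above has in mind is a target MMU helper that sets accessed/dirty
   bits in a guest page table entry.  Using stl_phys_notdirty() keeps the
   page's dirty bits usable for PTE-modification tracking instead of marking
   the whole page dirty.  The PTE layout and bit value below are hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & 0x20)) {                 /* hypothetical "accessed" bit */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif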
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
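
/* Illustrative sketch, not part of the original file: a debugger stub reads
   guest virtual memory through cpu_memory_rw_debug(), which translates each
   page with cpu_get_phys_page_debug() before falling back to
   cpu_physical_memory_rw().  The wrapper below is hypothetical. */
#if 0
static int example_read_guest_virtual(CPUState *env, target_ulong vaddr,
                                      uint8_t *buf, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif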
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"