/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

/* The prologue must be reachable with a direct jump.  ARM has a
   limited branch range (possibly also PPC and SPARC?) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
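
/* make a host memory range executable; used below for the translated
   code buffer and the code generation prologue */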
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
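
/* determine the host page size and allocate the first level of the
   physical page table */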
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
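
/* return the PageDesc for the given guest page index, allocating the
   second-level table on demand */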
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
        return NULL;
#endif
    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}
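
/* same as page_find_alloc(), but without allocation: returns 0 if the
   page descriptor does not exist yet */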
static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
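
/* return the PhysPageDesc for a physical page index; if 'alloc' is set,
   missing intermediate tables are allocated on demand */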
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif

    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
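
/* allocate the buffer that receives the generated host code, either the
   static buffer or a dynamically mapped executable region */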
void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);

    return 0;
}
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
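
/* remove the TB from the physical hash table, from the per-page TB lists
   and from every jump chain, so it can no longer be found or entered */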
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
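
/* set 'len' bits starting at bit index 'start' in the bitmap 'tab' */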
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
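
/* translate a new TB for the code starting at 'pc' and register it in the
   physical page tables */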
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_io_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
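
/* remove the jump entry 'n' of 'tb' from the circular jump list of the TB
   it points to, reset the jump in the generated code and recurse into
   that target TB */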
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tlb_flush(env, 1);

    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
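
/* flush, in every MMU mode, the TLB entries that map the virtual page
   'addr', together with the corresponding tb_jmp_cache entries */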
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
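
/* clear the given dirty flags for a physical address range and force the
   corresponding TLB write entries back onto the slow notdirty path */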
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;

    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;

    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
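
/* handlers for accesses to physical addresses that have no RAM or device
   registered */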
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
2359 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2360 NULL
, /* never used */
2361 NULL
, /* never used */
2362 NULL
, /* never used */
2365 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2366 notdirty_mem_writeb
,
2367 notdirty_mem_writew
,
2368 notdirty_mem_writel
,
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
            && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
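
/* Illustrative sketch (not part of the original file): how a device model
   would typically use cpu_register_io_memory().  The device name, register
   behaviour and the guest-physical base address are invented for the
   example; only the byte/word/dword callback layout and the use of the
   returned io index with cpu_register_physical_memory() come from the
   comment above. */
#if 0
static uint32_t demo_dev_readl(void *opaque, target_phys_addr_t addr)
{
    /* return a fixed ID value regardless of the register offset */
    return 0x12345678;
}

static void demo_dev_writel(void *opaque, target_phys_addr_t addr,
                            uint32_t val)
{
    /* writes are ignored in this sketch */
}

static CPUReadMemoryFunc *demo_dev_read[3] = {
    NULL,            /* no byte access: NULL entries make the zone subwidth */
    NULL,            /* no word access */
    demo_dev_readl,  /* dword access */
};

static CPUWriteMemoryFunc *demo_dev_write[3] = {
    NULL,
    NULL,
    demo_dev_writel,
};

static void demo_dev_map(target_phys_addr_t base)
{
    int io;

    /* io_index == 0 allocates a new I/O zone; the return value is then
       handed to cpu_register_physical_memory() to map one page of MMIO */
    io = cpu_register_io_memory(0, demo_dev_read, demo_dev_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif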
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
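
/* Illustrative sketch (not part of the original file): how debug-style
   code elsewhere in QEMU typically uses the accessor above through the
   cpu_physical_memory_read() wrapper (the is_write == 0 case of
   cpu_physical_memory_rw()).  The function name, guest address and dump
   size are invented for the example. */
#if 0
static void example_dump_guest_bytes(target_phys_addr_t gpa)
{
    uint8_t buf[16];
    int i;

    cpu_physical_memory_read(gpa, buf, sizeof(buf));
    for (i = 0; i < 16; i++)
        printf("%02x ", buf[i]);
    printf("\n");
}
#endif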
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
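
/* Illustrative sketch (not part of the original file): the kind of target
   MMU helper the comment on stl_phys_notdirty() has in mind.  A page-table
   walker that sets an accessed bit in a guest PTE uses the _notdirty
   variant so that the write itself does not mark the RAM page dirty or
   invalidate translated code.  The PTE layout and bit value are invented
   for the example. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    if (!(pte & 0x20)) {            /* hypothetical "accessed" bit */
        pte |= 0x20;
        stl_phys_notdirty(pte_addr, pte);
    }
}
#endif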
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
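
/* Illustrative sketch (not part of the original file): how a debugger
   stub might read guest-virtual memory through the helper above.  The
   wrapper name is invented for the example. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* is_write == 0: each page is translated with cpu_get_phys_page_debug()
       and then copied out via cpu_physical_memory_rw() */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif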
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
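
/* Illustrative sketch (not part of the original file): callers can pass any
   fprintf-compatible callback to dump_exec_info(), e.g. to print the JIT
   statistics to stderr.  The wrapper name is invented for the example. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif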
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif