/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 static TranslationBlock
*tbs
;
88 int code_gen_max_blocks
;
89 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
102 #define code_gen_section \
103 __attribute__((aligned (32)))
106 uint8_t code_gen_prologue
[1024] code_gen_section
;
107 static uint8_t *code_gen_buffer
;
108 static unsigned long code_gen_buffer_size
;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size
;
111 uint8_t *code_gen_ptr
;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size
;
116 uint8_t *phys_ram_base
;
117 uint8_t *phys_ram_dirty
;
118 static int in_migration
;
119 static ram_addr_t phys_ram_alloc_offset
= 0;
123 /* current CPU in the current thread. It is only valid inside
125 CPUState
*cpu_single_env
;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
146 typedef struct PhysPageDesc
{
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset
;
149 ram_addr_t region_offset
;
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
166 unsigned long qemu_real_host_page_size
;
167 unsigned long qemu_host_page_bits
;
168 unsigned long qemu_host_page_size
;
169 unsigned long qemu_host_page_mask
;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc
*l1_map
[L1_SIZE
];
173 static PhysPageDesc
**l1_phys_map
;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
180 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
181 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
182 char io_mem_used
[IO_MEM_NB_ENTRIES
];
183 static int io_mem_watch
;
187 static const char *logfilename
= "/tmp/qemu.log";
190 static int log_append
= 0;
193 static int tlb_flush_count
;
194 static int tb_flush_count
;
195 static int tb_phys_invalidate_count
;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t
{
199 target_phys_addr_t base
;
200 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
201 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
202 void *opaque
[TARGET_PAGE_SIZE
][2][4];
203 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
207 static void map_exec(void *addr
, long size
)
210 VirtualProtect(addr
, size
,
211 PAGE_EXECUTE_READWRITE
, &old_protect
);
/* Make the host memory range [addr, addr+size) executable (POSIX variant).
 * The range is rounded outward to host page boundaries because mprotect()
 * operates on whole pages.
 * NOTE(review): only the brace lines were missing from the extraction;
 * all statements below are as in the original. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    /* round start down to a page boundary */
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    /* round end up to a page boundary */
    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
238 SYSTEM_INFO system_info
;
240 GetSystemInfo(&system_info
);
241 qemu_real_host_page_size
= system_info
.dwPageSize
;
244 qemu_real_host_page_size
= getpagesize();
246 if (qemu_host_page_size
== 0)
247 qemu_host_page_size
= qemu_real_host_page_size
;
248 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
249 qemu_host_page_size
= TARGET_PAGE_SIZE
;
250 qemu_host_page_bits
= 0;
251 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
252 qemu_host_page_bits
++;
253 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
254 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
255 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr
, endaddr
;
264 last_brk
= (unsigned long)sbrk(0);
265 f
= fopen("/proc/self/maps", "r");
268 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
270 startaddr
= MIN(startaddr
,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
272 endaddr
= MIN(endaddr
,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
274 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
275 TARGET_PAGE_ALIGN(endaddr
),
286 static inline PageDesc
**page_l1_map(target_ulong index
)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
294 return &l1_map
[index
>> L2_BITS
];
297 static inline PageDesc
*page_find_alloc(target_ulong index
)
300 lp
= page_l1_map(index
);
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
309 /* Don't use qemu_malloc because it may recurse. */
310 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
311 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
314 unsigned long addr
= h2g(p
);
315 page_set_flags(addr
& TARGET_PAGE_MASK
,
316 TARGET_PAGE_ALIGN(addr
+ len
),
320 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
324 return p
+ (index
& (L2_SIZE
- 1));
327 static inline PageDesc
*page_find(target_ulong index
)
330 lp
= page_l1_map(index
);
337 return p
+ (index
& (L2_SIZE
- 1));
340 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
345 p
= (void **)l1_phys_map
;
346 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
348 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
351 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
354 /* allocate if not found */
357 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
358 memset(p
, 0, sizeof(void *) * L1_SIZE
);
362 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
366 /* allocate if not found */
369 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
371 for (i
= 0; i
< L2_SIZE
; i
++) {
372 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
373 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
376 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
379 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
381 return phys_page_find_alloc(index
, 0);
384 #if !defined(CONFIG_USER_ONLY)
385 static void tlb_protect_code(ram_addr_t ram_addr
);
386 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
388 #define mmap_lock() do { } while(0)
389 #define mmap_unlock() do { } while(0)
392 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
394 #if defined(CONFIG_USER_ONLY)
395 /* Currently it is not recommended to allocate big chunks of data in
396 user mode. It will change when a dedicated libc will be used */
397 #define USE_STATIC_CODE_GEN_BUFFER
400 #ifdef USE_STATIC_CODE_GEN_BUFFER
401 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
404 static void code_gen_alloc(unsigned long tb_size
)
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer
= static_code_gen_buffer
;
408 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
409 map_exec(code_gen_buffer
, code_gen_buffer_size
);
411 code_gen_buffer_size
= tb_size
;
412 if (code_gen_buffer_size
== 0) {
413 #if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
417 /* XXX: needs adjustments */
418 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
421 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
422 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425 #if defined(__linux__)
430 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
431 #if defined(__x86_64__)
433 /* Cannot map more than that */
434 if (code_gen_buffer_size
> (800 * 1024 * 1024))
435 code_gen_buffer_size
= (800 * 1024 * 1024);
436 #elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
439 start
= (void *) 0x60000000UL
;
440 if (code_gen_buffer_size
> (512 * 1024 * 1024))
441 code_gen_buffer_size
= (512 * 1024 * 1024);
442 #elif defined(__arm__)
443 /* Map the buffer below 32M, so we can use direct calls and branches */
445 start
= (void *) 0x01000000UL
;
446 if (code_gen_buffer_size
> 16 * 1024 * 1024)
447 code_gen_buffer_size
= 16 * 1024 * 1024;
449 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
450 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
452 if (code_gen_buffer
== MAP_FAILED
) {
453 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
457 #elif defined(__FreeBSD__)
461 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
462 #if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
466 addr
= (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size
> (800 * 1024 * 1024))
469 code_gen_buffer_size
= (800 * 1024 * 1024);
471 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
472 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
474 if (code_gen_buffer
== MAP_FAILED
) {
475 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
480 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
481 map_exec(code_gen_buffer
, code_gen_buffer_size
);
483 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
484 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
485 code_gen_buffer_max_size
= code_gen_buffer_size
-
486 code_gen_max_block_size();
487 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
488 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
491 /* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
494 void cpu_exec_init_all(unsigned long tb_size
)
497 code_gen_alloc(tb_size
);
498 code_gen_ptr
= code_gen_buffer
;
500 #if !defined(CONFIG_USER_ONLY)
505 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
507 #define CPU_COMMON_SAVE_VERSION 1
509 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
511 CPUState
*env
= opaque
;
513 qemu_put_be32s(f
, &env
->halted
);
514 qemu_put_be32s(f
, &env
->interrupt_request
);
517 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
519 CPUState
*env
= opaque
;
521 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
524 qemu_get_be32s(f
, &env
->halted
);
525 qemu_get_be32s(f
, &env
->interrupt_request
);
526 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
533 void cpu_exec_init(CPUState
*env
)
538 env
->next_cpu
= NULL
;
541 while (*penv
!= NULL
) {
542 penv
= (CPUState
**)&(*penv
)->next_cpu
;
545 env
->cpu_index
= cpu_index
;
546 TAILQ_INIT(&env
->breakpoints
);
547 TAILQ_INIT(&env
->watchpoints
);
549 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
550 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
551 cpu_common_save
, cpu_common_load
, env
);
552 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
553 cpu_save
, cpu_load
, env
);
557 static inline void invalidate_page_bitmap(PageDesc
*p
)
559 if (p
->code_bitmap
) {
560 qemu_free(p
->code_bitmap
);
561 p
->code_bitmap
= NULL
;
563 p
->code_write_count
= 0;
566 /* set to NULL all the 'first_tb' fields in all PageDescs */
567 static void page_flush_tb(void)
572 for(i
= 0; i
< L1_SIZE
; i
++) {
575 for(j
= 0; j
< L2_SIZE
; j
++) {
577 invalidate_page_bitmap(p
);
584 /* flush all the translation blocks */
585 /* XXX: tb_flush is currently not thread safe */
586 void tb_flush(CPUState
*env1
)
589 #if defined(DEBUG_FLUSH)
590 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
591 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
593 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
595 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
596 cpu_abort(env1
, "Internal error: code buffer overflow\n");
600 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
601 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
604 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
607 code_gen_ptr
= code_gen_buffer
;
608 /* XXX: flush processor icache at this point if cache flush is
613 #ifdef DEBUG_TB_CHECK
615 static void tb_invalidate_check(target_ulong address
)
617 TranslationBlock
*tb
;
619 address
&= TARGET_PAGE_MASK
;
620 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
621 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
622 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
623 address
>= tb
->pc
+ tb
->size
)) {
624 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
625 address
, (long)tb
->pc
, tb
->size
);
631 /* verify that all the pages have correct rights for code */
632 static void tb_page_check(void)
634 TranslationBlock
*tb
;
635 int i
, flags1
, flags2
;
637 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
638 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
639 flags1
= page_get_flags(tb
->pc
);
640 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
641 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
642 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
643 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
649 static void tb_jmp_check(TranslationBlock
*tb
)
651 TranslationBlock
*tb1
;
654 /* suppress any remaining jumps to this TB */
658 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
661 tb1
= tb1
->jmp_next
[n1
];
663 /* check end of list */
665 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
671 /* invalidate one TB */
672 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
675 TranslationBlock
*tb1
;
679 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
682 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
686 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
688 TranslationBlock
*tb1
;
694 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
696 *ptb
= tb1
->page_next
[n1
];
699 ptb
= &tb1
->page_next
[n1
];
703 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
705 TranslationBlock
*tb1
, **ptb
;
708 ptb
= &tb
->jmp_next
[n
];
711 /* find tb(n) in circular list */
715 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
716 if (n1
== n
&& tb1
== tb
)
719 ptb
= &tb1
->jmp_first
;
721 ptb
= &tb1
->jmp_next
[n1
];
724 /* now we can suppress tb(n) from the list */
725 *ptb
= tb
->jmp_next
[n
];
727 tb
->jmp_next
[n
] = NULL
;
731 /* reset the jump entry 'n' of a TB so that it is not chained to
733 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
735 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
738 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
743 target_phys_addr_t phys_pc
;
744 TranslationBlock
*tb1
, *tb2
;
746 /* remove the TB from the hash list */
747 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
748 h
= tb_phys_hash_func(phys_pc
);
749 tb_remove(&tb_phys_hash
[h
], tb
,
750 offsetof(TranslationBlock
, phys_hash_next
));
752 /* remove the TB from the page list */
753 if (tb
->page_addr
[0] != page_addr
) {
754 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
755 tb_page_remove(&p
->first_tb
, tb
);
756 invalidate_page_bitmap(p
);
758 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
759 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
760 tb_page_remove(&p
->first_tb
, tb
);
761 invalidate_page_bitmap(p
);
764 tb_invalidated_flag
= 1;
766 /* remove the TB from the hash list */
767 h
= tb_jmp_cache_hash_func(tb
->pc
);
768 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
769 if (env
->tb_jmp_cache
[h
] == tb
)
770 env
->tb_jmp_cache
[h
] = NULL
;
773 /* suppress this TB from the two jump lists */
774 tb_jmp_remove(tb
, 0);
775 tb_jmp_remove(tb
, 1);
777 /* suppress any remaining jumps to this TB */
783 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
784 tb2
= tb1
->jmp_next
[n1
];
785 tb_reset_jump(tb1
, n1
);
786 tb1
->jmp_next
[n1
] = NULL
;
789 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
791 tb_phys_invalidate_count
++;
794 static inline void set_bits(uint8_t *tab
, int start
, int len
)
800 mask
= 0xff << (start
& 7);
801 if ((start
& ~7) == (end
& ~7)) {
803 mask
&= ~(0xff << (end
& 7));
808 start
= (start
+ 8) & ~7;
810 while (start
< end1
) {
815 mask
= ~(0xff << (end
& 7));
821 static void build_page_bitmap(PageDesc
*p
)
823 int n
, tb_start
, tb_end
;
824 TranslationBlock
*tb
;
826 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
831 tb
= (TranslationBlock
*)((long)tb
& ~3);
832 /* NOTE: this is subtle as a TB may span two physical pages */
834 /* NOTE: tb_end may be after the end of the page, but
835 it is not a problem */
836 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
837 tb_end
= tb_start
+ tb
->size
;
838 if (tb_end
> TARGET_PAGE_SIZE
)
839 tb_end
= TARGET_PAGE_SIZE
;
842 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
844 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
845 tb
= tb
->page_next
[n
];
849 TranslationBlock
*tb_gen_code(CPUState
*env
,
850 target_ulong pc
, target_ulong cs_base
,
851 int flags
, int cflags
)
853 TranslationBlock
*tb
;
855 target_ulong phys_pc
, phys_page2
, virt_page2
;
858 phys_pc
= get_phys_addr_code(env
, pc
);
861 /* flush must be done */
863 /* cannot fail at this point */
865 /* Don't forget to invalidate previous TB info. */
866 tb_invalidated_flag
= 1;
868 tc_ptr
= code_gen_ptr
;
870 tb
->cs_base
= cs_base
;
873 cpu_gen_code(env
, tb
, &code_gen_size
);
874 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
876 /* check next page if needed */
877 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
879 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
880 phys_page2
= get_phys_addr_code(env
, virt_page2
);
882 tb_link_phys(tb
, phys_pc
, phys_page2
);
886 /* invalidate all TBs which intersect with the target physical page
887 starting in range [start;end[. NOTE: start and end must refer to
888 the same physical page. 'is_cpu_write_access' should be true if called
889 from a real cpu write access: the virtual CPU will exit the current
890 TB if code is modified inside this TB. */
891 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
892 int is_cpu_write_access
)
894 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
895 CPUState
*env
= cpu_single_env
;
896 target_ulong tb_start
, tb_end
;
899 #ifdef TARGET_HAS_PRECISE_SMC
900 int current_tb_not_found
= is_cpu_write_access
;
901 TranslationBlock
*current_tb
= NULL
;
902 int current_tb_modified
= 0;
903 target_ulong current_pc
= 0;
904 target_ulong current_cs_base
= 0;
905 int current_flags
= 0;
906 #endif /* TARGET_HAS_PRECISE_SMC */
908 p
= page_find(start
>> TARGET_PAGE_BITS
);
911 if (!p
->code_bitmap
&&
912 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
913 is_cpu_write_access
) {
914 /* build code bitmap */
915 build_page_bitmap(p
);
918 /* we remove all the TBs in the range [start, end[ */
919 /* XXX: see if in some cases it could be faster to invalidate all the code */
923 tb
= (TranslationBlock
*)((long)tb
& ~3);
924 tb_next
= tb
->page_next
[n
];
925 /* NOTE: this is subtle as a TB may span two physical pages */
927 /* NOTE: tb_end may be after the end of the page, but
928 it is not a problem */
929 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
930 tb_end
= tb_start
+ tb
->size
;
932 tb_start
= tb
->page_addr
[1];
933 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
935 if (!(tb_end
<= start
|| tb_start
>= end
)) {
936 #ifdef TARGET_HAS_PRECISE_SMC
937 if (current_tb_not_found
) {
938 current_tb_not_found
= 0;
940 if (env
->mem_io_pc
) {
941 /* now we have a real cpu fault */
942 current_tb
= tb_find_pc(env
->mem_io_pc
);
945 if (current_tb
== tb
&&
946 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
947 /* If we are modifying the current TB, we must stop
948 its execution. We could be more precise by checking
949 that the modification is after the current PC, but it
950 would require a specialized function to partially
951 restore the CPU state */
953 current_tb_modified
= 1;
954 cpu_restore_state(current_tb
, env
,
955 env
->mem_io_pc
, NULL
);
956 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
959 #endif /* TARGET_HAS_PRECISE_SMC */
960 /* we need to do that to handle the case where a signal
961 occurs while doing tb_phys_invalidate() */
964 saved_tb
= env
->current_tb
;
965 env
->current_tb
= NULL
;
967 tb_phys_invalidate(tb
, -1);
969 env
->current_tb
= saved_tb
;
970 if (env
->interrupt_request
&& env
->current_tb
)
971 cpu_interrupt(env
, env
->interrupt_request
);
976 #if !defined(CONFIG_USER_ONLY)
977 /* if no code remaining, no need to continue to use slow writes */
979 invalidate_page_bitmap(p
);
980 if (is_cpu_write_access
) {
981 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
985 #ifdef TARGET_HAS_PRECISE_SMC
986 if (current_tb_modified
) {
987 /* we generate a block containing just the instruction
988 modifying the memory. It will ensure that it cannot modify
990 env
->current_tb
= NULL
;
991 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
992 cpu_resume_from_signal(env
, NULL
);
997 /* len must be <= 8 and start must be a multiple of len */
998 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1004 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1005 cpu_single_env
->mem_io_vaddr
, len
,
1006 cpu_single_env
->eip
,
1007 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1010 p
= page_find(start
>> TARGET_PAGE_BITS
);
1013 if (p
->code_bitmap
) {
1014 offset
= start
& ~TARGET_PAGE_MASK
;
1015 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1016 if (b
& ((1 << len
) - 1))
1020 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1024 #if !defined(CONFIG_SOFTMMU)
1025 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1026 unsigned long pc
, void *puc
)
1028 TranslationBlock
*tb
;
1031 #ifdef TARGET_HAS_PRECISE_SMC
1032 TranslationBlock
*current_tb
= NULL
;
1033 CPUState
*env
= cpu_single_env
;
1034 int current_tb_modified
= 0;
1035 target_ulong current_pc
= 0;
1036 target_ulong current_cs_base
= 0;
1037 int current_flags
= 0;
1040 addr
&= TARGET_PAGE_MASK
;
1041 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1045 #ifdef TARGET_HAS_PRECISE_SMC
1046 if (tb
&& pc
!= 0) {
1047 current_tb
= tb_find_pc(pc
);
1050 while (tb
!= NULL
) {
1052 tb
= (TranslationBlock
*)((long)tb
& ~3);
1053 #ifdef TARGET_HAS_PRECISE_SMC
1054 if (current_tb
== tb
&&
1055 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1056 /* If we are modifying the current TB, we must stop
1057 its execution. We could be more precise by checking
1058 that the modification is after the current PC, but it
1059 would require a specialized function to partially
1060 restore the CPU state */
1062 current_tb_modified
= 1;
1063 cpu_restore_state(current_tb
, env
, pc
, puc
);
1064 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1067 #endif /* TARGET_HAS_PRECISE_SMC */
1068 tb_phys_invalidate(tb
, addr
);
1069 tb
= tb
->page_next
[n
];
1072 #ifdef TARGET_HAS_PRECISE_SMC
1073 if (current_tb_modified
) {
1074 /* we generate a block containing just the instruction
1075 modifying the memory. It will ensure that it cannot modify
1077 env
->current_tb
= NULL
;
1078 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1079 cpu_resume_from_signal(env
, puc
);
1085 /* add the tb in the target page and protect it if necessary */
1086 static inline void tb_alloc_page(TranslationBlock
*tb
,
1087 unsigned int n
, target_ulong page_addr
)
1090 TranslationBlock
*last_first_tb
;
1092 tb
->page_addr
[n
] = page_addr
;
1093 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1094 tb
->page_next
[n
] = p
->first_tb
;
1095 last_first_tb
= p
->first_tb
;
1096 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1097 invalidate_page_bitmap(p
);
1099 #if defined(TARGET_HAS_SMC) || 1
1101 #if defined(CONFIG_USER_ONLY)
1102 if (p
->flags
& PAGE_WRITE
) {
1107 /* force the host page as non writable (writes will have a
1108 page fault + mprotect overhead) */
1109 page_addr
&= qemu_host_page_mask
;
1111 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1112 addr
+= TARGET_PAGE_SIZE
) {
1114 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1118 p2
->flags
&= ~PAGE_WRITE
;
1119 page_get_flags(addr
);
1121 mprotect(g2h(page_addr
), qemu_host_page_size
,
1122 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1123 #ifdef DEBUG_TB_INVALIDATE
1124 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1129 /* if some code is already present, then the pages are already
1130 protected. So we handle the case where only the first TB is
1131 allocated in a physical page */
1132 if (!last_first_tb
) {
1133 tlb_protect_code(page_addr
);
1137 #endif /* TARGET_HAS_SMC */
1140 /* Allocate a new translation block. Flush the translation buffer if
1141 too many translation blocks or too much generated code. */
1142 TranslationBlock
*tb_alloc(target_ulong pc
)
1144 TranslationBlock
*tb
;
1146 if (nb_tbs
>= code_gen_max_blocks
||
1147 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1149 tb
= &tbs
[nb_tbs
++];
1155 void tb_free(TranslationBlock
*tb
)
1157 /* In practice this is mostly used for single use temporary TB
1158 Ignore the hard cases and just back up if this TB happens to
1159 be the last one generated. */
1160 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1161 code_gen_ptr
= tb
->tc_ptr
;
1166 /* add a new TB and link it to the physical page tables. phys_page2 is
1167 (-1) to indicate that only one page contains the TB. */
1168 void tb_link_phys(TranslationBlock
*tb
,
1169 target_ulong phys_pc
, target_ulong phys_page2
)
1172 TranslationBlock
**ptb
;
1174 /* Grab the mmap lock to stop another thread invalidating this TB
1175 before we are done. */
1177 /* add in the physical hash table */
1178 h
= tb_phys_hash_func(phys_pc
);
1179 ptb
= &tb_phys_hash
[h
];
1180 tb
->phys_hash_next
= *ptb
;
1183 /* add in the page list */
1184 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1185 if (phys_page2
!= -1)
1186 tb_alloc_page(tb
, 1, phys_page2
);
1188 tb
->page_addr
[1] = -1;
1190 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1191 tb
->jmp_next
[0] = NULL
;
1192 tb
->jmp_next
[1] = NULL
;
1194 /* init original jump addresses */
1195 if (tb
->tb_next_offset
[0] != 0xffff)
1196 tb_reset_jump(tb
, 0);
1197 if (tb
->tb_next_offset
[1] != 0xffff)
1198 tb_reset_jump(tb
, 1);
1200 #ifdef DEBUG_TB_CHECK
1206 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1207 tb[1].tc_ptr. Return NULL if not found */
1208 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1210 int m_min
, m_max
, m
;
1212 TranslationBlock
*tb
;
1216 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1217 tc_ptr
>= (unsigned long)code_gen_ptr
)
1219 /* binary search (cf Knuth) */
1222 while (m_min
<= m_max
) {
1223 m
= (m_min
+ m_max
) >> 1;
1225 v
= (unsigned long)tb
->tc_ptr
;
1228 else if (tc_ptr
< v
) {
1237 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1239 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1241 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1244 tb1
= tb
->jmp_next
[n
];
1246 /* find head of list */
1249 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1252 tb1
= tb1
->jmp_next
[n1
];
1254 /* we are now sure now that tb jumps to tb1 */
1257 /* remove tb from the jmp_first list */
1258 ptb
= &tb_next
->jmp_first
;
1262 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1263 if (n1
== n
&& tb1
== tb
)
1265 ptb
= &tb1
->jmp_next
[n1
];
1267 *ptb
= tb
->jmp_next
[n
];
1268 tb
->jmp_next
[n
] = NULL
;
1270 /* suppress the jump to next tb in generated code */
1271 tb_reset_jump(tb
, n
);
1273 /* suppress jumps in the tb on which we could have jumped */
1274 tb_reset_jump_recursive(tb_next
);
1278 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1280 tb_reset_jump_recursive2(tb
, 0);
1281 tb_reset_jump_recursive2(tb
, 1);
1284 #if defined(TARGET_HAS_ICE)
1285 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1287 target_phys_addr_t addr
;
1289 ram_addr_t ram_addr
;
1292 addr
= cpu_get_phys_page_debug(env
, pc
);
1293 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1295 pd
= IO_MEM_UNASSIGNED
;
1297 pd
= p
->phys_offset
;
1299 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1300 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1304 /* Add a watchpoint. */
1305 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1306 int flags
, CPUWatchpoint
**watchpoint
)
1308 target_ulong len_mask
= ~(len
- 1);
1311 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1312 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1313 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1314 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1317 wp
= qemu_malloc(sizeof(*wp
));
1320 wp
->len_mask
= len_mask
;
1323 /* keep all GDB-injected watchpoints in front */
1325 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1327 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1329 tlb_flush_page(env
, addr
);
1336 /* Remove a specific watchpoint. */
1337 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1340 target_ulong len_mask
= ~(len
- 1);
1343 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1344 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1345 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1346 cpu_watchpoint_remove_by_ref(env
, wp
);
1353 /* Remove a specific watchpoint by reference. */
1354 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1356 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1358 tlb_flush_page(env
, watchpoint
->vaddr
);
1360 qemu_free(watchpoint
);
1363 /* Remove all matching watchpoints. */
1364 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1366 CPUWatchpoint
*wp
, *next
;
1368 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1369 if (wp
->flags
& mask
)
1370 cpu_watchpoint_remove_by_ref(env
, wp
);
1374 /* Add a breakpoint. */
1375 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1376 CPUBreakpoint
**breakpoint
)
1378 #if defined(TARGET_HAS_ICE)
1381 bp
= qemu_malloc(sizeof(*bp
));
1386 /* keep all GDB-injected breakpoints in front */
1388 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1390 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1392 breakpoint_invalidate(env
, pc
);
1402 /* Remove a specific breakpoint. */
1403 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1405 #if defined(TARGET_HAS_ICE)
1408 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1409 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1410 cpu_breakpoint_remove_by_ref(env
, bp
);
1420 /* Remove a specific breakpoint by reference. */
1421 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1423 #if defined(TARGET_HAS_ICE)
1424 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1426 breakpoint_invalidate(env
, breakpoint
->pc
);
1428 qemu_free(breakpoint
);
1432 /* Remove all matching breakpoints. */
1433 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1435 #if defined(TARGET_HAS_ICE)
1436 CPUBreakpoint
*bp
, *next
;
1438 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1439 if (bp
->flags
& mask
)
1440 cpu_breakpoint_remove_by_ref(env
, bp
);
1445 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1446 CPU loop after each instruction */
1447 void cpu_single_step(CPUState
*env
, int enabled
)
1449 #if defined(TARGET_HAS_ICE)
1450 if (env
->singlestep_enabled
!= enabled
) {
1451 env
->singlestep_enabled
= enabled
;
1452 /* must flush all the translated code to avoid inconsistancies */
1453 /* XXX: only flush what is necessary */
1459 /* enable or disable low levels log */
1460 void cpu_set_log(int log_flags
)
1462 loglevel
= log_flags
;
1463 if (loglevel
&& !logfile
) {
1464 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1466 perror(logfilename
);
1469 #if !defined(CONFIG_SOFTMMU)
1470 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1472 static char logfile_buf
[4096];
1473 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1476 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1480 if (!loglevel
&& logfile
) {
1486 void cpu_set_log_filename(const char *filename
)
1488 logfilename
= strdup(filename
);
1493 cpu_set_log(loglevel
);
1496 /* mask must never be zero, except for A20 change call */
1497 void cpu_interrupt(CPUState
*env
, int mask
)
1499 #if !defined(USE_NPTL)
1500 TranslationBlock
*tb
;
1501 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1505 if (mask
& CPU_INTERRUPT_EXIT
) {
1506 env
->exit_request
= 1;
1507 mask
&= ~CPU_INTERRUPT_EXIT
;
1510 old_mask
= env
->interrupt_request
;
1511 env
->interrupt_request
|= mask
;
1512 #if defined(USE_NPTL)
1513 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1514 problem and hope the cpu will stop of its own accord. For userspace
1515 emulation this often isn't actually as bad as it sounds. Often
1516 signals are used primarily to interrupt blocking syscalls. */
1519 env
->icount_decr
.u16
.high
= 0xffff;
1520 #ifndef CONFIG_USER_ONLY
1522 && (mask
& ~old_mask
) != 0) {
1523 cpu_abort(env
, "Raised interrupt while not in I/O function");
1527 tb
= env
->current_tb
;
1528 /* if the cpu is currently executing code, we must unlink it and
1529 all the potentially executing TB */
1530 if (tb
&& !testandset(&interrupt_lock
)) {
1531 env
->current_tb
= NULL
;
1532 tb_reset_jump_recursive(tb
);
1533 resetlock(&interrupt_lock
);
1539 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1541 env
->interrupt_request
&= ~mask
;
1544 const CPULogItem cpu_log_items
[] = {
1545 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1546 "show generated host assembly code for each compiled TB" },
1547 { CPU_LOG_TB_IN_ASM
, "in_asm",
1548 "show target assembly code for each compiled TB" },
1549 { CPU_LOG_TB_OP
, "op",
1550 "show micro ops for each compiled TB" },
1551 { CPU_LOG_TB_OP_OPT
, "op_opt",
1554 "before eflags optimization and "
1556 "after liveness analysis" },
1557 { CPU_LOG_INT
, "int",
1558 "show interrupts/exceptions in short format" },
1559 { CPU_LOG_EXEC
, "exec",
1560 "show trace before each executed TB (lots of logs)" },
1561 { CPU_LOG_TB_CPU
, "cpu",
1562 "show CPU state before block translation" },
1564 { CPU_LOG_PCALL
, "pcall",
1565 "show protected mode far calls/returns/exceptions" },
1566 { CPU_LOG_RESET
, "cpu_reset",
1567 "show CPU state before CPU resets" },
1570 { CPU_LOG_IOPORT
, "ioport",
1571 "show all i/o ports accesses" },
/* Compare the first 'n' bytes of 's1' against the NUL-terminated string
   's2'.  Returns non-zero only when 's2' is exactly 'n' characters long
   and the bytes match.  The explicit (size_t) cast avoids a signed/
   unsigned comparison between strlen()'s size_t result and the int 'n'. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (n < 0 || strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1583 /* takes a comma separated list of log masks. Return 0 if error. */
1584 int cpu_str_to_log_mask(const char *str
)
1586 const CPULogItem
*item
;
1593 p1
= strchr(p
, ',');
1596 if(cmp1(p
,p1
-p
,"all")) {
1597 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1601 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1602 if (cmp1(p
, p1
- p
, item
->name
))
1616 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1623 fprintf(stderr
, "qemu: fatal: ");
1624 vfprintf(stderr
, fmt
, ap
);
1625 fprintf(stderr
, "\n");
1627 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1629 cpu_dump_state(env
, stderr
, fprintf
, 0);
1631 if (qemu_log_enabled()) {
1632 qemu_log("qemu: fatal: ");
1633 qemu_log_vprintf(fmt
, ap2
);
1636 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1638 log_cpu_state(env
, 0);
1648 CPUState
*cpu_copy(CPUState
*env
)
1650 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1651 CPUState
*next_cpu
= new_env
->next_cpu
;
1652 int cpu_index
= new_env
->cpu_index
;
1653 #if defined(TARGET_HAS_ICE)
1658 memcpy(new_env
, env
, sizeof(CPUState
));
1660 /* Preserve chaining and index. */
1661 new_env
->next_cpu
= next_cpu
;
1662 new_env
->cpu_index
= cpu_index
;
1664 /* Clone all break/watchpoints.
1665 Note: Once we support ptrace with hw-debug register access, make sure
1666 BP_CPU break/watchpoints are handled correctly on clone. */
1667 TAILQ_INIT(&env
->breakpoints
);
1668 TAILQ_INIT(&env
->watchpoints
);
1669 #if defined(TARGET_HAS_ICE)
1670 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1671 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1673 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1674 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1682 #if !defined(CONFIG_USER_ONLY)
1684 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1688 /* Discard jump cache entries for any tb which might potentially
1689 overlap the flushed page. */
1690 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1691 memset (&env
->tb_jmp_cache
[i
], 0,
1692 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1694 i
= tb_jmp_cache_hash_page(addr
);
1695 memset (&env
->tb_jmp_cache
[i
], 0,
1696 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1699 /* NOTE: if flush_global is true, also flush global entries (not
1701 void tlb_flush(CPUState
*env
, int flush_global
)
1705 #if defined(DEBUG_TLB)
1706 printf("tlb_flush:\n");
1708 /* must reset current TB so that interrupts cannot modify the
1709 links while we are modifying them */
1710 env
->current_tb
= NULL
;
1712 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1713 env
->tlb_table
[0][i
].addr_read
= -1;
1714 env
->tlb_table
[0][i
].addr_write
= -1;
1715 env
->tlb_table
[0][i
].addr_code
= -1;
1716 env
->tlb_table
[1][i
].addr_read
= -1;
1717 env
->tlb_table
[1][i
].addr_write
= -1;
1718 env
->tlb_table
[1][i
].addr_code
= -1;
1719 #if (NB_MMU_MODES >= 3)
1720 env
->tlb_table
[2][i
].addr_read
= -1;
1721 env
->tlb_table
[2][i
].addr_write
= -1;
1722 env
->tlb_table
[2][i
].addr_code
= -1;
1723 #if (NB_MMU_MODES == 4)
1724 env
->tlb_table
[3][i
].addr_read
= -1;
1725 env
->tlb_table
[3][i
].addr_write
= -1;
1726 env
->tlb_table
[3][i
].addr_code
= -1;
1731 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1734 if (env
->kqemu_enabled
) {
1735 kqemu_flush(env
, flush_global
);
1741 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1743 if (addr
== (tlb_entry
->addr_read
&
1744 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1745 addr
== (tlb_entry
->addr_write
&
1746 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1747 addr
== (tlb_entry
->addr_code
&
1748 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1749 tlb_entry
->addr_read
= -1;
1750 tlb_entry
->addr_write
= -1;
1751 tlb_entry
->addr_code
= -1;
1755 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1759 #if defined(DEBUG_TLB)
1760 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1762 /* must reset current TB so that interrupts cannot modify the
1763 links while we are modifying them */
1764 env
->current_tb
= NULL
;
1766 addr
&= TARGET_PAGE_MASK
;
1767 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1768 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1769 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1770 #if (NB_MMU_MODES >= 3)
1771 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1772 #if (NB_MMU_MODES == 4)
1773 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1777 tlb_flush_jmp_cache(env
, addr
);
1780 if (env
->kqemu_enabled
) {
1781 kqemu_flush_page(env
, addr
);
1786 /* update the TLBs so that writes to code in the virtual page 'addr'
1788 static void tlb_protect_code(ram_addr_t ram_addr
)
1790 cpu_physical_memory_reset_dirty(ram_addr
,
1791 ram_addr
+ TARGET_PAGE_SIZE
,
1795 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1796 tested for self modifying code */
1797 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1800 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1803 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1804 unsigned long start
, unsigned long length
)
1807 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1808 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1809 if ((addr
- start
) < length
) {
1810 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1815 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1819 unsigned long length
, start1
;
1823 start
&= TARGET_PAGE_MASK
;
1824 end
= TARGET_PAGE_ALIGN(end
);
1826 length
= end
- start
;
1829 len
= length
>> TARGET_PAGE_BITS
;
1831 /* XXX: should not depend on cpu context */
1833 if (env
->kqemu_enabled
) {
1836 for(i
= 0; i
< len
; i
++) {
1837 kqemu_set_notdirty(env
, addr
);
1838 addr
+= TARGET_PAGE_SIZE
;
1842 mask
= ~dirty_flags
;
1843 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1844 for(i
= 0; i
< len
; i
++)
1847 /* we modify the TLB cache so that the dirty bit will be set again
1848 when accessing the range */
1849 start1
= start
+ (unsigned long)phys_ram_base
;
1850 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1851 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1852 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1853 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1854 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1855 #if (NB_MMU_MODES >= 3)
1856 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1857 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1858 #if (NB_MMU_MODES == 4)
1859 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1860 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1866 int cpu_physical_memory_set_dirty_tracking(int enable
)
1868 in_migration
= enable
;
1872 int cpu_physical_memory_get_dirty_tracking(void)
1874 return in_migration
;
1877 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1880 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1883 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1885 ram_addr_t ram_addr
;
1887 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1888 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1889 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1890 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1891 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1896 /* update the TLB according to the current state of the dirty bits */
1897 void cpu_tlb_update_dirty(CPUState
*env
)
1900 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1901 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1902 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1903 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1904 #if (NB_MMU_MODES >= 3)
1905 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1906 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1907 #if (NB_MMU_MODES == 4)
1908 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1909 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1914 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1916 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1917 tlb_entry
->addr_write
= vaddr
;
1920 /* update the TLB corresponding to virtual page vaddr
1921 so that it is no longer dirty */
1922 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1926 vaddr
&= TARGET_PAGE_MASK
;
1927 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1928 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1929 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1930 #if (NB_MMU_MODES >= 3)
1931 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1932 #if (NB_MMU_MODES == 4)
1933 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1938 /* add a new TLB entry. At most one entry for a given virtual address
1939 is permitted. Return 0 if OK or 2 if the page could not be mapped
1940 (can only happen in non SOFTMMU mode for I/O pages or pages
1941 conflicting with the host address space). */
1942 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1943 target_phys_addr_t paddr
, int prot
,
1944 int mmu_idx
, int is_softmmu
)
1949 target_ulong address
;
1950 target_ulong code_address
;
1951 target_phys_addr_t addend
;
1955 target_phys_addr_t iotlb
;
1957 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1959 pd
= IO_MEM_UNASSIGNED
;
1961 pd
= p
->phys_offset
;
1963 #if defined(DEBUG_TLB)
1964 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1965 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1970 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1971 /* IO memory case (romd handled later) */
1972 address
|= TLB_MMIO
;
1974 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1975 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1977 iotlb
= pd
& TARGET_PAGE_MASK
;
1978 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1979 iotlb
|= IO_MEM_NOTDIRTY
;
1981 iotlb
|= IO_MEM_ROM
;
1983 /* IO handlers are currently passed a phsical address.
1984 It would be nice to pass an offset from the base address
1985 of that region. This would avoid having to special case RAM,
1986 and avoid full address decoding in every device.
1987 We can't use the high bits of pd for this because
1988 IO_MEM_ROMD uses these as a ram address. */
1989 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
1991 iotlb
+= p
->region_offset
;
1997 code_address
= address
;
1998 /* Make accesses to pages with watchpoints go via the
1999 watchpoint trap routines. */
2000 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2001 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2002 iotlb
= io_mem_watch
+ paddr
;
2003 /* TODO: The memory case can be optimized by not trapping
2004 reads of pages with a write breakpoint. */
2005 address
|= TLB_MMIO
;
2009 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2010 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2011 te
= &env
->tlb_table
[mmu_idx
][index
];
2012 te
->addend
= addend
- vaddr
;
2013 if (prot
& PAGE_READ
) {
2014 te
->addr_read
= address
;
2019 if (prot
& PAGE_EXEC
) {
2020 te
->addr_code
= code_address
;
2024 if (prot
& PAGE_WRITE
) {
2025 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2026 (pd
& IO_MEM_ROMD
)) {
2027 /* Write access calls the I/O callback. */
2028 te
->addr_write
= address
| TLB_MMIO
;
2029 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2030 !cpu_physical_memory_is_dirty(pd
)) {
2031 te
->addr_write
= address
| TLB_NOTDIRTY
;
2033 te
->addr_write
= address
;
2036 te
->addr_write
= -1;
2043 void tlb_flush(CPUState
*env
, int flush_global
)
2047 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2051 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2052 target_phys_addr_t paddr
, int prot
,
2053 int mmu_idx
, int is_softmmu
)
2058 /* dump memory mappings */
2059 void page_dump(FILE *f
)
2061 unsigned long start
, end
;
2062 int i
, j
, prot
, prot1
;
2065 fprintf(f
, "%-8s %-8s %-8s %s\n",
2066 "start", "end", "size", "prot");
2070 for(i
= 0; i
<= L1_SIZE
; i
++) {
2075 for(j
= 0;j
< L2_SIZE
; j
++) {
2080 if (prot1
!= prot
) {
2081 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2083 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2084 start
, end
, end
- start
,
2085 prot
& PAGE_READ
? 'r' : '-',
2086 prot
& PAGE_WRITE
? 'w' : '-',
2087 prot
& PAGE_EXEC
? 'x' : '-');
2101 int page_get_flags(target_ulong address
)
2105 p
= page_find(address
>> TARGET_PAGE_BITS
);
2111 /* modify the flags of a page and invalidate the code if
2112 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2113 depending on PAGE_WRITE */
2114 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2119 /* mmap_lock should already be held. */
2120 start
= start
& TARGET_PAGE_MASK
;
2121 end
= TARGET_PAGE_ALIGN(end
);
2122 if (flags
& PAGE_WRITE
)
2123 flags
|= PAGE_WRITE_ORG
;
2124 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2125 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2126 /* We may be called for host regions that are outside guest
2130 /* if the write protection is set, then we invalidate the code
2132 if (!(p
->flags
& PAGE_WRITE
) &&
2133 (flags
& PAGE_WRITE
) &&
2135 tb_invalidate_phys_page(addr
, 0, NULL
);
2141 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2147 if (start
+ len
< start
)
2148 /* we've wrapped around */
2151 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2152 start
= start
& TARGET_PAGE_MASK
;
2154 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2155 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2158 if( !(p
->flags
& PAGE_VALID
) )
2161 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2163 if (flags
& PAGE_WRITE
) {
2164 if (!(p
->flags
& PAGE_WRITE_ORG
))
2166 /* unprotect the page if it was put read-only because it
2167 contains translated code */
2168 if (!(p
->flags
& PAGE_WRITE
)) {
2169 if (!page_unprotect(addr
, 0, NULL
))
2178 /* called from signal handler: invalidate the code and unprotect the
2179 page. Return TRUE if the fault was succesfully handled. */
2180 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2182 unsigned int page_index
, prot
, pindex
;
2184 target_ulong host_start
, host_end
, addr
;
2186 /* Technically this isn't safe inside a signal handler. However we
2187 know this only ever happens in a synchronous SEGV handler, so in
2188 practice it seems to be ok. */
2191 host_start
= address
& qemu_host_page_mask
;
2192 page_index
= host_start
>> TARGET_PAGE_BITS
;
2193 p1
= page_find(page_index
);
2198 host_end
= host_start
+ qemu_host_page_size
;
2201 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2205 /* if the page was really writable, then we change its
2206 protection back to writable */
2207 if (prot
& PAGE_WRITE_ORG
) {
2208 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2209 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2210 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2211 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2212 p1
[pindex
].flags
|= PAGE_WRITE
;
2213 /* and since the content will be modified, we must invalidate
2214 the corresponding translated code. */
2215 tb_invalidate_phys_page(address
, pc
, puc
);
2216 #ifdef DEBUG_TB_CHECK
2217 tb_invalidate_check(address
);
2227 static inline void tlb_set_dirty(CPUState
*env
,
2228 unsigned long addr
, target_ulong vaddr
)
2231 #endif /* defined(CONFIG_USER_ONLY) */
2233 #if !defined(CONFIG_USER_ONLY)
2235 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2236 ram_addr_t memory
, ram_addr_t region_offset
);
2237 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2238 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2239 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2242 if (addr > start_addr) \
2245 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2246 if (start_addr2 > 0) \
2250 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2251 end_addr2 = TARGET_PAGE_SIZE - 1; \
2253 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2254 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2259 /* register physical memory. 'size' must be a multiple of the target
2260 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2261 io memory page. The address used when calling the IO function is
2262 the offset from the start of the region, plus region_offset. Both
2263 start_region and regon_offset are rounded down to a page boundary
2264 before calculating this offset. This should not be a problem unless
2265 the low bits of start_addr and region_offset differ. */
2266 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2268 ram_addr_t phys_offset
,
2269 ram_addr_t region_offset
)
2271 target_phys_addr_t addr
, end_addr
;
2274 ram_addr_t orig_size
= size
;
2278 /* XXX: should not depend on cpu context */
2280 if (env
->kqemu_enabled
) {
2281 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2285 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2287 if (phys_offset
== IO_MEM_UNASSIGNED
) {
2288 region_offset
= start_addr
;
2290 region_offset
&= TARGET_PAGE_MASK
;
2291 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2292 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2293 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2294 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2295 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2296 ram_addr_t orig_memory
= p
->phys_offset
;
2297 target_phys_addr_t start_addr2
, end_addr2
;
2298 int need_subpage
= 0;
2300 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2302 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2303 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2304 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2305 &p
->phys_offset
, orig_memory
,
2308 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2311 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2313 p
->region_offset
= 0;
2315 p
->phys_offset
= phys_offset
;
2316 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2317 (phys_offset
& IO_MEM_ROMD
))
2318 phys_offset
+= TARGET_PAGE_SIZE
;
2321 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2322 p
->phys_offset
= phys_offset
;
2323 p
->region_offset
= region_offset
;
2324 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2325 (phys_offset
& IO_MEM_ROMD
)) {
2326 phys_offset
+= TARGET_PAGE_SIZE
;
2328 target_phys_addr_t start_addr2
, end_addr2
;
2329 int need_subpage
= 0;
2331 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2332 end_addr2
, need_subpage
);
2334 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2335 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2336 &p
->phys_offset
, IO_MEM_UNASSIGNED
,
2337 addr
& TARGET_PAGE_MASK
);
2338 subpage_register(subpage
, start_addr2
, end_addr2
,
2339 phys_offset
, region_offset
);
2340 p
->region_offset
= 0;
2344 region_offset
+= TARGET_PAGE_SIZE
;
2347 /* since each CPU stores ram addresses in its TLB cache, we must
2348 reset the modified entries */
2350 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2355 /* XXX: temporary until new memory mapping API */
2356 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2360 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2362 return IO_MEM_UNASSIGNED
;
2363 return p
->phys_offset
;
2366 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2369 kvm_coalesce_mmio_region(addr
, size
);
2372 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2375 kvm_uncoalesce_mmio_region(addr
, size
);
2378 /* XXX: better than nothing */
2379 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2382 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2383 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
")\n",
2384 (uint64_t)size
, (uint64_t)phys_ram_size
);
2387 addr
= phys_ram_alloc_offset
;
2388 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2391 kvm_setup_guest_memory(phys_ram_base
+ addr
, size
);
2396 void qemu_ram_free(ram_addr_t addr
)
2400 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2402 #ifdef DEBUG_UNASSIGNED
2403 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2405 #if defined(TARGET_SPARC)
2406 do_unassigned_access(addr
, 0, 0, 0, 1);
2411 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
2413 #ifdef DEBUG_UNASSIGNED
2414 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2416 #if defined(TARGET_SPARC)
2417 do_unassigned_access(addr
, 0, 0, 0, 2);
2422 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
2424 #ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2427 #if defined(TARGET_SPARC)
2428 do_unassigned_access(addr
, 0, 0, 0, 4);
2433 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2435 #ifdef DEBUG_UNASSIGNED
2436 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2438 #if defined(TARGET_SPARC)
2439 do_unassigned_access(addr
, 1, 0, 0, 1);
2443 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2445 #ifdef DEBUG_UNASSIGNED
2446 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2448 #if defined(TARGET_SPARC)
2449 do_unassigned_access(addr
, 1, 0, 0, 2);
2453 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2455 #ifdef DEBUG_UNASSIGNED
2456 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2458 #if defined(TARGET_SPARC)
2459 do_unassigned_access(addr
, 1, 0, 0, 4);
2463 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2464 unassigned_mem_readb
,
2465 unassigned_mem_readw
,
2466 unassigned_mem_readl
,
2469 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2470 unassigned_mem_writeb
,
2471 unassigned_mem_writew
,
2472 unassigned_mem_writel
,
2475 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2479 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2480 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2481 #if !defined(CONFIG_USER_ONLY)
2482 tb_invalidate_phys_page_fast(ram_addr
, 1);
2483 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2486 stb_p(phys_ram_base
+ ram_addr
, val
);
2488 if (cpu_single_env
->kqemu_enabled
&&
2489 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2490 kqemu_modify_page(cpu_single_env
, ram_addr
);
2492 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2493 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2494 /* we remove the notdirty callback only if the code has been
2496 if (dirty_flags
== 0xff)
2497 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2500 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2504 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2505 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2506 #if !defined(CONFIG_USER_ONLY)
2507 tb_invalidate_phys_page_fast(ram_addr
, 2);
2508 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2511 stw_p(phys_ram_base
+ ram_addr
, val
);
2513 if (cpu_single_env
->kqemu_enabled
&&
2514 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2515 kqemu_modify_page(cpu_single_env
, ram_addr
);
2517 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2518 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2519 /* we remove the notdirty callback only if the code has been
2521 if (dirty_flags
== 0xff)
2522 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2525 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2529 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2530 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2531 #if !defined(CONFIG_USER_ONLY)
2532 tb_invalidate_phys_page_fast(ram_addr
, 4);
2533 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2536 stl_p(phys_ram_base
+ ram_addr
, val
);
2538 if (cpu_single_env
->kqemu_enabled
&&
2539 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2540 kqemu_modify_page(cpu_single_env
, ram_addr
);
2542 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2543 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2544 /* we remove the notdirty callback only if the code has been
2546 if (dirty_flags
== 0xff)
2547 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2550 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2551 NULL
, /* never used */
2552 NULL
, /* never used */
2553 NULL
, /* never used */
2556 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2557 notdirty_mem_writeb
,
2558 notdirty_mem_writew
,
2559 notdirty_mem_writel
,
/* Generate a debug exception if a watchpoint has been hit.
   Called from the watch_mem_* handlers with the page offset of the access;
   len_mask/flags select which watchpoints can match.
   NOTE(review): local declarations and braces reconstructed from a
   whitespace-mangled source — verify against upstream exec.c. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the faulting access. */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Match either an exact (masked) address or a range watchpoint,
           and only for the requested access kind (read/write). */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Roll CPU state back to the faulting instruction, then
                   retranslate so execution stops at the right point. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a single-instruction TB so the access
                       completes before the debug exception is raised. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. The second argument of check_watchpoint is the address
   alignment mask for the access size. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
/* Write-side watchpoint shims: flag a possible watchpoint hit, then
   forward the store to the normal physical-memory helpers. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
/* Dispatch tables for the watchpoint io region (byte/word/long).
   NOTE(review): entries reconstructed — the mangled source dropped them;
   verify against upstream exec.c. */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
/* Dispatch a read within a sub-page region to the handler registered for
   the slot containing addr; len selects the access size (0=byte, 1=word,
   2=long). Index 0 in opaque/region_offset is the read direction. */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    /* mem_read[idx][len] stores a pointer into io_mem_read, hence the
       double dereference. */
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}
/* Dispatch a write within a sub-page region; mirror of subpage_readlen.
   Index 1 in opaque/region_offset is the write direction. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
/* Size-specific sub-page accessors; thin wrappers that pass the access
   size (0=byte, 1=word, 2=long) to subpage_readlen/subpage_writelen. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
/* Dispatch tables for sub-page io regions.
   NOTE(review): entries reconstructed — dropped by the mangled source;
   verify against upstream exec.c. */
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
2759 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2760 ram_addr_t memory
, ram_addr_t region_offset
)
2765 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2767 idx
= SUBPAGE_IDX(start
);
2768 eidx
= SUBPAGE_IDX(end
);
2769 #if defined(DEBUG_SUBPAGE)
2770 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2771 mmio
, start
, end
, idx
, eidx
, memory
);
2773 memory
>>= IO_MEM_SHIFT
;
2774 for (; idx
<= eidx
; idx
++) {
2775 for (i
= 0; i
< 4; i
++) {
2776 if (io_mem_read
[memory
][i
]) {
2777 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2778 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2779 mmio
->region_offset
[idx
][0][i
] = region_offset
;
2781 if (io_mem_write
[memory
][i
]) {
2782 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2783 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2784 mmio
->region_offset
[idx
][1][i
] = region_offset
;
/* Allocate and register a sub-page io region covering one target page at
   `base`. Writes the io table cookie (tagged IO_MEM_SUBPAGE) into *phys
   and initially maps the whole page to orig_memory/region_offset.
   Returns the new subpage_t (owned by the io memory subsystem). */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
2813 static int get_free_io_mem_idx(void)
2817 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
2818 if (!io_mem_used
[i
]) {
/* One-time setup of the io memory tables: install handlers for the fixed
   ROM / unassigned / notdirty slots, register the watchpoint region, and
   allocate the per-page dirty-bit array (all pages start dirty).
   NOTE(review): the loop pre-marking the fixed slots as used was dropped
   by the mangled source and is reconstructed — verify upstream. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* Allocate a fresh slot. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* A missing handler for any width marks the region as sub-width
           so accesses get split up. */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    /* Cookie: slot index shifted up, tagged with the subwidth flag. */
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
2877 void cpu_unregister_io_memory(int io_table_address
)
2880 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
2882 for (i
=0;i
< 3; i
++) {
2883 io_mem_read
[io_index
][i
] = unassigned_mem_read
[i
];
2884 io_mem_write
[io_index
][i
] = unassigned_mem_write
[i
];
2886 io_mem_opaque
[io_index
] = NULL
;
2887 io_mem_used
[io_index
] = 0;
2890 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2892 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2895 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2897 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2900 #endif /* !defined(CONFIG_USER_ONLY) */
2902 /* physical memory access (slow version, mainly for debug) */
2903 #if defined(CONFIG_USER_ONLY)
/* Usermode-emulation version: copy len bytes between buf and guest memory
   at addr, page by page, honoring page protection flags. Silently stops
   at the first invalid/unwritable/unreadable page.
   NOTE(review): loop skeleton reconstructed from a mangled source. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* System-emulation version: copy len bytes between buf and guest physical
   memory at addr. RAM pages are memcpy'd directly (invalidating any
   translated code and updating dirty bits on writes); MMIO pages are
   accessed through the registered io handlers in the widest aligned
   chunks possible (4/2/1 bytes).
   NOTE(review): declarations/braces reconstructed from a mangled source. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
/* Like the write path of cpu_physical_memory_rw() but also writes into
   ROM pages; pages that are neither RAM nor ROM are skipped. Dirty bits
   are deliberately not touched here (boot-time loading). */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* Bounce buffer used by cpu_physical_memory_map() when the target range
   is not directly addressable RAM. Only one may be in flight at a time.
   NOTE(review): struct header/buffer field reconstructed from a mangled
   source — verify field order against upstream. */
typedef struct {
    void *buffer;              /* host staging area, NULL when free */
    target_phys_addr_t addr;   /* guest physical start of mapped range */
    target_phys_addr_t len;    /* length of mapped range */
} BounceBuffer;

static BounceBuffer bounce;
/* Client waiting to retry cpu_physical_memory_map() once the bounce
   buffer becomes free; see cpu_register_map_client(). */
typedef struct MapClient {
    void *opaque;                    /* passed back to callback */
    void (*callback)(void *opaque);  /* invoked when resources free up */
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
3098 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3100 MapClient
*client
= qemu_malloc(sizeof(*client
));
3102 client
->opaque
= opaque
;
3103 client
->callback
= callback
;
3104 LIST_INSERT_HEAD(&map_client_list
, client
, link
);
3108 void cpu_unregister_map_client(void *_client
)
3110 MapClient
*client
= (MapClient
*)_client
;
3112 LIST_REMOVE(client
, link
);
3116 static void cpu_notify_map_clients(void)
3120 while (!LIST_EMPTY(&map_client_list
)) {
3121 client
= LIST_FIRST(&map_client_list
);
3122 client
->callback(client
->opaque
);
3123 cpu_unregister_map_client(client
);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 * NOTE(review): declarations and loop tail reconstructed from a mangled
 * source — verify against upstream exec.c. */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM: fall back to the single bounce buffer; only one
               such mapping may exist at a time, and it cannot be
               combined with directly-mapped RAM. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for readers. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses are no longer contiguous: stop here. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller. */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: on write, invalidate translated code and
           set dirty bits for every touched page. */
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush writes back, release the buffer and
       wake any clients waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical memory; RAM/ROMD pages read
   directly, anything else goes through the io read handler. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical memory. The io path only has
   32-bit handlers, so it is assembled from two 4-byte reads in guest
   byte order. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3296 uint32_t ldub_phys(target_phys_addr_t addr
)
3299 cpu_physical_memory_read(addr
, &val
, 1);
3304 uint32_t lduw_phys(target_phys_addr_t addr
)
3307 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
3308 return tswap16(val
);
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        /* During migration the dirty state must still be maintained so
           the page gets re-sent. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
/* 64-bit variant of stl_phys_notdirty: dirty bits are not updated and
   translated code is not invalidated. The io path splits the store into
   two 32-bit writes in guest byte order. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
/* Store a 32-bit value to guest physical memory. RAM stores invalidate
   any translated code on the page and set the dirty bits; non-RAM goes
   through the io write handler. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
3419 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3422 cpu_physical_memory_write(addr
, &v
, 1);
3426 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3428 uint16_t v
= tswap16(val
);
3429 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3433 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3436 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
/* virtual memory access for debug */
/* Translate each guest-virtual page via cpu_get_phys_page_debug() and
   forward to cpu_physical_memory_rw(). Returns 0 on success, -1 if any
   page in the range is unmapped. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called when an icount-mode TB performed I/O mid-block: rewind to the
   faulting instruction, retranslate a TB that ends exactly on it
   (CF_LAST_IO) and restart execution there.
   NOTE(review): locals and some statements reconstructed from a mangled
   source — verify against upstream exec.c. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
/* Print translation-buffer statistics (sizes, cross-page TBs, direct-jump
   chaining, flush counters) through the caller-supplied fprintf-like
   callback, then append TCG's own statistics. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A second page address means the TB spans a page boundary. */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks an unpatched jump slot. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
3579 #if !defined(CONFIG_USER_ONLY)
3581 #define MMUSUFFIX _cmmu
3582 #define GETPC() NULL
3583 #define env cpu_single_env
3584 #define SOFTMMU_CODE_ACCESS
3587 #include "softmmu_template.h"
3590 #include "softmmu_template.h"
3593 #include "softmmu_template.h"
3596 #include "softmmu_template.h"