/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>
#include "qemu-common.h"

#if !defined(TARGET_IA64)
#if defined(CONFIG_USER_ONLY)
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_IA64)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
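/* code_gen_ptr advances through code_gen_buffer as blocks are translated;
   tb_alloc() refuses to hand out a new TB (forcing a flush) once the used
   size reaches code_gen_buffer_max_size, which code_gen_alloc() sets to the
   buffer size minus the worst-case size of a single block. */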
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
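/* Page descriptors are kept in a two-level table: a page index
   (address >> TARGET_PAGE_BITS) is split into a high L1 part
   (index >> L2_BITS) selecting a slot of l1_map / l1_phys_map, and a low
   L2 part (index & (L2_SIZE - 1)) selecting the entry inside the lazily
   allocated second-level block. */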
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
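/* map_exec() makes an already allocated buffer executable: with
   VirtualProtect() on win32, and otherwise by mprotect()ing the
   page-aligned range covering [addr, addr + size). */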
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf(f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}
305 static inline PageDesc
*page_find_alloc(target_ulong index
)
308 lp
= page_l1_map(index
);
314 /* allocate if not found */
315 #if defined(CONFIG_USER_ONLY)
316 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
317 /* Don't use qemu_malloc because it may recurse. */
318 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
319 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
322 unsigned long addr
= h2g(p
);
323 page_set_flags(addr
& TARGET_PAGE_MASK
,
324 TARGET_PAGE_ALIGN(addr
+ len
),
328 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
332 return p
+ (index
& (L2_SIZE
- 1));
335 static inline PageDesc
*page_find(target_ulong index
)
338 lp
= page_l1_map(index
);
345 return p
+ (index
& (L2_SIZE
- 1));
348 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
353 p
= (void **)l1_phys_map
;
354 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
356 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
357 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
359 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
362 /* allocate if not found */
365 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
366 memset(p
, 0, sizeof(void *) * L1_SIZE
);
370 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
374 /* allocate if not found */
377 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
379 for (i
= 0; i
< L2_SIZE
; i
++) {
380 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
381 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
384 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
387 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
389 return phys_page_find_alloc(index
, 0);
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
412 static void code_gen_alloc(unsigned long tb_size
)
417 #ifdef USE_STATIC_CODE_GEN_BUFFER
418 code_gen_buffer
= static_code_gen_buffer
;
419 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
420 map_exec(code_gen_buffer
, code_gen_buffer_size
);
422 code_gen_buffer_size
= tb_size
;
423 if (code_gen_buffer_size
== 0) {
424 #if defined(CONFIG_USER_ONLY)
425 /* in user mode, phys_ram_size is not meaningful */
426 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
        /* XXX: needs adjustments */
429 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
432 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
433 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
434 /* The code gen buffer location may have constraints depending on
435 the host cpu and OS */
436 #if defined(__linux__)
441 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
442 #if defined(__x86_64__)
444 /* Cannot map more than that */
445 if (code_gen_buffer_size
> (800 * 1024 * 1024))
446 code_gen_buffer_size
= (800 * 1024 * 1024);
447 #elif defined(__sparc_v9__)
448 // Map the buffer below 2G, so we can use direct calls and branches
450 start
= (void *) 0x60000000UL
;
451 if (code_gen_buffer_size
> (512 * 1024 * 1024))
452 code_gen_buffer_size
= (512 * 1024 * 1024);
453 #elif defined(__arm__)
454 /* Map the buffer below 32M, so we can use direct calls and branches */
456 start
= (void *) 0x01000000UL
;
457 if (code_gen_buffer_size
> 16 * 1024 * 1024)
458 code_gen_buffer_size
= 16 * 1024 * 1024;
460 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
461 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
463 if (code_gen_buffer
== MAP_FAILED
) {
464 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
468 #elif defined(__FreeBSD__)
472 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
473 #if defined(__x86_64__)
474 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
475 * 0x40000000 is free */
477 addr
= (void *)0x40000000;
478 /* Cannot map more than that */
479 if (code_gen_buffer_size
> (800 * 1024 * 1024))
480 code_gen_buffer_size
= (800 * 1024 * 1024);
482 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
483 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
485 if (code_gen_buffer
== MAP_FAILED
) {
486 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
491 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
492 map_exec(code_gen_buffer
, code_gen_buffer_size
);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;

    return 0;
}
#endif
544 void cpu_exec_init(CPUState
*env
)
549 env
->next_cpu
= NULL
;
552 while (*penv
!= NULL
) {
553 penv
= (CPUState
**)&(*penv
)->next_cpu
;
556 env
->cpu_index
= cpu_index
;
557 TAILQ_INIT(&env
->breakpoints
);
558 TAILQ_INIT(&env
->watchpoints
);
560 env
->thread_id
= GetCurrentProcessId();
562 env
->thread_id
= getpid();
565 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
566 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
567 cpu_common_save
, cpu_common_load
, env
);
568 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
569 cpu_save
, cpu_load
, env
);
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
582 /* set to NULL all the 'first_tb' fields in all PageDescs */
583 static void page_flush_tb(void)
588 for(i
= 0; i
< L1_SIZE
; i
++) {
591 for(j
= 0; j
< L2_SIZE
; j
++) {
593 invalidate_page_bitmap(p
);
600 /* flush all the translation blocks */
601 /* XXX: tb_flush is currently not thread safe */
602 void tb_flush(CPUState
*env1
)
605 #if defined(DEBUG_FLUSH)
606 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
607 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
609 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
611 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
612 cpu_abort(env1
, "Internal error: code buffer overflow\n");
616 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
617 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
620 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
623 code_gen_ptr
= code_gen_buffer
;
624 /* XXX: flush processor icache at this point if cache flush is
629 #ifdef DEBUG_TB_CHECK
631 static void tb_invalidate_check(target_ulong address
)
633 TranslationBlock
*tb
;
635 address
&= TARGET_PAGE_MASK
;
636 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
637 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
638 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
639 address
>= tb
->pc
+ tb
->size
)) {
640 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
641 address
, (long)tb
->pc
, tb
->size
);
647 /* verify that all the pages have correct rights for code */
648 static void tb_page_check(void)
650 TranslationBlock
*tb
;
651 int i
, flags1
, flags2
;
653 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
654 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
655 flags1
= page_get_flags(tb
->pc
);
656 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
657 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
658 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
659 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
665 static void tb_jmp_check(TranslationBlock
*tb
)
667 TranslationBlock
*tb1
;
670 /* suppress any remaining jumps to this TB */
674 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
677 tb1
= tb1
->jmp_next
[n1
];
679 /* check end of list */
681 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
687 /* invalidate one TB */
688 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
691 TranslationBlock
*tb1
;
695 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
698 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
702 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
704 TranslationBlock
*tb1
;
710 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
712 *ptb
= tb1
->page_next
[n1
];
715 ptb
= &tb1
->page_next
[n1
];
719 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
721 TranslationBlock
*tb1
, **ptb
;
724 ptb
= &tb
->jmp_next
[n
];
727 /* find tb(n) in circular list */
731 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
732 if (n1
== n
&& tb1
== tb
)
735 ptb
= &tb1
->jmp_first
;
737 ptb
= &tb1
->jmp_next
[n1
];
740 /* now we can suppress tb(n) from the list */
741 *ptb
= tb
->jmp_next
[n
];
743 tb
->jmp_next
[n
] = NULL
;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
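/* tb_phys_invalidate() below removes one TB: it is unhooked from the
   physical hash bucket, from the per-page TB lists, from every CPU's
   tb_jmp_cache, and any TBs that were chained to it get their direct
   jumps reset so they fall back to the exit path. */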
754 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
759 target_phys_addr_t phys_pc
;
760 TranslationBlock
*tb1
, *tb2
;
762 /* remove the TB from the hash list */
763 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
764 h
= tb_phys_hash_func(phys_pc
);
765 tb_remove(&tb_phys_hash
[h
], tb
,
766 offsetof(TranslationBlock
, phys_hash_next
));
768 /* remove the TB from the page list */
769 if (tb
->page_addr
[0] != page_addr
) {
770 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
771 tb_page_remove(&p
->first_tb
, tb
);
772 invalidate_page_bitmap(p
);
774 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
775 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
776 tb_page_remove(&p
->first_tb
, tb
);
777 invalidate_page_bitmap(p
);
780 tb_invalidated_flag
= 1;
782 /* remove the TB from the hash list */
783 h
= tb_jmp_cache_hash_func(tb
->pc
);
784 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
785 if (env
->tb_jmp_cache
[h
] == tb
)
786 env
->tb_jmp_cache
[h
] = NULL
;
789 /* suppress this TB from the two jump lists */
790 tb_jmp_remove(tb
, 0);
791 tb_jmp_remove(tb
, 1);
793 /* suppress any remaining jumps to this TB */
799 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
800 tb2
= tb1
->jmp_next
[n1
];
801 tb_reset_jump(tb1
, n1
);
802 tb1
->jmp_next
[n1
] = NULL
;
805 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
807 tb_phys_invalidate_count
++;
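/* Note on the masking with ~3 above: pointers stored in jmp_first /
   jmp_next carry the jump slot number in their low bits, and
   ((long)tb | 2) marks the end of the circular list, so the tag bits must
   be stripped before the pointer is dereferenced. */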
810 static inline void set_bits(uint8_t *tab
, int start
, int len
)
816 mask
= 0xff << (start
& 7);
817 if ((start
& ~7) == (end
& ~7)) {
819 mask
&= ~(0xff << (end
& 7));
824 start
= (start
+ 8) & ~7;
826 while (start
< end1
) {
831 mask
= ~(0xff << (end
& 7));
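/* build_page_bitmap() below sets one bit per guest byte that is covered by
   an existing TB on the page; tb_invalidate_phys_page_fast() consults this
   bitmap so that writes which do not hit translated code stay cheap. */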
837 static void build_page_bitmap(PageDesc
*p
)
839 int n
, tb_start
, tb_end
;
840 TranslationBlock
*tb
;
842 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
847 tb
= (TranslationBlock
*)((long)tb
& ~3);
848 /* NOTE: this is subtle as a TB may span two physical pages */
850 /* NOTE: tb_end may be after the end of the page, but
851 it is not a problem */
852 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
853 tb_end
= tb_start
+ tb
->size
;
854 if (tb_end
> TARGET_PAGE_SIZE
)
855 tb_end
= TARGET_PAGE_SIZE
;
858 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
860 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
861 tb
= tb
->page_next
[n
];
865 TranslationBlock
*tb_gen_code(CPUState
*env
,
866 target_ulong pc
, target_ulong cs_base
,
867 int flags
, int cflags
)
869 TranslationBlock
*tb
;
871 target_ulong phys_pc
, phys_page2
, virt_page2
;
874 phys_pc
= get_phys_addr_code(env
, pc
);
877 /* flush must be done */
879 /* cannot fail at this point */
881 /* Don't forget to invalidate previous TB info. */
882 tb_invalidated_flag
= 1;
884 tc_ptr
= code_gen_ptr
;
886 tb
->cs_base
= cs_base
;
889 cpu_gen_code(env
, tb
, &code_gen_size
);
890 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
892 /* check next page if needed */
893 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
895 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
896 phys_page2
= get_phys_addr_code(env
, virt_page2
);
898 tb_link_phys(tb
, phys_pc
, phys_page2
);
902 /* invalidate all TBs which intersect with the target physical page
903 starting in range [start;end[. NOTE: start and end must refer to
904 the same physical page. 'is_cpu_write_access' should be true if called
905 from a real cpu write access: the virtual CPU will exit the current
906 TB if code is modified inside this TB. */
907 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
908 int is_cpu_write_access
)
910 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
911 CPUState
*env
= cpu_single_env
;
912 target_ulong tb_start
, tb_end
;
915 #ifdef TARGET_HAS_PRECISE_SMC
916 int current_tb_not_found
= is_cpu_write_access
;
917 TranslationBlock
*current_tb
= NULL
;
918 int current_tb_modified
= 0;
919 target_ulong current_pc
= 0;
920 target_ulong current_cs_base
= 0;
921 int current_flags
= 0;
922 #endif /* TARGET_HAS_PRECISE_SMC */
924 p
= page_find(start
>> TARGET_PAGE_BITS
);
927 if (!p
->code_bitmap
&&
928 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
929 is_cpu_write_access
) {
930 /* build code bitmap */
931 build_page_bitmap(p
);
934 /* we remove all the TBs in the range [start, end[ */
935 /* XXX: see if in some cases it could be faster to invalidate all the code */
939 tb
= (TranslationBlock
*)((long)tb
& ~3);
940 tb_next
= tb
->page_next
[n
];
941 /* NOTE: this is subtle as a TB may span two physical pages */
943 /* NOTE: tb_end may be after the end of the page, but
944 it is not a problem */
945 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
946 tb_end
= tb_start
+ tb
->size
;
948 tb_start
= tb
->page_addr
[1];
949 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
951 if (!(tb_end
<= start
|| tb_start
>= end
)) {
952 #ifdef TARGET_HAS_PRECISE_SMC
953 if (current_tb_not_found
) {
954 current_tb_not_found
= 0;
956 if (env
->mem_io_pc
) {
957 /* now we have a real cpu fault */
958 current_tb
= tb_find_pc(env
->mem_io_pc
);
961 if (current_tb
== tb
&&
962 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
963 /* If we are modifying the current TB, we must stop
964 its execution. We could be more precise by checking
965 that the modification is after the current PC, but it
966 would require a specialized function to partially
967 restore the CPU state */
969 current_tb_modified
= 1;
970 cpu_restore_state(current_tb
, env
,
971 env
->mem_io_pc
, NULL
);
972 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
975 #endif /* TARGET_HAS_PRECISE_SMC */
976 /* we need to do that to handle the case where a signal
977 occurs while doing tb_phys_invalidate() */
980 saved_tb
= env
->current_tb
;
981 env
->current_tb
= NULL
;
983 tb_phys_invalidate(tb
, -1);
985 env
->current_tb
= saved_tb
;
986 if (env
->interrupt_request
&& env
->current_tb
)
987 cpu_interrupt(env
, env
->interrupt_request
);
992 #if !defined(CONFIG_USER_ONLY)
993 /* if no code remaining, no need to continue to use slow writes */
995 invalidate_page_bitmap(p
);
996 if (is_cpu_write_access
) {
997 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1001 #ifdef TARGET_HAS_PRECISE_SMC
1002 if (current_tb_modified
) {
1003 /* we generate a block containing just the instruction
1004 modifying the memory. It will ensure that it cannot modify
1006 env
->current_tb
= NULL
;
1007 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1008 cpu_resume_from_signal(env
, NULL
);
1013 /* len must be <= 8 and start must be a multiple of len */
1014 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1020 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1021 cpu_single_env
->mem_io_vaddr
, len
,
1022 cpu_single_env
->eip
,
1023 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1026 p
= page_find(start
>> TARGET_PAGE_BITS
);
1029 if (p
->code_bitmap
) {
1030 offset
= start
& ~TARGET_PAGE_MASK
;
1031 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1032 if (b
& ((1 << len
) - 1))
1036 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1040 #if !defined(CONFIG_SOFTMMU)
1041 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1042 unsigned long pc
, void *puc
)
1044 TranslationBlock
*tb
;
1047 #ifdef TARGET_HAS_PRECISE_SMC
1048 TranslationBlock
*current_tb
= NULL
;
1049 CPUState
*env
= cpu_single_env
;
1050 int current_tb_modified
= 0;
1051 target_ulong current_pc
= 0;
1052 target_ulong current_cs_base
= 0;
1053 int current_flags
= 0;
1056 addr
&= TARGET_PAGE_MASK
;
1057 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1061 #ifdef TARGET_HAS_PRECISE_SMC
1062 if (tb
&& pc
!= 0) {
1063 current_tb
= tb_find_pc(pc
);
1066 while (tb
!= NULL
) {
1068 tb
= (TranslationBlock
*)((long)tb
& ~3);
1069 #ifdef TARGET_HAS_PRECISE_SMC
1070 if (current_tb
== tb
&&
1071 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1072 /* If we are modifying the current TB, we must stop
1073 its execution. We could be more precise by checking
1074 that the modification is after the current PC, but it
1075 would require a specialized function to partially
1076 restore the CPU state */
1078 current_tb_modified
= 1;
1079 cpu_restore_state(current_tb
, env
, pc
, puc
);
1080 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1083 #endif /* TARGET_HAS_PRECISE_SMC */
1084 tb_phys_invalidate(tb
, addr
);
1085 tb
= tb
->page_next
[n
];
1088 #ifdef TARGET_HAS_PRECISE_SMC
1089 if (current_tb_modified
) {
1090 /* we generate a block containing just the instruction
1091 modifying the memory. It will ensure that it cannot modify
1093 env
->current_tb
= NULL
;
1094 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1095 cpu_resume_from_signal(env
, puc
);
1101 /* add the tb in the target page and protect it if necessary */
1102 static inline void tb_alloc_page(TranslationBlock
*tb
,
1103 unsigned int n
, target_ulong page_addr
)
1106 TranslationBlock
*last_first_tb
;
1108 tb
->page_addr
[n
] = page_addr
;
1109 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1110 tb
->page_next
[n
] = p
->first_tb
;
1111 last_first_tb
= p
->first_tb
;
1112 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1113 invalidate_page_bitmap(p
);
1115 #if defined(TARGET_HAS_SMC) || 1
1117 #if defined(CONFIG_USER_ONLY)
1118 if (p
->flags
& PAGE_WRITE
) {
1123 /* force the host page as non writable (writes will have a
1124 page fault + mprotect overhead) */
1125 page_addr
&= qemu_host_page_mask
;
1127 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1128 addr
+= TARGET_PAGE_SIZE
) {
1130 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1134 p2
->flags
&= ~PAGE_WRITE
;
1135 page_get_flags(addr
);
1137 mprotect(g2h(page_addr
), qemu_host_page_size
,
1138 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1139 #ifdef DEBUG_TB_INVALIDATE
1140 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1145 /* if some code is already present, then the pages are already
1146 protected. So we handle the case where only the first TB is
1147 allocated in a physical page */
1148 if (!last_first_tb
) {
1149 tlb_protect_code(page_addr
);
1153 #endif /* TARGET_HAS_SMC */
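/* Summary of the protection scheme above: in user mode the host page
   containing translated code is mprotect()ed read-only so a later guest
   write faults into page_unprotect(); in system mode tlb_protect_code()
   makes the softmmu TLB catch the write instead. */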
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
1182 /* add a new TB and link it to the physical page tables. phys_page2 is
1183 (-1) to indicate that only one page contains the TB. */
1184 void tb_link_phys(TranslationBlock
*tb
,
1185 target_ulong phys_pc
, target_ulong phys_page2
)
1188 TranslationBlock
**ptb
;
1190 /* Grab the mmap lock to stop another thread invalidating this TB
1191 before we are done. */
1193 /* add in the physical hash table */
1194 h
= tb_phys_hash_func(phys_pc
);
1195 ptb
= &tb_phys_hash
[h
];
1196 tb
->phys_hash_next
= *ptb
;
1199 /* add in the page list */
1200 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1201 if (phys_page2
!= -1)
1202 tb_alloc_page(tb
, 1, phys_page2
);
1204 tb
->page_addr
[1] = -1;
1206 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1207 tb
->jmp_next
[0] = NULL
;
1208 tb
->jmp_next
[1] = NULL
;
1210 /* init original jump addresses */
1211 if (tb
->tb_next_offset
[0] != 0xffff)
1212 tb_reset_jump(tb
, 0);
1213 if (tb
->tb_next_offset
[1] != 0xffff)
1214 tb_reset_jump(tb
, 1);
1216 #ifdef DEBUG_TB_CHECK
1222 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1223 tb[1].tc_ptr. Return NULL if not found */
1224 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1226 int m_min
, m_max
, m
;
1228 TranslationBlock
*tb
;
1232 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1233 tc_ptr
>= (unsigned long)code_gen_ptr
)
1235 /* binary search (cf Knuth) */
1238 while (m_min
<= m_max
) {
1239 m
= (m_min
+ m_max
) >> 1;
1241 v
= (unsigned long)tb
->tc_ptr
;
1244 else if (tc_ptr
< v
) {
1253 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1255 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1257 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1260 tb1
= tb
->jmp_next
[n
];
1262 /* find head of list */
1265 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1268 tb1
= tb1
->jmp_next
[n1
];
1270 /* we are now sure now that tb jumps to tb1 */
1273 /* remove tb from the jmp_first list */
1274 ptb
= &tb_next
->jmp_first
;
1278 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1279 if (n1
== n
&& tb1
== tb
)
1281 ptb
= &tb1
->jmp_next
[n1
];
1283 *ptb
= tb
->jmp_next
[n
];
1284 tb
->jmp_next
[n
] = NULL
;
1286 /* suppress the jump to next tb in generated code */
1287 tb_reset_jump(tb
, n
);
1289 /* suppress jumps in the tb on which we could have jumped */
1290 tb_reset_jump_recursive(tb_next
);
1294 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1296 tb_reset_jump_recursive2(tb
, 0);
1297 tb_reset_jump_recursive2(tb
, 1);
1300 #if defined(TARGET_HAS_ICE)
1301 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1303 target_phys_addr_t addr
;
1305 ram_addr_t ram_addr
;
1308 addr
= cpu_get_phys_page_debug(env
, pc
);
1309 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1311 pd
= IO_MEM_UNASSIGNED
;
1313 pd
= p
->phys_offset
;
1315 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1316 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1320 /* Add a watchpoint. */
1321 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1322 int flags
, CPUWatchpoint
**watchpoint
)
1324 target_ulong len_mask
= ~(len
- 1);
1327 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1328 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1329 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1330 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1333 wp
= qemu_malloc(sizeof(*wp
));
1336 wp
->len_mask
= len_mask
;
1339 /* keep all GDB-injected watchpoints in front */
1341 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1343 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1345 tlb_flush_page(env
, addr
);
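/* Flushing the TLB entry for the watched page forces subsequent accesses
   through the slow path, where tlb_set_page_exec() routes them to
   io_mem_watch so the watchpoint can be checked. */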
1352 /* Remove a specific watchpoint. */
1353 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1356 target_ulong len_mask
= ~(len
- 1);
1359 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1360 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1361 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1362 cpu_watchpoint_remove_by_ref(env
, wp
);
1369 /* Remove a specific watchpoint by reference. */
1370 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1372 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1374 tlb_flush_page(env
, watchpoint
->vaddr
);
1376 qemu_free(watchpoint
);
1379 /* Remove all matching watchpoints. */
1380 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1382 CPUWatchpoint
*wp
, *next
;
1384 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1385 if (wp
->flags
& mask
)
1386 cpu_watchpoint_remove_by_ref(env
, wp
);
1390 /* Add a breakpoint. */
1391 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1392 CPUBreakpoint
**breakpoint
)
1394 #if defined(TARGET_HAS_ICE)
1397 bp
= qemu_malloc(sizeof(*bp
));
1402 /* keep all GDB-injected breakpoints in front */
1404 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1406 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1408 breakpoint_invalidate(env
, pc
);
1418 /* Remove a specific breakpoint. */
1419 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1421 #if defined(TARGET_HAS_ICE)
1424 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1425 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1426 cpu_breakpoint_remove_by_ref(env
, bp
);
1436 /* Remove a specific breakpoint by reference. */
1437 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1439 #if defined(TARGET_HAS_ICE)
1440 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1442 breakpoint_invalidate(env
, breakpoint
->pc
);
1444 qemu_free(breakpoint
);
1448 /* Remove all matching breakpoints. */
1449 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1451 #if defined(TARGET_HAS_ICE)
1452 CPUBreakpoint
*bp
, *next
;
1454 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1455 if (bp
->flags
& mask
)
1456 cpu_breakpoint_remove_by_ref(env
, bp
);
1461 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1462 CPU loop after each instruction */
1463 void cpu_single_step(CPUState
*env
, int enabled
)
1465 #if defined(TARGET_HAS_ICE)
1466 if (env
->singlestep_enabled
!= enabled
) {
1467 env
->singlestep_enabled
= enabled
;
1469 kvm_update_guest_debug(env
, 0);
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
1479 /* enable or disable low levels log */
1480 void cpu_set_log(int log_flags
)
1482 loglevel
= log_flags
;
1483 if (loglevel
&& !logfile
) {
1484 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1486 perror(logfilename
);
1489 #if !defined(CONFIG_SOFTMMU)
1490 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1492 static char logfile_buf
[4096];
1493 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1496 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1500 if (!loglevel
&& logfile
) {
1506 void cpu_set_log_filename(const char *filename
)
1508 logfilename
= strdup(filename
);
1513 cpu_set_log(loglevel
);
1516 /* mask must never be zero, except for A20 change call */
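/* cpu_interrupt() may be called while the CPU is inside generated code; in
   that case the currently executing TB is unchained (tb_reset_jump_recursive)
   so control returns to the main loop at the next block boundary, where the
   new interrupt_request bits are examined. */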
1517 void cpu_interrupt(CPUState
*env
, int mask
)
1519 #if !defined(USE_NPTL)
1520 TranslationBlock
*tb
;
1521 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1525 if (mask
& CPU_INTERRUPT_EXIT
) {
1526 env
->exit_request
= 1;
1527 mask
&= ~CPU_INTERRUPT_EXIT
;
1530 old_mask
= env
->interrupt_request
;
1531 env
->interrupt_request
|= mask
;
1532 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1533 kvm_update_interrupt_request(env
);
1534 #if defined(USE_NPTL)
1535 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1536 problem and hope the cpu will stop of its own accord. For userspace
1537 emulation this often isn't actually as bad as it sounds. Often
1538 signals are used primarily to interrupt blocking syscalls. */
1541 env
->icount_decr
.u16
.high
= 0xffff;
1542 #ifndef CONFIG_USER_ONLY
1544 && (mask
& ~old_mask
) != 0) {
1545 cpu_abort(env
, "Raised interrupt while not in I/O function");
1549 tb
= env
->current_tb
;
1550 /* if the cpu is currently executing code, we must unlink it and
1551 all the potentially executing TB */
1552 if (tb
&& !testandset(&interrupt_lock
)) {
1553 env
->current_tb
= NULL
;
1554 tb_reset_jump_recursive(tb
);
1555 resetlock(&interrupt_lock
);
1561 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1563 env
->interrupt_request
&= ~mask
;
1566 const CPULogItem cpu_log_items
[] = {
1567 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1568 "show generated host assembly code for each compiled TB" },
1569 { CPU_LOG_TB_IN_ASM
, "in_asm",
1570 "show target assembly code for each compiled TB" },
1571 { CPU_LOG_TB_OP
, "op",
1572 "show micro ops for each compiled TB" },
1573 { CPU_LOG_TB_OP_OPT
, "op_opt",
1576 "before eflags optimization and "
1578 "after liveness analysis" },
1579 { CPU_LOG_INT
, "int",
1580 "show interrupts/exceptions in short format" },
1581 { CPU_LOG_EXEC
, "exec",
1582 "show trace before each executed TB (lots of logs)" },
1583 { CPU_LOG_TB_CPU
, "cpu",
1584 "show CPU state before block translation" },
1586 { CPU_LOG_PCALL
, "pcall",
1587 "show protected mode far calls/returns/exceptions" },
1588 { CPU_LOG_RESET
, "cpu_reset",
1589 "show CPU state before CPU resets" },
1592 { CPU_LOG_IOPORT
, "ioport",
1593 "show all i/o ports accesses" },
1598 static int cmp1(const char *s1
, int n
, const char *s2
)
1600 if (strlen(s2
) != n
)
1602 return memcmp(s1
, s2
, n
) == 0;
1605 /* takes a comma separated list of log masks. Return 0 if error. */
1606 int cpu_str_to_log_mask(const char *str
)
1608 const CPULogItem
*item
;
1615 p1
= strchr(p
, ',');
1618 if(cmp1(p
,p1
-p
,"all")) {
1619 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1623 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1624 if (cmp1(p
, p1
- p
, item
->name
))
1638 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1645 fprintf(stderr
, "qemu: fatal: ");
1646 vfprintf(stderr
, fmt
, ap
);
1647 fprintf(stderr
, "\n");
1649 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1651 cpu_dump_state(env
, stderr
, fprintf
, 0);
1653 if (qemu_log_enabled()) {
1654 qemu_log("qemu: fatal: ");
1655 qemu_log_vprintf(fmt
, ap2
);
1658 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1660 log_cpu_state(env
, 0);
1670 CPUState
*cpu_copy(CPUState
*env
)
1672 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1673 CPUState
*next_cpu
= new_env
->next_cpu
;
1674 int cpu_index
= new_env
->cpu_index
;
1675 #if defined(TARGET_HAS_ICE)
1680 memcpy(new_env
, env
, sizeof(CPUState
));
1682 /* Preserve chaining and index. */
1683 new_env
->next_cpu
= next_cpu
;
1684 new_env
->cpu_index
= cpu_index
;
1686 /* Clone all break/watchpoints.
1687 Note: Once we support ptrace with hw-debug register access, make sure
1688 BP_CPU break/watchpoints are handled correctly on clone. */
1689 TAILQ_INIT(&env
->breakpoints
);
1690 TAILQ_INIT(&env
->watchpoints
);
1691 #if defined(TARGET_HAS_ICE)
1692 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1693 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1695 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1696 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1704 #if !defined(CONFIG_USER_ONLY)
1706 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1710 /* Discard jump cache entries for any tb which might potentially
1711 overlap the flushed page. */
1712 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1713 memset (&env
->tb_jmp_cache
[i
], 0,
1714 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1716 i
= tb_jmp_cache_hash_page(addr
);
1717 memset (&env
->tb_jmp_cache
[i
], 0,
1718 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1721 /* NOTE: if flush_global is true, also flush global entries (not
1723 void tlb_flush(CPUState
*env
, int flush_global
)
1727 #if defined(DEBUG_TLB)
1728 printf("tlb_flush:\n");
1730 /* must reset current TB so that interrupts cannot modify the
1731 links while we are modifying them */
1732 env
->current_tb
= NULL
;
1734 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1735 env
->tlb_table
[0][i
].addr_read
= -1;
1736 env
->tlb_table
[0][i
].addr_write
= -1;
1737 env
->tlb_table
[0][i
].addr_code
= -1;
1738 env
->tlb_table
[1][i
].addr_read
= -1;
1739 env
->tlb_table
[1][i
].addr_write
= -1;
1740 env
->tlb_table
[1][i
].addr_code
= -1;
1741 #if (NB_MMU_MODES >= 3)
1742 env
->tlb_table
[2][i
].addr_read
= -1;
1743 env
->tlb_table
[2][i
].addr_write
= -1;
1744 env
->tlb_table
[2][i
].addr_code
= -1;
1745 #if (NB_MMU_MODES == 4)
1746 env
->tlb_table
[3][i
].addr_read
= -1;
1747 env
->tlb_table
[3][i
].addr_write
= -1;
1748 env
->tlb_table
[3][i
].addr_code
= -1;
1753 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1756 if (env
->kqemu_enabled
) {
1757 kqemu_flush(env
, flush_global
);
1763 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1765 if (addr
== (tlb_entry
->addr_read
&
1766 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1767 addr
== (tlb_entry
->addr_write
&
1768 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1769 addr
== (tlb_entry
->addr_code
&
1770 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1771 tlb_entry
->addr_read
= -1;
1772 tlb_entry
->addr_write
= -1;
1773 tlb_entry
->addr_code
= -1;
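/* tlb_flush_page() below only clears the TLB set that can hold a mapping for
   the page, indexed by (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1) in each
   MMU mode, plus the corresponding tb_jmp_cache entries. */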
1777 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1781 #if defined(DEBUG_TLB)
1782 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1784 /* must reset current TB so that interrupts cannot modify the
1785 links while we are modifying them */
1786 env
->current_tb
= NULL
;
1788 addr
&= TARGET_PAGE_MASK
;
1789 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1790 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1791 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1792 #if (NB_MMU_MODES >= 3)
1793 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1794 #if (NB_MMU_MODES == 4)
1795 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1799 tlb_flush_jmp_cache(env
, addr
);
1802 if (env
->kqemu_enabled
) {
1803 kqemu_flush_page(env
, addr
);
1808 /* update the TLBs so that writes to code in the virtual page 'addr'
1810 static void tlb_protect_code(ram_addr_t ram_addr
)
1812 cpu_physical_memory_reset_dirty(ram_addr
,
1813 ram_addr
+ TARGET_PAGE_SIZE
,
1817 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1818 tested for self modifying code */
1819 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1822 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1825 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1826 unsigned long start
, unsigned long length
)
1829 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1830 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1831 if ((addr
- start
) < length
) {
1832 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1837 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1841 unsigned long length
, start1
;
1845 start
&= TARGET_PAGE_MASK
;
1846 end
= TARGET_PAGE_ALIGN(end
);
1848 length
= end
- start
;
1851 len
= length
>> TARGET_PAGE_BITS
;
1853 /* XXX: should not depend on cpu context */
1855 if (env
->kqemu_enabled
) {
1858 for(i
= 0; i
< len
; i
++) {
1859 kqemu_set_notdirty(env
, addr
);
1860 addr
+= TARGET_PAGE_SIZE
;
1864 mask
= ~dirty_flags
;
1865 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1866 for(i
= 0; i
< len
; i
++)
1869 /* we modify the TLB cache so that the dirty bit will be set again
1870 when accessing the range */
1871 start1
= start
+ (unsigned long)phys_ram_base
;
1872 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1873 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1874 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1875 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1876 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1877 #if (NB_MMU_MODES >= 3)
1878 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1879 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1880 #if (NB_MMU_MODES == 4)
1881 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1882 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1888 int cpu_physical_memory_set_dirty_tracking(int enable
)
1893 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1894 in_migration
= enable
;
1898 int cpu_physical_memory_get_dirty_tracking(void)
1900 return in_migration
;
1903 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1906 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1909 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1911 ram_addr_t ram_addr
;
1913 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1914 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1915 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1916 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1917 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1922 /* update the TLB according to the current state of the dirty bits */
1923 void cpu_tlb_update_dirty(CPUState
*env
)
1926 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1927 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1928 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1929 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1930 #if (NB_MMU_MODES >= 3)
1931 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1932 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1933 #if (NB_MMU_MODES == 4)
1934 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1935 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1940 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1942 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1943 tlb_entry
->addr_write
= vaddr
;
1946 /* update the TLB corresponding to virtual page vaddr
1947 so that it is no longer dirty */
1948 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1952 vaddr
&= TARGET_PAGE_MASK
;
1953 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1954 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1955 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1956 #if (NB_MMU_MODES >= 3)
1957 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1958 #if (NB_MMU_MODES == 4)
1959 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1964 /* add a new TLB entry. At most one entry for a given virtual address
1965 is permitted. Return 0 if OK or 2 if the page could not be mapped
1966 (can only happen in non SOFTMMU mode for I/O pages or pages
1967 conflicting with the host address space). */
1968 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1969 target_phys_addr_t paddr
, int prot
,
1970 int mmu_idx
, int is_softmmu
)
1975 target_ulong address
;
1976 target_ulong code_address
;
1977 target_phys_addr_t addend
;
1981 target_phys_addr_t iotlb
;
1983 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1985 pd
= IO_MEM_UNASSIGNED
;
1987 pd
= p
->phys_offset
;
1989 #if defined(DEBUG_TLB)
1990 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1991 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1996 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1997 /* IO memory case (romd handled later) */
1998 address
|= TLB_MMIO
;
2000 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
2001 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2003 iotlb
= pd
& TARGET_PAGE_MASK
;
2004 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2005 iotlb
|= IO_MEM_NOTDIRTY
;
2007 iotlb
|= IO_MEM_ROM
;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
2015 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2017 iotlb
+= p
->region_offset
;
2023 code_address
= address
;
2024 /* Make accesses to pages with watchpoints go via the
2025 watchpoint trap routines. */
2026 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2027 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2028 iotlb
= io_mem_watch
+ paddr
;
2029 /* TODO: The memory case can be optimized by not trapping
2030 reads of pages with a write breakpoint. */
2031 address
|= TLB_MMIO
;
2035 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2036 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2037 te
= &env
->tlb_table
[mmu_idx
][index
];
2038 te
->addend
= addend
- vaddr
;
2039 if (prot
& PAGE_READ
) {
2040 te
->addr_read
= address
;
2045 if (prot
& PAGE_EXEC
) {
2046 te
->addr_code
= code_address
;
2050 if (prot
& PAGE_WRITE
) {
2051 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2052 (pd
& IO_MEM_ROMD
)) {
2053 /* Write access calls the I/O callback. */
2054 te
->addr_write
= address
| TLB_MMIO
;
2055 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2056 !cpu_physical_memory_is_dirty(pd
)) {
2057 te
->addr_write
= address
| TLB_NOTDIRTY
;
2059 te
->addr_write
= address
;
2062 te
->addr_write
= -1;
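/* The low bits of addr_write encode how stores must be handled: TLB_MMIO
   sends them to an I/O handler (ROM and ROMD pages), TLB_NOTDIRTY forces the
   slow path once so dirty tracking and SMC detection see the first write to
   a clean RAM page, and a plain address allows direct RAM stores. */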
2069 void tlb_flush(CPUState
*env
, int flush_global
)
2073 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2077 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2078 target_phys_addr_t paddr
, int prot
,
2079 int mmu_idx
, int is_softmmu
)
2084 /* dump memory mappings */
2085 void page_dump(FILE *f
)
2087 unsigned long start
, end
;
2088 int i
, j
, prot
, prot1
;
2091 fprintf(f
, "%-8s %-8s %-8s %s\n",
2092 "start", "end", "size", "prot");
2096 for(i
= 0; i
<= L1_SIZE
; i
++) {
2101 for(j
= 0;j
< L2_SIZE
; j
++) {
2106 if (prot1
!= prot
) {
2107 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2109 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2110 start
, end
, end
- start
,
2111 prot
& PAGE_READ
? 'r' : '-',
2112 prot
& PAGE_WRITE
? 'w' : '-',
2113 prot
& PAGE_EXEC
? 'x' : '-');
2127 int page_get_flags(target_ulong address
)
2131 p
= page_find(address
>> TARGET_PAGE_BITS
);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
2140 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2145 /* mmap_lock should already be held. */
2146 start
= start
& TARGET_PAGE_MASK
;
2147 end
= TARGET_PAGE_ALIGN(end
);
2148 if (flags
& PAGE_WRITE
)
2149 flags
|= PAGE_WRITE_ORG
;
2150 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2151 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2152 /* We may be called for host regions that are outside guest
2156 /* if the write protection is set, then we invalidate the code
2158 if (!(p
->flags
& PAGE_WRITE
) &&
2159 (flags
& PAGE_WRITE
) &&
2161 tb_invalidate_phys_page(addr
, 0, NULL
);
2167 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2173 if (start
+ len
< start
)
2174 /* we've wrapped around */
2177 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2178 start
= start
& TARGET_PAGE_MASK
;
2180 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2181 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2184 if( !(p
->flags
& PAGE_VALID
) )
2187 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2189 if (flags
& PAGE_WRITE
) {
2190 if (!(p
->flags
& PAGE_WRITE_ORG
))
2192 /* unprotect the page if it was put read-only because it
2193 contains translated code */
2194 if (!(p
->flags
& PAGE_WRITE
)) {
2195 if (!page_unprotect(addr
, 0, NULL
))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2206 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2208 unsigned int page_index
, prot
, pindex
;
2210 target_ulong host_start
, host_end
, addr
;
2212 /* Technically this isn't safe inside a signal handler. However we
2213 know this only ever happens in a synchronous SEGV handler, so in
2214 practice it seems to be ok. */
2217 host_start
= address
& qemu_host_page_mask
;
2218 page_index
= host_start
>> TARGET_PAGE_BITS
;
2219 p1
= page_find(page_index
);
2224 host_end
= host_start
+ qemu_host_page_size
;
2227 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2231 /* if the page was really writable, then we change its
2232 protection back to writable */
2233 if (prot
& PAGE_WRITE_ORG
) {
2234 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2235 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2236 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2237 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2238 p1
[pindex
].flags
|= PAGE_WRITE
;
2239 /* and since the content will be modified, we must invalidate
2240 the corresponding translated code. */
2241 tb_invalidate_phys_page(address
, pc
, puc
);
2242 #ifdef DEBUG_TB_CHECK
2243 tb_invalidate_check(address
);
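/* page_unprotect() is the user-mode counterpart of the SMC machinery: it
   restores PAGE_WRITE on the faulting host page and invalidates the TBs
   derived from it, so the guest write can then proceed normally. */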
2253 static inline void tlb_set_dirty(CPUState
*env
,
2254 unsigned long addr
, target_ulong vaddr
)
2257 #endif /* defined(CONFIG_USER_ONLY) */
2259 #if !defined(CONFIG_USER_ONLY)
2261 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2262 ram_addr_t memory
, ram_addr_t region_offset
);
2263 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2264 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2265 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2268 if (addr > start_addr) \
2271 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2272 if (start_addr2 > 0) \
2276 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2277 end_addr2 = TARGET_PAGE_SIZE - 1; \
2279 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2280 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
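/* When a registered range does not align to or cover a whole target page,
   the page is switched to a subpage_t container (subpage_init /
   subpage_register) so that different handlers can serve different parts of
   the same page. */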
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
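
/* Illustrative sketch, not part of the original file: a board model would
   typically allocate backing RAM with qemu_ram_alloc() and then map it at a
   guest-physical base with cpu_register_physical_memory_offset().  The base
   address and size below are arbitrary example values. */
#if 0
static void example_register_ram(void)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(0x100000);          /* 1 MB of backing RAM */
    cpu_register_physical_memory_offset(0x00000000, /* guest-physical base */
                                        0x100000,   /* size, page multiple */
                                        ram_offset | IO_MEM_RAM,
                                        0);         /* region_offset */
}
#endif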
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;

    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;

    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;

    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;

    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
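
/* Illustrative sketch, not part of the original file: how a device model
   could pair cpu_register_io_memory() with the physical-memory registration
   above.  The guest-physical base address is a hypothetical example value,
   and the file's own unassigned_mem_read/unassigned_mem_write tables stand
   in for a real device's callbacks. */
#if 0
static void example_register_mmio(void *dev_state)
{
    int io_index;

    /* io_index == 0 asks for a newly allocated I/O zone */
    io_index = cpu_register_io_memory(0, unassigned_mem_read,
                                      unassigned_mem_write, dev_state);
    if (io_index < 0)
        return;
    /* the return value is what cpu_register_physical_memory_offset()
       expects as phys_offset for an I/O page */
    cpu_register_physical_memory_offset(0xfe000000, TARGET_PAGE_SIZE,
                                        io_index, 0);
}
#endif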
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                flush_icache_range((unsigned long)ptr,
                                   ((unsigned long)ptr) + l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
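
/* Illustrative sketch, not part of the original file: reading and modifying
   guest-physical memory through the slow path above, via the
   cpu_physical_memory_read/write wrappers.  The address is an arbitrary
   example value. */
#if 0
static void example_phys_rw(void)
{
    uint8_t buf[16];

    cpu_physical_memory_read(0x1000, buf, sizeof(buf));   /* is_write = 0 */
    buf[0] ^= 0xff;
    cpu_physical_memory_write(0x1000, buf, sizeof(buf));  /* is_write = 1 */
}
#endif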
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
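
/* Illustrative sketch, not part of the original file: firmware loading would
   use cpu_physical_memory_write_rom(), which, unlike cpu_physical_memory_rw(),
   also writes into ROM and ROMD pages.  The destination address is an
   arbitrary example value. */
#if 0
static void example_load_rom(const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(0xfffc0000, image, image_size);
}
#endif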
typedef struct {
    uint8_t *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
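
/* Illustrative sketch, not part of the original file: a DMA-style user of
   cpu_physical_memory_map()/unmap().  Only the actually mapped length (*plen
   on return) may be touched, and the access length is reported back to
   unmap so dirty tracking stays correct.  The function and its parameters
   are hypothetical. */
#if 0
static void example_dma_write(target_phys_addr_t dma_addr,
                              const uint8_t *src, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host;

    host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);
    if (!host)
        return;             /* retry later via cpu_register_map_client() */
    memcpy(host, src, plen);
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
}
#endif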
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
#ifdef __GNUC__
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   x
#define unlikely(x) x
#endif
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
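
/* Illustrative sketch, not part of the original file: the ld*_phys/st*_phys
   helpers give aligned, target-endian access to guest-physical memory, e.g.
   for walking guest page tables.  The address is an arbitrary example
   value. */
#if 0
static void example_patch_word(void)
{
    uint32_t w;

    w = ldl_phys(0x2000);       /* 32-bit target-endian load */
    stl_phys(0x2000, w | 1);    /* 32-bit store, dirty bits updated */
}
#endif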
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
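
/* Illustrative sketch, not part of the original file: debuggers such as the
   gdb stub access guest-virtual addresses through cpu_memory_rw_debug(),
   which translates page by page with cpu_get_phys_page_debug() and returns
   -1 on an unmapped page.  The wrapper function is hypothetical. */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}
#endif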
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif