2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
48 #if defined(CONFIG_USER_ONLY)
52 //#define DEBUG_TB_INVALIDATE
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #define MMAP_AREA_START 0x00000000
72 #define MMAP_AREA_END 0xa8000000
74 #if defined(TARGET_SPARC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 41
76 #elif defined(TARGET_SPARC)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #elif defined(TARGET_ALPHA)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #define TARGET_VIRT_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_PPC64)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
90 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
91 #define TARGET_PHYS_ADDR_SPACE_BITS 32
94 static TranslationBlock
*tbs
;
95 int code_gen_max_blocks
;
96 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
101 #if defined(__arm__) || defined(__sparc_v9__)
102 /* The prologue must be reachable with a direct jump. ARM and Sparc64
103 have limited branch ranges (possibly also PPC) so place it in a
104 section close to code segment. */
105 #define code_gen_section \
106 __attribute__((__section__(".gen_code"))) \
107 __attribute__((aligned (32)))
109 #define code_gen_section \
110 __attribute__((aligned (32)))
113 uint8_t code_gen_prologue
[1024] code_gen_section
;
114 static uint8_t *code_gen_buffer
;
115 static unsigned long code_gen_buffer_size
;
116 /* threshold to flush the translated code buffer */
117 static unsigned long code_gen_buffer_max_size
;
118 uint8_t *code_gen_ptr
;
120 #if !defined(CONFIG_USER_ONLY)
121 ram_addr_t phys_ram_size
;
123 uint8_t *phys_ram_base
;
124 uint8_t *phys_ram_dirty
;
126 static int in_migration
;
127 static ram_addr_t phys_ram_alloc_offset
= 0;
131 /* current CPU in the current thread. It is only valid inside
133 CPUState
*cpu_single_env
;
134 /* 0 = Do not count executed instructions.
135 1 = Precise instruction counting.
136 2 = Adaptive rate instruction counting. */
138 /* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
142 typedef struct PageDesc
{
143 /* list of TBs intersecting this ram page */
144 TranslationBlock
*first_tb
;
145 /* in order to optimize self modifying code, we count the number
146 of lookups we do to a given page to use a bitmap */
147 unsigned int code_write_count
;
148 uint8_t *code_bitmap
;
149 #if defined(CONFIG_USER_ONLY)
154 typedef struct PhysPageDesc
{
155 /* offset in host memory of the page + io_index in the low bits */
156 ram_addr_t phys_offset
;
160 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
161 /* XXX: this is a temporary hack for alpha target.
162 * In the future, this is to be replaced by a multi-level table
163 * to actually be able to handle the complete 64 bits address space.
165 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170 #define L1_SIZE (1 << L1_BITS)
171 #define L2_SIZE (1 << L2_BITS)
173 unsigned long qemu_real_host_page_size
;
174 unsigned long qemu_host_page_bits
;
175 unsigned long qemu_host_page_size
;
176 unsigned long qemu_host_page_mask
;
178 /* XXX: for system emulation, it could just be an array */
179 static PageDesc
*l1_map
[L1_SIZE
];
180 static PhysPageDesc
**l1_phys_map
;
182 #if !defined(CONFIG_USER_ONLY)
183 static void io_mem_init(void);
185 /* io memory support */
186 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
187 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
188 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
189 char io_mem_used
[IO_MEM_NB_ENTRIES
];
190 static int io_mem_watch
;
194 static const char *logfilename
= "/tmp/qemu.log";
197 static int log_append
= 0;
200 static int tlb_flush_count
;
201 static int tb_flush_count
;
202 static int tb_phys_invalidate_count
;
204 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
205 typedef struct subpage_t
{
206 target_phys_addr_t base
;
207 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
208 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
209 void *opaque
[TARGET_PAGE_SIZE
][2][4];
213 static void map_exec(void *addr
, long size
)
216 VirtualProtect(addr
, size
,
217 PAGE_EXECUTE_READWRITE
, &old_protect
);
/* Make the host memory range [addr, addr + size) executable.
   The range is widened outward to whole host pages, since mprotect()
   only operates on page-aligned regions. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long lo = (unsigned long)addr & ~(page_size - 1);
    unsigned long hi = ((unsigned long)addr + size + page_size - 1)
                       & ~(page_size - 1);

    mprotect((void *)lo, hi - lo, PROT_READ | PROT_WRITE | PROT_EXEC);
}
238 static void page_init(void)
240 /* NOTE: we can always suppose that qemu_host_page_size >=
244 SYSTEM_INFO system_info
;
246 GetSystemInfo(&system_info
);
247 qemu_real_host_page_size
= system_info
.dwPageSize
;
250 qemu_real_host_page_size
= getpagesize();
252 if (qemu_host_page_size
== 0)
253 qemu_host_page_size
= qemu_real_host_page_size
;
254 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
255 qemu_host_page_size
= TARGET_PAGE_SIZE
;
256 qemu_host_page_bits
= 0;
257 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
258 qemu_host_page_bits
++;
259 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
260 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
261 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
263 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
265 long long startaddr
, endaddr
;
270 last_brk
= (unsigned long)sbrk(0);
271 f
= fopen("/proc/self/maps", "r");
274 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
276 startaddr
= MIN(startaddr
,
277 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
278 endaddr
= MIN(endaddr
,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
280 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
281 TARGET_PAGE_ALIGN(endaddr
),
292 static inline PageDesc
**page_l1_map(target_ulong index
)
294 #if TARGET_LONG_BITS > 32
295 /* Host memory outside guest VM. For 32-bit targets we have already
296 excluded high addresses. */
297 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
300 return &l1_map
[index
>> L2_BITS
];
303 static inline PageDesc
*page_find_alloc(target_ulong index
)
306 lp
= page_l1_map(index
);
312 /* allocate if not found */
313 #if defined(CONFIG_USER_ONLY)
315 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
316 /* Don't use qemu_malloc because it may recurse. */
317 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
318 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
321 if (addr
== (target_ulong
)addr
) {
322 page_set_flags(addr
& TARGET_PAGE_MASK
,
323 TARGET_PAGE_ALIGN(addr
+ len
),
327 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
331 return p
+ (index
& (L2_SIZE
- 1));
334 static inline PageDesc
*page_find(target_ulong index
)
337 lp
= page_l1_map(index
);
344 return p
+ (index
& (L2_SIZE
- 1));
347 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
352 p
= (void **)l1_phys_map
;
353 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
355 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
356 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
358 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
361 /* allocate if not found */
364 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
365 memset(p
, 0, sizeof(void *) * L1_SIZE
);
369 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
373 /* allocate if not found */
376 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
378 for (i
= 0; i
< L2_SIZE
; i
++)
379 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
381 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
384 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
386 return phys_page_find_alloc(index
, 0);
389 #if !defined(CONFIG_USER_ONLY)
390 static void tlb_protect_code(ram_addr_t ram_addr
);
391 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
393 #define mmap_lock() do { } while(0)
394 #define mmap_unlock() do { } while(0)
397 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399 #if defined(CONFIG_USER_ONLY)
400 /* Currently it is not recommended to allocate big chunks of data in
401 user mode. It will change when a dedicated libc will be used */
402 #define USE_STATIC_CODE_GEN_BUFFER
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
406 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
409 static void code_gen_alloc(unsigned long tb_size
)
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 code_gen_buffer
= static_code_gen_buffer
;
416 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
417 map_exec(code_gen_buffer
, code_gen_buffer_size
);
419 code_gen_buffer_size
= tb_size
;
420 if (code_gen_buffer_size
== 0) {
421 #if defined(CONFIG_USER_ONLY)
422 /* in user mode, phys_ram_size is not meaningful */
423 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
425 /* XXX: needs adjustments */
426 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
429 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
430 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
431 /* The code gen buffer location may have constraints depending on
432 the host cpu and OS */
433 #if defined(__linux__)
438 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
439 #if defined(__x86_64__)
441 /* Cannot map more than that */
442 if (code_gen_buffer_size
> (800 * 1024 * 1024))
443 code_gen_buffer_size
= (800 * 1024 * 1024);
444 #elif defined(__sparc_v9__)
445 // Map the buffer below 2G, so we can use direct calls and branches
447 start
= (void *) 0x60000000UL
;
448 if (code_gen_buffer_size
> (512 * 1024 * 1024))
449 code_gen_buffer_size
= (512 * 1024 * 1024);
451 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
452 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
454 if (code_gen_buffer
== MAP_FAILED
) {
455 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
459 #elif defined(__FreeBSD__)
463 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
464 #if defined(__x86_64__)
465 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
466 * 0x40000000 is free */
468 addr
= (void *)0x40000000;
469 /* Cannot map more than that */
470 if (code_gen_buffer_size
> (800 * 1024 * 1024))
471 code_gen_buffer_size
= (800 * 1024 * 1024);
473 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
474 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
476 if (code_gen_buffer
== MAP_FAILED
) {
477 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
482 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
483 if (!code_gen_buffer
) {
484 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
487 map_exec(code_gen_buffer
, code_gen_buffer_size
);
489 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
490 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
491 code_gen_buffer_max_size
= code_gen_buffer_size
-
492 code_gen_max_block_size();
493 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
494 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
497 /* Must be called before using the QEMU cpus. 'tb_size' is the size
498 (in bytes) allocated to the translation buffer. Zero means default
500 void cpu_exec_init_all(unsigned long tb_size
)
503 code_gen_alloc(tb_size
);
504 code_gen_ptr
= code_gen_buffer
;
506 #if !defined(CONFIG_USER_ONLY)
511 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
513 #define CPU_COMMON_SAVE_VERSION 1
515 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
517 CPUState
*env
= opaque
;
519 qemu_put_be32s(f
, &env
->halted
);
520 qemu_put_be32s(f
, &env
->interrupt_request
);
523 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
525 CPUState
*env
= opaque
;
527 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
530 qemu_get_be32s(f
, &env
->halted
);
531 qemu_get_be32s(f
, &env
->interrupt_request
);
538 void cpu_exec_init(CPUState
*env
)
543 env
->next_cpu
= NULL
;
546 while (*penv
!= NULL
) {
547 penv
= (CPUState
**)&(*penv
)->next_cpu
;
550 env
->cpu_index
= cpu_index
;
552 env
->thread_id
= GetCurrentProcessId();
554 env
->thread_id
= getpid();
557 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
558 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
559 cpu_common_save
, cpu_common_load
, env
);
560 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
561 cpu_save
, cpu_load
, env
);
565 static inline void invalidate_page_bitmap(PageDesc
*p
)
567 if (p
->code_bitmap
) {
568 qemu_free(p
->code_bitmap
);
569 p
->code_bitmap
= NULL
;
571 p
->code_write_count
= 0;
574 /* set to NULL all the 'first_tb' fields in all PageDescs */
575 static void page_flush_tb(void)
580 for(i
= 0; i
< L1_SIZE
; i
++) {
583 for(j
= 0; j
< L2_SIZE
; j
++) {
585 invalidate_page_bitmap(p
);
592 /* flush all the translation blocks */
593 /* XXX: tb_flush is currently not thread safe */
594 void tb_flush(CPUState
*env1
)
597 #if defined(DEBUG_FLUSH)
598 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
599 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
601 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
603 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
604 cpu_abort(env1
, "Internal error: code buffer overflow\n");
608 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
609 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
612 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
615 code_gen_ptr
= code_gen_buffer
;
616 /* XXX: flush processor icache at this point if cache flush is
621 #ifdef DEBUG_TB_CHECK
623 static void tb_invalidate_check(target_ulong address
)
625 TranslationBlock
*tb
;
627 address
&= TARGET_PAGE_MASK
;
628 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
629 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
630 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
631 address
>= tb
->pc
+ tb
->size
)) {
632 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
633 address
, (long)tb
->pc
, tb
->size
);
639 /* verify that all the pages have correct rights for code */
640 static void tb_page_check(void)
642 TranslationBlock
*tb
;
643 int i
, flags1
, flags2
;
645 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
646 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
647 flags1
= page_get_flags(tb
->pc
);
648 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
649 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
650 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
651 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
657 static void tb_jmp_check(TranslationBlock
*tb
)
659 TranslationBlock
*tb1
;
662 /* suppress any remaining jumps to this TB */
666 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
669 tb1
= tb1
->jmp_next
[n1
];
671 /* check end of list */
673 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
679 /* invalidate one TB */
680 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
683 TranslationBlock
*tb1
;
687 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
690 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
694 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
696 TranslationBlock
*tb1
;
702 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
704 *ptb
= tb1
->page_next
[n1
];
707 ptb
= &tb1
->page_next
[n1
];
711 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
713 TranslationBlock
*tb1
, **ptb
;
716 ptb
= &tb
->jmp_next
[n
];
719 /* find tb(n) in circular list */
723 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
724 if (n1
== n
&& tb1
== tb
)
727 ptb
= &tb1
->jmp_first
;
729 ptb
= &tb1
->jmp_next
[n1
];
732 /* now we can suppress tb(n) from the list */
733 *ptb
= tb
->jmp_next
[n
];
735 tb
->jmp_next
[n
] = NULL
;
739 /* reset the jump entry 'n' of a TB so that it is not chained to
741 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
743 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
746 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
751 target_phys_addr_t phys_pc
;
752 TranslationBlock
*tb1
, *tb2
;
754 /* remove the TB from the hash list */
755 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
756 h
= tb_phys_hash_func(phys_pc
);
757 tb_remove(&tb_phys_hash
[h
], tb
,
758 offsetof(TranslationBlock
, phys_hash_next
));
760 /* remove the TB from the page list */
761 if (tb
->page_addr
[0] != page_addr
) {
762 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
763 tb_page_remove(&p
->first_tb
, tb
);
764 invalidate_page_bitmap(p
);
766 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
767 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
768 tb_page_remove(&p
->first_tb
, tb
);
769 invalidate_page_bitmap(p
);
772 tb_invalidated_flag
= 1;
774 /* remove the TB from the hash list */
775 h
= tb_jmp_cache_hash_func(tb
->pc
);
776 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
777 if (env
->tb_jmp_cache
[h
] == tb
)
778 env
->tb_jmp_cache
[h
] = NULL
;
781 /* suppress this TB from the two jump lists */
782 tb_jmp_remove(tb
, 0);
783 tb_jmp_remove(tb
, 1);
785 /* suppress any remaining jumps to this TB */
791 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
792 tb2
= tb1
->jmp_next
[n1
];
793 tb_reset_jump(tb1
, n1
);
794 tb1
->jmp_next
[n1
] = NULL
;
797 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
799 tb_phys_invalidate_count
++;
802 static inline void set_bits(uint8_t *tab
, int start
, int len
)
808 mask
= 0xff << (start
& 7);
809 if ((start
& ~7) == (end
& ~7)) {
811 mask
&= ~(0xff << (end
& 7));
816 start
= (start
+ 8) & ~7;
818 while (start
< end1
) {
823 mask
= ~(0xff << (end
& 7));
829 static void build_page_bitmap(PageDesc
*p
)
831 int n
, tb_start
, tb_end
;
832 TranslationBlock
*tb
;
834 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
841 tb
= (TranslationBlock
*)((long)tb
& ~3);
842 /* NOTE: this is subtle as a TB may span two physical pages */
844 /* NOTE: tb_end may be after the end of the page, but
845 it is not a problem */
846 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
847 tb_end
= tb_start
+ tb
->size
;
848 if (tb_end
> TARGET_PAGE_SIZE
)
849 tb_end
= TARGET_PAGE_SIZE
;
852 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
854 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
855 tb
= tb
->page_next
[n
];
859 TranslationBlock
*tb_gen_code(CPUState
*env
,
860 target_ulong pc
, target_ulong cs_base
,
861 int flags
, int cflags
)
863 TranslationBlock
*tb
;
865 target_ulong phys_pc
, phys_page2
, virt_page2
;
868 phys_pc
= get_phys_addr_code(env
, pc
);
871 /* flush must be done */
873 /* cannot fail at this point */
875 /* Don't forget to invalidate previous TB info. */
876 tb_invalidated_flag
= 1;
878 tc_ptr
= code_gen_ptr
;
880 tb
->cs_base
= cs_base
;
883 cpu_gen_code(env
, tb
, &code_gen_size
);
884 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
886 /* check next page if needed */
887 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
889 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
890 phys_page2
= get_phys_addr_code(env
, virt_page2
);
892 tb_link_phys(tb
, phys_pc
, phys_page2
);
896 /* invalidate all TBs which intersect with the target physical page
897 starting in range [start;end[. NOTE: start and end must refer to
898 the same physical page. 'is_cpu_write_access' should be true if called
899 from a real cpu write access: the virtual CPU will exit the current
900 TB if code is modified inside this TB. */
901 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
902 int is_cpu_write_access
)
904 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
905 CPUState
*env
= cpu_single_env
;
906 target_ulong tb_start
, tb_end
;
909 #ifdef TARGET_HAS_PRECISE_SMC
910 int current_tb_not_found
= is_cpu_write_access
;
911 TranslationBlock
*current_tb
= NULL
;
912 int current_tb_modified
= 0;
913 target_ulong current_pc
= 0;
914 target_ulong current_cs_base
= 0;
915 int current_flags
= 0;
916 #endif /* TARGET_HAS_PRECISE_SMC */
918 p
= page_find(start
>> TARGET_PAGE_BITS
);
921 if (!p
->code_bitmap
&&
922 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
923 is_cpu_write_access
) {
924 /* build code bitmap */
925 build_page_bitmap(p
);
928 /* we remove all the TBs in the range [start, end[ */
929 /* XXX: see if in some cases it could be faster to invalidate all the code */
933 tb
= (TranslationBlock
*)((long)tb
& ~3);
934 tb_next
= tb
->page_next
[n
];
935 /* NOTE: this is subtle as a TB may span two physical pages */
937 /* NOTE: tb_end may be after the end of the page, but
938 it is not a problem */
939 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
940 tb_end
= tb_start
+ tb
->size
;
942 tb_start
= tb
->page_addr
[1];
943 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
945 if (!(tb_end
<= start
|| tb_start
>= end
)) {
946 #ifdef TARGET_HAS_PRECISE_SMC
947 if (current_tb_not_found
) {
948 current_tb_not_found
= 0;
950 if (env
->mem_io_pc
) {
951 /* now we have a real cpu fault */
952 current_tb
= tb_find_pc(env
->mem_io_pc
);
955 if (current_tb
== tb
&&
956 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
957 /* If we are modifying the current TB, we must stop
958 its execution. We could be more precise by checking
959 that the modification is after the current PC, but it
960 would require a specialized function to partially
961 restore the CPU state */
963 current_tb_modified
= 1;
964 cpu_restore_state(current_tb
, env
,
965 env
->mem_io_pc
, NULL
);
966 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
969 #endif /* TARGET_HAS_PRECISE_SMC */
970 /* we need to do that to handle the case where a signal
971 occurs while doing tb_phys_invalidate() */
974 saved_tb
= env
->current_tb
;
975 env
->current_tb
= NULL
;
977 tb_phys_invalidate(tb
, -1);
979 env
->current_tb
= saved_tb
;
980 if (env
->interrupt_request
&& env
->current_tb
)
981 cpu_interrupt(env
, env
->interrupt_request
);
986 #if !defined(CONFIG_USER_ONLY)
987 /* if no code remaining, no need to continue to use slow writes */
989 invalidate_page_bitmap(p
);
990 if (is_cpu_write_access
) {
991 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
995 #ifdef TARGET_HAS_PRECISE_SMC
996 if (current_tb_modified
) {
997 /* we generate a block containing just the instruction
998 modifying the memory. It will ensure that it cannot modify
1000 env
->current_tb
= NULL
;
1001 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1002 cpu_resume_from_signal(env
, NULL
);
1007 /* len must be <= 8 and start must be a multiple of len */
1008 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1015 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1016 cpu_single_env
->mem_io_vaddr
, len
,
1017 cpu_single_env
->eip
,
1018 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1022 p
= page_find(start
>> TARGET_PAGE_BITS
);
1025 if (p
->code_bitmap
) {
1026 offset
= start
& ~TARGET_PAGE_MASK
;
1027 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1028 if (b
& ((1 << len
) - 1))
1032 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1036 #if !defined(CONFIG_SOFTMMU)
1037 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1038 unsigned long pc
, void *puc
)
1040 TranslationBlock
*tb
;
1043 #ifdef TARGET_HAS_PRECISE_SMC
1044 TranslationBlock
*current_tb
= NULL
;
1045 CPUState
*env
= cpu_single_env
;
1046 int current_tb_modified
= 0;
1047 target_ulong current_pc
= 0;
1048 target_ulong current_cs_base
= 0;
1049 int current_flags
= 0;
1052 addr
&= TARGET_PAGE_MASK
;
1053 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1057 #ifdef TARGET_HAS_PRECISE_SMC
1058 if (tb
&& pc
!= 0) {
1059 current_tb
= tb_find_pc(pc
);
1062 while (tb
!= NULL
) {
1064 tb
= (TranslationBlock
*)((long)tb
& ~3);
1065 #ifdef TARGET_HAS_PRECISE_SMC
1066 if (current_tb
== tb
&&
1067 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1068 /* If we are modifying the current TB, we must stop
1069 its execution. We could be more precise by checking
1070 that the modification is after the current PC, but it
1071 would require a specialized function to partially
1072 restore the CPU state */
1074 current_tb_modified
= 1;
1075 cpu_restore_state(current_tb
, env
, pc
, puc
);
1076 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1079 #endif /* TARGET_HAS_PRECISE_SMC */
1080 tb_phys_invalidate(tb
, addr
);
1081 tb
= tb
->page_next
[n
];
1084 #ifdef TARGET_HAS_PRECISE_SMC
1085 if (current_tb_modified
) {
1086 /* we generate a block containing just the instruction
1087 modifying the memory. It will ensure that it cannot modify
1089 env
->current_tb
= NULL
;
1090 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1091 cpu_resume_from_signal(env
, puc
);
1097 /* add the tb in the target page and protect it if necessary */
1098 static inline void tb_alloc_page(TranslationBlock
*tb
,
1099 unsigned int n
, target_ulong page_addr
)
1102 TranslationBlock
*last_first_tb
;
1104 tb
->page_addr
[n
] = page_addr
;
1105 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1106 tb
->page_next
[n
] = p
->first_tb
;
1107 last_first_tb
= p
->first_tb
;
1108 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1109 invalidate_page_bitmap(p
);
1111 #if defined(TARGET_HAS_SMC) || 1
1113 #if defined(CONFIG_USER_ONLY)
1114 if (p
->flags
& PAGE_WRITE
) {
1119 /* force the host page as non writable (writes will have a
1120 page fault + mprotect overhead) */
1121 page_addr
&= qemu_host_page_mask
;
1123 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1124 addr
+= TARGET_PAGE_SIZE
) {
1126 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1130 p2
->flags
&= ~PAGE_WRITE
;
1131 page_get_flags(addr
);
1133 mprotect(g2h(page_addr
), qemu_host_page_size
,
1134 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1135 #ifdef DEBUG_TB_INVALIDATE
1136 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1141 /* if some code is already present, then the pages are already
1142 protected. So we handle the case where only the first TB is
1143 allocated in a physical page */
1144 if (!last_first_tb
) {
1145 tlb_protect_code(page_addr
);
1149 #endif /* TARGET_HAS_SMC */
1152 /* Allocate a new translation block. Flush the translation buffer if
1153 too many translation blocks or too much generated code. */
1154 TranslationBlock
*tb_alloc(target_ulong pc
)
1156 TranslationBlock
*tb
;
1158 if (nb_tbs
>= code_gen_max_blocks
||
1159 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1161 tb
= &tbs
[nb_tbs
++];
1167 void tb_free(TranslationBlock
*tb
)
1169 /* In practice this is mostly used for single use temporary TB
1170 Ignore the hard cases and just back up if this TB happens to
1171 be the last one generated. */
1172 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1173 code_gen_ptr
= tb
->tc_ptr
;
1178 /* add a new TB and link it to the physical page tables. phys_page2 is
1179 (-1) to indicate that only one page contains the TB. */
1180 void tb_link_phys(TranslationBlock
*tb
,
1181 target_ulong phys_pc
, target_ulong phys_page2
)
1184 TranslationBlock
**ptb
;
1186 /* Grab the mmap lock to stop another thread invalidating this TB
1187 before we are done. */
1189 /* add in the physical hash table */
1190 h
= tb_phys_hash_func(phys_pc
);
1191 ptb
= &tb_phys_hash
[h
];
1192 tb
->phys_hash_next
= *ptb
;
1195 /* add in the page list */
1196 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1197 if (phys_page2
!= -1)
1198 tb_alloc_page(tb
, 1, phys_page2
);
1200 tb
->page_addr
[1] = -1;
1202 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1203 tb
->jmp_next
[0] = NULL
;
1204 tb
->jmp_next
[1] = NULL
;
1206 /* init original jump addresses */
1207 if (tb
->tb_next_offset
[0] != 0xffff)
1208 tb_reset_jump(tb
, 0);
1209 if (tb
->tb_next_offset
[1] != 0xffff)
1210 tb_reset_jump(tb
, 1);
1212 #ifdef DEBUG_TB_CHECK
1218 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1219 tb[1].tc_ptr. Return NULL if not found */
1220 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1222 int m_min
, m_max
, m
;
1224 TranslationBlock
*tb
;
1228 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1229 tc_ptr
>= (unsigned long)code_gen_ptr
)
1231 /* binary search (cf Knuth) */
1234 while (m_min
<= m_max
) {
1235 m
= (m_min
+ m_max
) >> 1;
1237 v
= (unsigned long)tb
->tc_ptr
;
1240 else if (tc_ptr
< v
) {
1249 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1251 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1253 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1256 tb1
= tb
->jmp_next
[n
];
1258 /* find head of list */
1261 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1264 tb1
= tb1
->jmp_next
[n1
];
1266 /* we are now sure now that tb jumps to tb1 */
1269 /* remove tb from the jmp_first list */
1270 ptb
= &tb_next
->jmp_first
;
1274 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1275 if (n1
== n
&& tb1
== tb
)
1277 ptb
= &tb1
->jmp_next
[n1
];
1279 *ptb
= tb
->jmp_next
[n
];
1280 tb
->jmp_next
[n
] = NULL
;
1282 /* suppress the jump to next tb in generated code */
1283 tb_reset_jump(tb
, n
);
1285 /* suppress jumps in the tb on which we could have jumped */
1286 tb_reset_jump_recursive(tb_next
);
1290 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1292 tb_reset_jump_recursive2(tb
, 0);
1293 tb_reset_jump_recursive2(tb
, 1);
1296 #if defined(TARGET_HAS_ICE)
1297 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1299 target_phys_addr_t addr
;
1301 ram_addr_t ram_addr
;
1304 addr
= cpu_get_phys_page_debug(env
, pc
);
1305 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1307 pd
= IO_MEM_UNASSIGNED
;
1309 pd
= p
->phys_offset
;
1311 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1312 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1316 /* Add a watchpoint. */
1317 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1318 int flags
, CPUWatchpoint
**watchpoint
)
1320 target_ulong len_mask
= ~(len
- 1);
1321 CPUWatchpoint
*wp
, *prev_wp
;
1323 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1324 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1325 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1326 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1329 wp
= qemu_malloc(sizeof(*wp
));
1334 wp
->len_mask
= len_mask
;
1337 /* keep all GDB-injected watchpoints in front */
1338 if (!(flags
& BP_GDB
) && env
->watchpoints
) {
1339 prev_wp
= env
->watchpoints
;
1340 while (prev_wp
->next
!= NULL
&& (prev_wp
->next
->flags
& BP_GDB
))
1341 prev_wp
= prev_wp
->next
;
1346 /* Insert new watchpoint */
1348 wp
->next
= prev_wp
->next
;
1351 wp
->next
= env
->watchpoints
;
1352 env
->watchpoints
= wp
;
1355 wp
->next
->prev
= wp
;
1358 tlb_flush_page(env
, addr
);
1365 /* Remove a specific watchpoint. */
1366 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1369 target_ulong len_mask
= ~(len
- 1);
1372 for (wp
= env
->watchpoints
; wp
!= NULL
; wp
= wp
->next
) {
1373 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1374 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1375 cpu_watchpoint_remove_by_ref(env
, wp
);
1382 /* Remove a specific watchpoint by reference. */
1383 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1385 if (watchpoint
->next
)
1386 watchpoint
->next
->prev
= watchpoint
->prev
;
1387 if (watchpoint
->prev
)
1388 watchpoint
->prev
->next
= watchpoint
->next
;
1390 env
->watchpoints
= watchpoint
->next
;
1392 tlb_flush_page(env
, watchpoint
->vaddr
);
1394 qemu_free(watchpoint
);
1397 /* Remove all matching watchpoints. */
1398 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1402 for (wp
= env
->watchpoints
; wp
!= NULL
; wp
= wp
->next
)
1403 if (wp
->flags
& mask
)
1404 cpu_watchpoint_remove_by_ref(env
, wp
);
1407 /* Add a breakpoint. */
1408 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1409 CPUBreakpoint
**breakpoint
)
1411 #if defined(TARGET_HAS_ICE)
1412 CPUBreakpoint
*bp
, *prev_bp
;
1414 bp
= qemu_malloc(sizeof(*bp
));
1421 /* keep all GDB-injected breakpoints in front */
1422 if (!(flags
& BP_GDB
) && env
->breakpoints
) {
1423 prev_bp
= env
->breakpoints
;
1424 while (prev_bp
->next
!= NULL
&& (prev_bp
->next
->flags
& BP_GDB
))
1425 prev_bp
= prev_bp
->next
;
1430 /* Insert new breakpoint */
1432 bp
->next
= prev_bp
->next
;
1435 bp
->next
= env
->breakpoints
;
1436 env
->breakpoints
= bp
;
1439 bp
->next
->prev
= bp
;
1443 kvm_update_debugger(env
);
1445 breakpoint_invalidate(env
, pc
);
1455 /* Remove a specific breakpoint. */
1456 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1458 #if defined(TARGET_HAS_ICE)
1461 for (bp
= env
->breakpoints
; bp
!= NULL
; bp
= bp
->next
) {
1462 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1463 cpu_breakpoint_remove_by_ref(env
, bp
);
1473 /* Remove a specific breakpoint by reference. */
1474 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1476 #if defined(TARGET_HAS_ICE)
1477 if (breakpoint
->next
)
1478 breakpoint
->next
->prev
= breakpoint
->prev
;
1479 if (breakpoint
->prev
)
1480 breakpoint
->prev
->next
= breakpoint
->next
;
1482 env
->breakpoints
= breakpoint
->next
;
1485 kvm_update_debugger(env
);
1487 breakpoint_invalidate(env
, breakpoint
->pc
);
1489 qemu_free(breakpoint
);
1493 /* Remove all matching breakpoints. */
1494 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1496 #if defined(TARGET_HAS_ICE)
1499 for (bp
= env
->breakpoints
; bp
!= NULL
; bp
= bp
->next
)
1500 if (bp
->flags
& mask
)
1501 cpu_breakpoint_remove_by_ref(env
, bp
);
1505 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1506 CPU loop after each instruction */
1507 void cpu_single_step(CPUState
*env
, int enabled
)
1509 #if defined(TARGET_HAS_ICE)
1510 if (env
->singlestep_enabled
!= enabled
) {
1511 env
->singlestep_enabled
= enabled
;
1512 /* must flush all the translated code to avoid inconsistancies */
1513 /* XXX: only flush what is necessary */
1517 kvm_update_debugger(env
);
1521 /* enable or disable low levels log */
1522 void cpu_set_log(int log_flags
)
1524 loglevel
= log_flags
;
1525 if (loglevel
&& !logfile
) {
1526 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1528 perror(logfilename
);
1531 #if !defined(CONFIG_SOFTMMU)
1532 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1534 static char logfile_buf
[4096];
1535 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1538 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1542 if (!loglevel
&& logfile
) {
1548 void cpu_set_log_filename(const char *filename
)
1550 logfilename
= strdup(filename
);
1555 cpu_set_log(loglevel
);
1558 /* mask must never be zero, except for A20 change call */
1559 void cpu_interrupt(CPUState
*env
, int mask
)
1561 #if !defined(USE_NPTL)
1562 TranslationBlock
*tb
;
1563 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1567 old_mask
= env
->interrupt_request
;
1568 /* FIXME: This is probably not threadsafe. A different thread could
1569 be in the middle of a read-modify-write operation. */
1570 env
->interrupt_request
|= mask
;
1571 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1572 kvm_update_interrupt_request(env
);
1573 #if defined(USE_NPTL)
1574 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1575 problem and hope the cpu will stop of its own accord. For userspace
1576 emulation this often isn't actually as bad as it sounds. Often
1577 signals are used primarily to interrupt blocking syscalls. */
1580 env
->icount_decr
.u16
.high
= 0xffff;
1581 #ifndef CONFIG_USER_ONLY
1582 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1583 an async event happened and we need to process it. */
1585 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1586 cpu_abort(env
, "Raised interrupt while not in I/O function");
1590 tb
= env
->current_tb
;
1591 /* if the cpu is currently executing code, we must unlink it and
1592 all the potentially executing TB */
1593 if (tb
&& !testandset(&interrupt_lock
)) {
1594 env
->current_tb
= NULL
;
1595 tb_reset_jump_recursive(tb
);
1596 resetlock(&interrupt_lock
);
1602 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1604 env
->interrupt_request
&= ~mask
;
1607 const CPULogItem cpu_log_items
[] = {
1608 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1609 "show generated host assembly code for each compiled TB" },
1610 { CPU_LOG_TB_IN_ASM
, "in_asm",
1611 "show target assembly code for each compiled TB" },
1612 { CPU_LOG_TB_OP
, "op",
1613 "show micro ops for each compiled TB" },
1614 { CPU_LOG_TB_OP_OPT
, "op_opt",
1617 "before eflags optimization and "
1619 "after liveness analysis" },
1620 { CPU_LOG_INT
, "int",
1621 "show interrupts/exceptions in short format" },
1622 { CPU_LOG_EXEC
, "exec",
1623 "show trace before each executed TB (lots of logs)" },
1624 { CPU_LOG_TB_CPU
, "cpu",
1625 "show CPU state before block translation" },
1627 { CPU_LOG_PCALL
, "pcall",
1628 "show protected mode far calls/returns/exceptions" },
1631 { CPU_LOG_IOPORT
, "ioport",
1632 "show all i/o ports accesses" },
/* Compare the n-byte token s1[0..n) against the NUL-terminated name s2.
   Returns non-zero only on an exact, full-length match.  Used to match
   comma-separated log-category tokens against cpu_log_items[] names. */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* cast avoids the implicit signed/unsigned comparison between the
       int token length and size_t strlen(); negative n can never match */
    if (n < 0 || strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1644 /* takes a comma separated list of log masks. Return 0 if error. */
1645 int cpu_str_to_log_mask(const char *str
)
1647 const CPULogItem
*item
;
1654 p1
= strchr(p
, ',');
1657 if(cmp1(p
,p1
-p
,"all")) {
1658 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1662 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1663 if (cmp1(p
, p1
- p
, item
->name
))
1677 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1684 fprintf(stderr
, "qemu: fatal: ");
1685 vfprintf(stderr
, fmt
, ap
);
1686 fprintf(stderr
, "\n");
1688 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1690 cpu_dump_state(env
, stderr
, fprintf
, 0);
1693 fprintf(logfile
, "qemu: fatal: ");
1694 vfprintf(logfile
, fmt
, ap2
);
1695 fprintf(logfile
, "\n");
1697 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1699 cpu_dump_state(env
, logfile
, fprintf
, 0);
1709 CPUState
*cpu_copy(CPUState
*env
)
1711 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1712 /* preserve chaining and index */
1713 CPUState
*next_cpu
= new_env
->next_cpu
;
1714 int cpu_index
= new_env
->cpu_index
;
1715 memcpy(new_env
, env
, sizeof(CPUState
));
1716 new_env
->next_cpu
= next_cpu
;
1717 new_env
->cpu_index
= cpu_index
;
1721 #if !defined(CONFIG_USER_ONLY)
1723 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1727 /* Discard jump cache entries for any tb which might potentially
1728 overlap the flushed page. */
1729 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1730 memset (&env
->tb_jmp_cache
[i
], 0,
1731 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1733 i
= tb_jmp_cache_hash_page(addr
);
1734 memset (&env
->tb_jmp_cache
[i
], 0,
1735 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1738 /* NOTE: if flush_global is true, also flush global entries (not
1740 void tlb_flush(CPUState
*env
, int flush_global
)
1744 #if defined(DEBUG_TLB)
1745 printf("tlb_flush:\n");
1747 /* must reset current TB so that interrupts cannot modify the
1748 links while we are modifying them */
1749 env
->current_tb
= NULL
;
1751 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1752 env
->tlb_table
[0][i
].addr_read
= -1;
1753 env
->tlb_table
[0][i
].addr_write
= -1;
1754 env
->tlb_table
[0][i
].addr_code
= -1;
1755 env
->tlb_table
[1][i
].addr_read
= -1;
1756 env
->tlb_table
[1][i
].addr_write
= -1;
1757 env
->tlb_table
[1][i
].addr_code
= -1;
1758 #if (NB_MMU_MODES >= 3)
1759 env
->tlb_table
[2][i
].addr_read
= -1;
1760 env
->tlb_table
[2][i
].addr_write
= -1;
1761 env
->tlb_table
[2][i
].addr_code
= -1;
1762 #if (NB_MMU_MODES == 4)
1763 env
->tlb_table
[3][i
].addr_read
= -1;
1764 env
->tlb_table
[3][i
].addr_write
= -1;
1765 env
->tlb_table
[3][i
].addr_code
= -1;
1770 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1773 if (env
->kqemu_enabled
) {
1774 kqemu_flush(env
, flush_global
);
1780 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1782 if (addr
== (tlb_entry
->addr_read
&
1783 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1784 addr
== (tlb_entry
->addr_write
&
1785 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1786 addr
== (tlb_entry
->addr_code
&
1787 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1788 tlb_entry
->addr_read
= -1;
1789 tlb_entry
->addr_write
= -1;
1790 tlb_entry
->addr_code
= -1;
1794 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1798 #if defined(DEBUG_TLB)
1799 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1801 /* must reset current TB so that interrupts cannot modify the
1802 links while we are modifying them */
1803 env
->current_tb
= NULL
;
1805 addr
&= TARGET_PAGE_MASK
;
1806 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1807 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1808 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1809 #if (NB_MMU_MODES >= 3)
1810 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1811 #if (NB_MMU_MODES == 4)
1812 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1816 tlb_flush_jmp_cache(env
, addr
);
1819 if (env
->kqemu_enabled
) {
1820 kqemu_flush_page(env
, addr
);
1825 /* update the TLBs so that writes to code in the virtual page 'addr'
1827 static void tlb_protect_code(ram_addr_t ram_addr
)
1829 cpu_physical_memory_reset_dirty(ram_addr
,
1830 ram_addr
+ TARGET_PAGE_SIZE
,
1834 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1835 tested for self modifying code */
1836 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1839 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1842 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1843 unsigned long start
, unsigned long length
)
1846 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1847 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1848 if ((addr
- start
) < length
) {
1849 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1854 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1858 unsigned long length
, start1
;
1862 start
&= TARGET_PAGE_MASK
;
1863 end
= TARGET_PAGE_ALIGN(end
);
1865 length
= end
- start
;
1868 len
= length
>> TARGET_PAGE_BITS
;
1870 /* XXX: should not depend on cpu context */
1872 if (env
->kqemu_enabled
) {
1875 for(i
= 0; i
< len
; i
++) {
1876 kqemu_set_notdirty(env
, addr
);
1877 addr
+= TARGET_PAGE_SIZE
;
1881 mask
= ~dirty_flags
;
1882 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1883 for(i
= 0; i
< len
; i
++)
1886 /* we modify the TLB cache so that the dirty bit will be set again
1887 when accessing the range */
1888 start1
= start
+ (unsigned long)phys_ram_base
;
1889 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1890 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1891 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1892 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1893 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1894 #if (NB_MMU_MODES >= 3)
1895 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1896 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1897 #if (NB_MMU_MODES == 4)
1898 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1899 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1905 int cpu_physical_memory_set_dirty_tracking(int enable
)
1910 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1911 in_migration
= enable
;
1915 int cpu_physical_memory_get_dirty_tracking(void)
1917 return in_migration
;
1920 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1922 ram_addr_t ram_addr
;
1924 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1925 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1926 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1927 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1928 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1933 /* update the TLB according to the current state of the dirty bits */
1934 void cpu_tlb_update_dirty(CPUState
*env
)
1937 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1938 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1939 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1940 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1941 #if (NB_MMU_MODES >= 3)
1942 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1943 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1944 #if (NB_MMU_MODES == 4)
1945 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1946 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1951 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1953 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1954 tlb_entry
->addr_write
= vaddr
;
1957 /* update the TLB corresponding to virtual page vaddr
1958 so that it is no longer dirty */
1959 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1963 vaddr
&= TARGET_PAGE_MASK
;
1964 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1965 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1966 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1967 #if (NB_MMU_MODES >= 3)
1968 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1969 #if (NB_MMU_MODES == 4)
1970 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1975 /* add a new TLB entry. At most one entry for a given virtual address
1976 is permitted. Return 0 if OK or 2 if the page could not be mapped
1977 (can only happen in non SOFTMMU mode for I/O pages or pages
1978 conflicting with the host address space). */
1979 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1980 target_phys_addr_t paddr
, int prot
,
1981 int mmu_idx
, int is_softmmu
)
1986 target_ulong address
;
1987 target_ulong code_address
;
1988 target_phys_addr_t addend
;
1992 target_phys_addr_t iotlb
;
1994 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1996 pd
= IO_MEM_UNASSIGNED
;
1998 pd
= p
->phys_offset
;
2000 #if defined(DEBUG_TLB)
2001 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2002 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2007 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2008 /* IO memory case (romd handled later) */
2009 address
|= TLB_MMIO
;
2011 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
2012 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2014 iotlb
= pd
& TARGET_PAGE_MASK
;
2015 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2016 iotlb
|= IO_MEM_NOTDIRTY
;
2018 iotlb
|= IO_MEM_ROM
;
2020 /* IO handlers are currently passed a phsical address.
2021 It would be nice to pass an offset from the base address
2022 of that region. This would avoid having to special case RAM,
2023 and avoid full address decoding in every device.
2024 We can't use the high bits of pd for this because
2025 IO_MEM_ROMD uses these as a ram address. */
2026 iotlb
= (pd
& ~TARGET_PAGE_MASK
) + paddr
;
2029 code_address
= address
;
2030 /* Make accesses to pages with watchpoints go via the
2031 watchpoint trap routines. */
2032 for (wp
= env
->watchpoints
; wp
!= NULL
; wp
= wp
->next
) {
2033 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2034 iotlb
= io_mem_watch
+ paddr
;
2035 /* TODO: The memory case can be optimized by not trapping
2036 reads of pages with a write breakpoint. */
2037 address
|= TLB_MMIO
;
2041 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2042 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2043 te
= &env
->tlb_table
[mmu_idx
][index
];
2044 te
->addend
= addend
- vaddr
;
2045 if (prot
& PAGE_READ
) {
2046 te
->addr_read
= address
;
2051 if (prot
& PAGE_EXEC
) {
2052 te
->addr_code
= code_address
;
2056 if (prot
& PAGE_WRITE
) {
2057 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2058 (pd
& IO_MEM_ROMD
)) {
2059 /* Write access calls the I/O callback. */
2060 te
->addr_write
= address
| TLB_MMIO
;
2061 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2062 !cpu_physical_memory_is_dirty(pd
)) {
2063 te
->addr_write
= address
| TLB_NOTDIRTY
;
2065 te
->addr_write
= address
;
2068 te
->addr_write
= -1;
2075 void tlb_flush(CPUState
*env
, int flush_global
)
2079 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2083 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2084 target_phys_addr_t paddr
, int prot
,
2085 int mmu_idx
, int is_softmmu
)
2090 /* dump memory mappings */
2091 void page_dump(FILE *f
)
2093 unsigned long start
, end
;
2094 int i
, j
, prot
, prot1
;
2097 fprintf(f
, "%-8s %-8s %-8s %s\n",
2098 "start", "end", "size", "prot");
2102 for(i
= 0; i
<= L1_SIZE
; i
++) {
2107 for(j
= 0;j
< L2_SIZE
; j
++) {
2112 if (prot1
!= prot
) {
2113 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2115 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2116 start
, end
, end
- start
,
2117 prot
& PAGE_READ
? 'r' : '-',
2118 prot
& PAGE_WRITE
? 'w' : '-',
2119 prot
& PAGE_EXEC
? 'x' : '-');
2133 int page_get_flags(target_ulong address
)
2137 p
= page_find(address
>> TARGET_PAGE_BITS
);
2143 /* modify the flags of a page and invalidate the code if
2144 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2145 depending on PAGE_WRITE */
2146 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2151 /* mmap_lock should already be held. */
2152 start
= start
& TARGET_PAGE_MASK
;
2153 end
= TARGET_PAGE_ALIGN(end
);
2154 if (flags
& PAGE_WRITE
)
2155 flags
|= PAGE_WRITE_ORG
;
2156 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2157 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2158 /* We may be called for host regions that are outside guest
2162 /* if the write protection is set, then we invalidate the code
2164 if (!(p
->flags
& PAGE_WRITE
) &&
2165 (flags
& PAGE_WRITE
) &&
2167 tb_invalidate_phys_page(addr
, 0, NULL
);
2173 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2179 if (start
+ len
< start
)
2180 /* we've wrapped around */
2183 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2184 start
= start
& TARGET_PAGE_MASK
;
2186 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2187 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2190 if( !(p
->flags
& PAGE_VALID
) )
2193 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2195 if (flags
& PAGE_WRITE
) {
2196 if (!(p
->flags
& PAGE_WRITE_ORG
))
2198 /* unprotect the page if it was put read-only because it
2199 contains translated code */
2200 if (!(p
->flags
& PAGE_WRITE
)) {
2201 if (!page_unprotect(addr
, 0, NULL
))
2210 /* called from signal handler: invalidate the code and unprotect the
2211 page. Return TRUE if the fault was succesfully handled. */
2212 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2214 unsigned int page_index
, prot
, pindex
;
2216 target_ulong host_start
, host_end
, addr
;
2218 /* Technically this isn't safe inside a signal handler. However we
2219 know this only ever happens in a synchronous SEGV handler, so in
2220 practice it seems to be ok. */
2223 host_start
= address
& qemu_host_page_mask
;
2224 page_index
= host_start
>> TARGET_PAGE_BITS
;
2225 p1
= page_find(page_index
);
2230 host_end
= host_start
+ qemu_host_page_size
;
2233 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2237 /* if the page was really writable, then we change its
2238 protection back to writable */
2239 if (prot
& PAGE_WRITE_ORG
) {
2240 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2241 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2242 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2243 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2244 p1
[pindex
].flags
|= PAGE_WRITE
;
2245 /* and since the content will be modified, we must invalidate
2246 the corresponding translated code. */
2247 tb_invalidate_phys_page(address
, pc
, puc
);
2248 #ifdef DEBUG_TB_CHECK
2249 tb_invalidate_check(address
);
2259 static inline void tlb_set_dirty(CPUState
*env
,
2260 unsigned long addr
, target_ulong vaddr
)
2263 #endif /* defined(CONFIG_USER_ONLY) */
2265 #if !defined(CONFIG_USER_ONLY)
2266 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2268 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2269 ram_addr_t orig_memory
);
2270 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2273 if (addr > start_addr) \
2276 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2277 if (start_addr2 > 0) \
2281 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2282 end_addr2 = TARGET_PAGE_SIZE - 1; \
2284 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2285 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2290 /* register physical memory. 'size' must be a multiple of the target
2291 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2293 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2295 ram_addr_t phys_offset
)
2297 target_phys_addr_t addr
, end_addr
;
2300 ram_addr_t orig_size
= size
;
2304 /* XXX: should not depend on cpu context */
2306 if (env
->kqemu_enabled
) {
2307 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2311 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2313 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2314 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2315 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2316 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2317 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2318 ram_addr_t orig_memory
= p
->phys_offset
;
2319 target_phys_addr_t start_addr2
, end_addr2
;
2320 int need_subpage
= 0;
2322 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2324 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2325 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2326 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2327 &p
->phys_offset
, orig_memory
);
2329 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2332 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2334 p
->phys_offset
= phys_offset
;
2335 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2336 (phys_offset
& IO_MEM_ROMD
))
2337 phys_offset
+= TARGET_PAGE_SIZE
;
2340 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2341 p
->phys_offset
= phys_offset
;
2342 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2343 (phys_offset
& IO_MEM_ROMD
))
2344 phys_offset
+= TARGET_PAGE_SIZE
;
2346 target_phys_addr_t start_addr2
, end_addr2
;
2347 int need_subpage
= 0;
2349 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2350 end_addr2
, need_subpage
);
2352 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2353 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2354 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2355 subpage_register(subpage
, start_addr2
, end_addr2
,
2362 /* since each CPU stores ram addresses in its TLB cache, we must
2363 reset the modified entries */
2365 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2370 /* XXX: temporary until new memory mapping API */
2371 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2375 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2377 return IO_MEM_UNASSIGNED
;
2378 return p
->phys_offset
;
2381 /* XXX: better than nothing */
2382 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2385 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2386 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
")\n",
2387 (uint64_t)size
, (uint64_t)phys_ram_size
);
2390 addr
= phys_ram_alloc_offset
;
2391 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2395 void qemu_ram_free(ram_addr_t addr
)
2399 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2401 #ifdef DEBUG_UNASSIGNED
2402 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2404 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2405 do_unassigned_access(addr
, 0, 0, 0, 1);
2410 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
2412 #ifdef DEBUG_UNASSIGNED
2413 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2415 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2416 do_unassigned_access(addr
, 0, 0, 0, 2);
2421 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
2423 #ifdef DEBUG_UNASSIGNED
2424 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2426 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2427 do_unassigned_access(addr
, 0, 0, 0, 4);
2432 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2434 #ifdef DEBUG_UNASSIGNED
2435 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2437 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2438 do_unassigned_access(addr
, 1, 0, 0, 1);
2442 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2444 #ifdef DEBUG_UNASSIGNED
2445 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2447 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2448 do_unassigned_access(addr
, 1, 0, 0, 2);
2452 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2454 #ifdef DEBUG_UNASSIGNED
2455 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2457 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2458 do_unassigned_access(addr
, 1, 0, 0, 4);
2462 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2463 unassigned_mem_readb
,
2464 unassigned_mem_readw
,
2465 unassigned_mem_readl
,
2468 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2469 unassigned_mem_writeb
,
2470 unassigned_mem_writew
,
2471 unassigned_mem_writel
,
2474 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2478 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2479 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2480 #if !defined(CONFIG_USER_ONLY)
2481 tb_invalidate_phys_page_fast(ram_addr
, 1);
2482 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2485 stb_p(phys_ram_base
+ ram_addr
, val
);
2487 if (cpu_single_env
->kqemu_enabled
&&
2488 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2489 kqemu_modify_page(cpu_single_env
, ram_addr
);
2491 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2492 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2493 /* we remove the notdirty callback only if the code has been
2495 if (dirty_flags
== 0xff)
2496 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2499 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2503 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2504 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2505 #if !defined(CONFIG_USER_ONLY)
2506 tb_invalidate_phys_page_fast(ram_addr
, 2);
2507 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2510 stw_p(phys_ram_base
+ ram_addr
, val
);
2512 if (cpu_single_env
->kqemu_enabled
&&
2513 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2514 kqemu_modify_page(cpu_single_env
, ram_addr
);
2516 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2517 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2518 /* we remove the notdirty callback only if the code has been
2520 if (dirty_flags
== 0xff)
2521 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2524 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2528 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2529 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2530 #if !defined(CONFIG_USER_ONLY)
2531 tb_invalidate_phys_page_fast(ram_addr
, 4);
2532 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2535 stl_p(phys_ram_base
+ ram_addr
, val
);
2537 if (cpu_single_env
->kqemu_enabled
&&
2538 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2539 kqemu_modify_page(cpu_single_env
, ram_addr
);
2541 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2542 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2543 /* we remove the notdirty callback only if the code has been
2545 if (dirty_flags
== 0xff)
2546 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2549 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2550 NULL
, /* never used */
2551 NULL
, /* never used */
2552 NULL
, /* never used */
2555 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2556 notdirty_mem_writeb
,
2557 notdirty_mem_writew
,
2558 notdirty_mem_writel
,
2561 /* Generate a debug exception if a watchpoint has been hit. */
2562 static void check_watchpoint(int offset
, int len_mask
, int flags
)
2564 CPUState
*env
= cpu_single_env
;
2565 target_ulong pc
, cs_base
;
2566 TranslationBlock
*tb
;
2571 if (env
->watchpoint_hit
) {
2572 /* We re-entered the check after replacing the TB. Now raise
2573 * the debug interrupt so that is will trigger after the
2574 * current instruction. */
2575 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2578 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2579 for (wp
= env
->watchpoints
; wp
!= NULL
; wp
= wp
->next
) {
2580 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
2581 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
2582 wp
->flags
|= BP_WATCHPOINT_HIT
;
2583 if (!env
->watchpoint_hit
) {
2584 env
->watchpoint_hit
= wp
;
2585 tb
= tb_find_pc(env
->mem_io_pc
);
2587 cpu_abort(env
, "check_watchpoint: could not find TB for "
2588 "pc=%p", (void *)env
->mem_io_pc
);
2590 cpu_restore_state(tb
, env
, env
->mem_io_pc
, NULL
);
2591 tb_phys_invalidate(tb
, -1);
2592 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2593 env
->exception_index
= EXCP_DEBUG
;
2595 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2596 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
2598 cpu_resume_from_signal(env
, NULL
);
2601 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2606 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2607 so these check for a hit then pass through to the normal out-of-line
2609 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2611 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
2612 return ldub_phys(addr
);
2615 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2617 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
2618 return lduw_phys(addr
);
2621 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2623 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
2624 return ldl_phys(addr
);
2627 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2630 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
2631 stb_phys(addr
, val
);
2634 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2637 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
2638 stw_phys(addr
, val
);
2641 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2644 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
2645 stl_phys(addr
, val
);
2648 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2654 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2660 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2666 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2667 #if defined(DEBUG_SUBPAGE)
2668 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2669 mmio
, len
, addr
, idx
);
2671 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2676 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2677 uint32_t value
, unsigned int len
)
2681 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2682 #if defined(DEBUG_SUBPAGE)
2683 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2684 mmio
, len
, addr
, idx
, value
);
2686 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
2689 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2691 #if defined(DEBUG_SUBPAGE)
2692 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2695 return subpage_readlen(opaque
, addr
, 0);
2698 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2701 #if defined(DEBUG_SUBPAGE)
2702 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2704 subpage_writelen(opaque
, addr
, value
, 0);
2707 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2709 #if defined(DEBUG_SUBPAGE)
2710 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2713 return subpage_readlen(opaque
, addr
, 1);
2716 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2719 #if defined(DEBUG_SUBPAGE)
2720 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2722 subpage_writelen(opaque
, addr
, value
, 1);
2725 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2727 #if defined(DEBUG_SUBPAGE)
2728 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2731 return subpage_readlen(opaque
, addr
, 2);
2734 static void subpage_writel (void *opaque
,
2735 target_phys_addr_t addr
, uint32_t value
)
2737 #if defined(DEBUG_SUBPAGE)
2738 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2740 subpage_writelen(opaque
, addr
, value
, 2);
2743 static CPUReadMemoryFunc
*subpage_read
[] = {
2749 static CPUWriteMemoryFunc
*subpage_write
[] = {
2755 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2761 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2763 idx
= SUBPAGE_IDX(start
);
2764 eidx
= SUBPAGE_IDX(end
);
2765 #if defined(DEBUG_SUBPAGE)
2766 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2767 mmio
, start
, end
, idx
, eidx
, memory
);
2769 memory
>>= IO_MEM_SHIFT
;
2770 for (; idx
<= eidx
; idx
++) {
2771 for (i
= 0; i
< 4; i
++) {
2772 if (io_mem_read
[memory
][i
]) {
2773 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2774 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2776 if (io_mem_write
[memory
][i
]) {
2777 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2778 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2786 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2787 ram_addr_t orig_memory
)
2792 mmio
= qemu_mallocz(sizeof(subpage_t
));
2795 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2796 #if defined(DEBUG_SUBPAGE)
2797 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2798 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2800 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2801 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
);
2807 static int get_free_io_mem_idx(void)
2811 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
2812 if (!io_mem_used
[i
]) {
2820 static void io_mem_init(void)
2824 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2825 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2826 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2830 io_mem_watch
= cpu_register_io_memory(0, watch_mem_read
,
2831 watch_mem_write
, NULL
);
2832 /* alloc dirty bits array */
2833 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2834 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2837 /* mem_read and mem_write are arrays of functions containing the
2838 function to access byte (index 0), word (index 1) and dword (index
2839 2). Functions can be omitted with a NULL function pointer. The
2840 registered functions may be modified dynamically later.
2841 If io_index is non zero, the corresponding io zone is
2842 modified. If it is zero, a new io zone is allocated. The return
2843 value can be used with cpu_register_physical_memory(). (-1) is
2844 returned if error. */
2845 int cpu_register_io_memory(int io_index
,
2846 CPUReadMemoryFunc
**mem_read
,
2847 CPUWriteMemoryFunc
**mem_write
,
2850 int i
, subwidth
= 0;
2852 if (io_index
<= 0) {
2853 io_index
= get_free_io_mem_idx();
2857 if (io_index
>= IO_MEM_NB_ENTRIES
)
2861 for(i
= 0;i
< 3; i
++) {
2862 if (!mem_read
[i
] || !mem_write
[i
])
2863 subwidth
= IO_MEM_SUBWIDTH
;
2864 io_mem_read
[io_index
][i
] = mem_read
[i
];
2865 io_mem_write
[io_index
][i
] = mem_write
[i
];
2867 io_mem_opaque
[io_index
] = opaque
;
2868 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2871 void cpu_unregister_io_memory(int io_table_address
)
2874 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
2876 for (i
=0;i
< 3; i
++) {
2877 io_mem_read
[io_index
][i
] = unassigned_mem_read
[i
];
2878 io_mem_write
[io_index
][i
] = unassigned_mem_write
[i
];
2880 io_mem_opaque
[io_index
] = NULL
;
2881 io_mem_used
[io_index
] = 0;
2884 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2886 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2889 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2891 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2894 #endif /* !defined(CONFIG_USER_ONLY) */
2896 /* physical memory access (slow version, mainly for debug) */
2897 #if defined(CONFIG_USER_ONLY)
2898 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2899 int len
, int is_write
)
2906 page
= addr
& TARGET_PAGE_MASK
;
2907 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2910 flags
= page_get_flags(page
);
2911 if (!(flags
& PAGE_VALID
))
2914 if (!(flags
& PAGE_WRITE
))
2916 /* XXX: this code should not depend on lock_user */
2917 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2918 /* FIXME - should this return an error rather than just fail? */
2921 unlock_user(p
, addr
, l
);
2923 if (!(flags
& PAGE_READ
))
2925 /* XXX: this code should not depend on lock_user */
2926 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2927 /* FIXME - should this return an error rather than just fail? */
2930 unlock_user(p
, addr
, 0);
2939 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2940 int len
, int is_write
)
2945 target_phys_addr_t page
;
2950 page
= addr
& TARGET_PAGE_MASK
;
2951 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2954 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2956 pd
= IO_MEM_UNASSIGNED
;
2958 pd
= p
->phys_offset
;
2962 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2963 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2964 /* XXX: could force cpu_single_env to NULL to avoid
2966 if (l
>= 4 && ((addr
& 3) == 0)) {
2967 /* 32 bit write access */
2969 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2971 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2972 /* 16 bit write access */
2974 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2977 /* 8 bit write access */
2979 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2983 unsigned long addr1
;
2984 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2986 ptr
= phys_ram_base
+ addr1
;
2987 memcpy(ptr
, buf
, l
);
2988 if (!cpu_physical_memory_is_dirty(addr1
)) {
2989 /* invalidate code */
2990 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2992 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2993 (0xff & ~CODE_DIRTY_FLAG
);
2995 /* qemu doesn't execute guest code directly, but kvm does
2996 therefore fluch instruction caches */
2998 flush_icache_range((unsigned long)ptr
,
2999 ((unsigned long)ptr
)+l
);
3002 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3003 !(pd
& IO_MEM_ROMD
)) {
3005 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3006 if (l
>= 4 && ((addr
& 3) == 0)) {
3007 /* 32 bit read access */
3008 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3011 } else if (l
>= 2 && ((addr
& 1) == 0)) {
3012 /* 16 bit read access */
3013 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
3017 /* 8 bit read access */
3018 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
3024 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3025 (addr
& ~TARGET_PAGE_MASK
);
3026 memcpy(buf
, ptr
, l
);
3035 /* used for ROM loading : can write in RAM and ROM */
3036 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3037 const uint8_t *buf
, int len
)
3041 target_phys_addr_t page
;
3046 page
= addr
& TARGET_PAGE_MASK
;
3047 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3050 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3052 pd
= IO_MEM_UNASSIGNED
;
3054 pd
= p
->phys_offset
;
3057 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
3058 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
3059 !(pd
& IO_MEM_ROMD
)) {
3062 unsigned long addr1
;
3063 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3065 ptr
= phys_ram_base
+ addr1
;
3066 memcpy(ptr
, buf
, l
);
3075 /* warning: addr must be aligned */
3076 uint32_t ldl_phys(target_phys_addr_t addr
)
3084 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3086 pd
= IO_MEM_UNASSIGNED
;
3088 pd
= p
->phys_offset
;
3091 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3092 !(pd
& IO_MEM_ROMD
)) {
3094 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3095 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3098 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3099 (addr
& ~TARGET_PAGE_MASK
);
3105 /* warning: addr must be aligned */
3106 uint64_t ldq_phys(target_phys_addr_t addr
)
3114 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3116 pd
= IO_MEM_UNASSIGNED
;
3118 pd
= p
->phys_offset
;
3121 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3122 !(pd
& IO_MEM_ROMD
)) {
3124 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3125 #ifdef TARGET_WORDS_BIGENDIAN
3126 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
3127 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
3129 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3130 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
3134 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3135 (addr
& ~TARGET_PAGE_MASK
);
3142 uint32_t ldub_phys(target_phys_addr_t addr
)
3145 cpu_physical_memory_read(addr
, &val
, 1);
3150 uint32_t lduw_phys(target_phys_addr_t addr
)
3153 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
3154 return tswap16(val
);
/* Branch-prediction hints: map to GCC's __builtin_expect when available,
   otherwise compile away to the bare expression. */
#ifdef __GNUC__
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
#else
#define likely(x)       x
#define unlikely(x)     x
#endif
3165 /* warning: addr must be aligned. The ram page is not masked as dirty
3166 and the code inside is not invalidated. It is useful if the dirty
3167 bits are used to track modified PTEs */
3168 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
3175 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3177 pd
= IO_MEM_UNASSIGNED
;
3179 pd
= p
->phys_offset
;
3182 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3183 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3184 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3186 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3187 ptr
= phys_ram_base
+ addr1
;
3190 if (unlikely(in_migration
)) {
3191 if (!cpu_physical_memory_is_dirty(addr1
)) {
3192 /* invalidate code */
3193 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3195 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3196 (0xff & ~CODE_DIRTY_FLAG
);
3202 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
3209 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3211 pd
= IO_MEM_UNASSIGNED
;
3213 pd
= p
->phys_offset
;
3216 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3217 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3218 #ifdef TARGET_WORDS_BIGENDIAN
3219 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
3220 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
3222 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3223 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
3226 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3227 (addr
& ~TARGET_PAGE_MASK
);
3232 /* warning: addr must be aligned */
3233 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
3240 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3242 pd
= IO_MEM_UNASSIGNED
;
3244 pd
= p
->phys_offset
;
3247 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3248 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3249 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3251 unsigned long addr1
;
3252 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3254 ptr
= phys_ram_base
+ addr1
;
3256 if (!cpu_physical_memory_is_dirty(addr1
)) {
3257 /* invalidate code */
3258 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3260 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3261 (0xff & ~CODE_DIRTY_FLAG
);
3267 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3270 cpu_physical_memory_write(addr
, &v
, 1);
3274 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3276 uint16_t v
= tswap16(val
);
3277 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3281 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3284 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
3289 /* virtual memory access for debug */
3290 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3291 uint8_t *buf
, int len
, int is_write
)
3294 target_phys_addr_t phys_addr
;
3298 page
= addr
& TARGET_PAGE_MASK
;
3299 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3300 /* if no physical page mapped, return an error */
3301 if (phys_addr
== -1)
3303 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3306 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
3315 /* in deterministic execution mode, instructions doing device I/Os
3316 must be at the end of the TB */
3317 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
3319 TranslationBlock
*tb
;
3321 target_ulong pc
, cs_base
;
3324 tb
= tb_find_pc((unsigned long)retaddr
);
3326 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
3329 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
3330 cpu_restore_state(tb
, env
, (unsigned long)retaddr
, NULL
);
3331 /* Calculate how many instructions had been executed before the fault
3333 n
= n
- env
->icount_decr
.u16
.low
;
3334 /* Generate a new TB ending on the I/O insn. */
3336 /* On MIPS and SH, delay slot instructions can only be restarted if
3337 they were already the first instruction in the TB. If this is not
3338 the first instruction in a TB then re-execute the preceding
3340 #if defined(TARGET_MIPS)
3341 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
3342 env
->active_tc
.PC
-= 4;
3343 env
->icount_decr
.u16
.low
++;
3344 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
3346 #elif defined(TARGET_SH4)
3347 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
3350 env
->icount_decr
.u16
.low
++;
3351 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
3354 /* This should never happen. */
3355 if (n
> CF_COUNT_MASK
)
3356 cpu_abort(env
, "TB too big during recompile");
3358 cflags
= n
| CF_LAST_IO
;
3360 cs_base
= tb
->cs_base
;
3362 tb_phys_invalidate(tb
, -1);
3363 /* FIXME: In theory this could raise an exception. In practice
3364 we have already translated the block once so it's probably ok. */
3365 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
3366 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3367 the first in the TB) then we end up generating a whole new TB and
3368 repeating the fault, which is horribly inefficient.
3369 Better would be to execute just this insn uncached, or generate a
3371 cpu_resume_from_signal(env
, NULL
);
3374 void dump_exec_info(FILE *f
,
3375 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
3377 int i
, target_code_size
, max_target_code_size
;
3378 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
3379 TranslationBlock
*tb
;
3381 target_code_size
= 0;
3382 max_target_code_size
= 0;
3384 direct_jmp_count
= 0;
3385 direct_jmp2_count
= 0;
3386 for(i
= 0; i
< nb_tbs
; i
++) {
3388 target_code_size
+= tb
->size
;
3389 if (tb
->size
> max_target_code_size
)
3390 max_target_code_size
= tb
->size
;
3391 if (tb
->page_addr
[1] != -1)
3393 if (tb
->tb_next_offset
[0] != 0xffff) {
3395 if (tb
->tb_next_offset
[1] != 0xffff) {
3396 direct_jmp2_count
++;
3400 /* XXX: avoid using doubles ? */
3401 cpu_fprintf(f
, "Translation buffer state:\n");
3402 cpu_fprintf(f
, "gen code size %ld/%ld\n",
3403 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
3404 cpu_fprintf(f
, "TB count %d/%d\n",
3405 nb_tbs
, code_gen_max_blocks
);
3406 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
3407 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3408 max_target_code_size
);
3409 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3410 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3411 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3412 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3414 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3415 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3417 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3419 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3420 cpu_fprintf(f
, "\nStatistics:\n");
3421 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3422 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3423 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3424 tcg_dump_info(f
, cpu_fprintf
);
3427 #if !defined(CONFIG_USER_ONLY)
3429 #define MMUSUFFIX _cmmu
3430 #define GETPC() NULL
3431 #define env cpu_single_env
3432 #define SOFTMMU_CODE_ACCESS
3435 #include "softmmu_template.h"
3438 #include "softmmu_template.h"
3441 #include "softmmu_template.h"
3444 #include "softmmu_template.h"