 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>
#include "qemu-common.h"
#if !defined(TARGET_IA64)
#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_IA64)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
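/* Added note: pages are looked up through a two-level table. l1_map is
   indexed with the high bits of the target page index (index >> L2_BITS)
   and each level-1 slot points to an array of L2_SIZE PageDesc entries
   selected with (index & (L2_SIZE - 1)), as done in page_find() and
   page_find_alloc() below. l1_phys_map plays the same role for physical
   pages and is allocated lazily in phys_page_find_alloc(). */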
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;

static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
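/* Added note: a subpage_t is used when several handlers share one target
   page. SUBPAGE_IDX() extracts the offset of an access within the page,
   which indexes the per-byte mem_read/mem_write/opaque tables above so
   the access can be routed to the handler registered for that range. */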
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    long long startaddr, endaddr;

    last_brk = (unsigned long)sbrk(0);
    f = fopen("/proc/self/maps", "r");
    n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
    startaddr = MIN(startaddr,
                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    endaddr = MIN(endaddr,
                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    page_set_flags(startaddr & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc **page_l1_map(target_ulong index)
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
    return &l1_map[index >> L2_BITS];
static inline PageDesc *page_find_alloc(target_ulong index)
    lp = page_l1_map(index);

    /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
    size_t len = sizeof(PageDesc) * L2_SIZE;
    /* Don't use qemu_malloc because it may recurse.  */
    p = mmap(0, len, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == (target_ulong)addr) {
        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
    p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
static inline PageDesc *page_find(target_ulong index)
    lp = page_l1_map(index);
    return p + (index & (L2_SIZE - 1));

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    /* allocate if not found */
    p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
    memset(p, 0, sizeof(void *) * L1_SIZE);
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    /* allocate if not found */
    pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++)
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Map the buffer below 32M, so we can use direct calls and branches */
    start = (void *) 0x01000000UL;
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
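/* Added note: code_gen_buffer_max_size leaves code_gen_max_block_size()
   bytes of headroom at the end of the buffer, so the block currently
   being translated can never run past the buffer before tb_alloc()
   triggers a flush. */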
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
void cpu_exec_init(CPUState *env)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    env->thread_id = GetCurrentProcessId();
    env->thread_id = getpid();
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            invalidate_page_bitmap(p);

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
static void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb1 = tb1->jmp_next[n1];
    /* check end of list */
    printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

    *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
    ptb = (TranslationBlock **)((char *)tb1 + next_offset);
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    *ptb = tb1->page_next[n1];
    ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
    /* find tb(n) in circular list */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    if (n1 == n && tb1 == tb)
    ptb = &tb1->jmp_first;
    ptb = &tb1->jmp_next[n1];
    /* now we can suppress tb(n) from the list */
    *ptb = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;
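/* Added note: the jmp_first/jmp_next lists tag each TranslationBlock
   pointer with a jump slot number in its two low bits, which is why list
   entries are masked with ~3 before being dereferenced; recovering the
   slot number from the low bits is not shown in this excerpt. */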
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb2 = tb1->jmp_next[n1];
    tb_reset_jump(tb1, n1);
    tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        mask &= ~(0xff << (end & 7));
    start = (start + 8) & ~7;
    while (start < end1) {
    mask = ~(0xff << (end & 7));
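/* Added note: set_bits() marks bits [start, start + len) in the byte
   array 'tab': the first and last partial bytes are handled with masks,
   and any full bytes in between are set whole. */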
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    tb = (TranslationBlock *)((long)tb & ~3);
    /* NOTE: this is subtle as a TB may span two physical pages */
    /* NOTE: tb_end may be after the end of the page, but
       it is not a problem */
    tb_start = tb->pc & ~TARGET_PAGE_MASK;
    tb_end = tb_start + tb->size;
    if (tb_end > TARGET_PAGE_SIZE)
        tb_end = TARGET_PAGE_SIZE;
    tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
    set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
    tb = tb->page_next[n];
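/* Added note: the resulting code_bitmap has one bit per byte of the page,
   set for every byte covered by a translated block. The fast write path
   in tb_invalidate_phys_page_fast() tests these bits to decide whether a
   guest write actually touches translated code. */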
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
    /* flush must be done */
    /* cannot fail at this point */
    /* Don't forget to invalidate previous TB info.  */
    tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
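/* Added note: after each translation, code_gen_ptr is rounded up to the
   next CODE_GEN_ALIGN boundary so consecutive blocks in the code buffer
   start at aligned host addresses. */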
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = (TranslationBlock *)((long)tb & ~3);
    tb_next = tb->page_next[n];
    /* NOTE: this is subtle as a TB may span two physical pages */
    /* NOTE: tb_end may be after the end of the page, but
       it is not a problem */
    tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    tb_end = tb_start + tb->size;
    tb_start = tb->page_addr[1];
    tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
    if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb_not_found) {
            current_tb_not_found = 0;
            if (env->mem_io_pc) {
                /* now we have a real cpu fault */
                current_tb = tb_find_pc(env->mem_io_pc);
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env,
                              env->mem_io_pc, NULL);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        /* we need to do that to handle the case where a signal
           occurs while doing tb_phys_invalidate() */
        saved_tb = env->current_tb;
        env->current_tb = NULL;
        tb_phys_invalidate(tb, -1);
        env->current_tb = saved_tb;
        if (env->interrupt_request && env->current_tb)
            cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    invalidate_page_bitmap(p);
    if (is_cpu_write_access) {
        tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
    fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
            cpu_single_env->mem_io_vaddr, len,
            cpu_single_env->eip,
            cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
    tb_invalidate_phys_page_range(start, start + len, 1);
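/* Added note: this fast path only falls back to the full
   tb_invalidate_phys_page_range() when either no code bitmap exists for
   the page or one of the 'len' bits starting at 'offset' is set,
   i.e. when the write may overlap translated code. */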
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
#endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
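/* Added note: the binary search relies on tbs[] being filled in
   translation order, so the tc_ptr values of the nb_tbs entries are
   monotonically increasing inside the code buffer. */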
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
    /* find head of list */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb1 = tb1->jmp_next[n1];
    /* we are now sure that tb jumps to tb1 */
    /* remove tb from the jmp_first list */
    ptb = &tb_next->jmp_first;
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    if (n1 == n && tb1 == tb)
    ptb = &tb1->jmp_next[n1];
    *ptb = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;

    /* suppress the jump to next tb in generated code */
    tb_reset_jump(tb, n);

    /* suppress jumps in the tb on which we could have jumped */
    tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = IO_MEM_UNASSIGNED;
    pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    wp = qemu_malloc(sizeof(*wp));
    wp->len_mask = len_mask;
    /* keep all GDB-injected watchpoints in front */
    TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    tlb_flush_page(env, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
    tlb_flush_page(env, watchpoint->vaddr);
    qemu_free(watchpoint);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));
    /* keep all GDB-injected breakpoints in front */
    TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    kvm_update_debugger(env);
    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
    kvm_update_debugger(env);
    breakpoint_invalidate(env, breakpoint->pc);
    qemu_free(breakpoint);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
    kvm_update_debugger(env);
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        static char logfile_buf[4096];
        setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
    if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
    /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
       an async event happened and we need to process it.  */
        && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
        cpu_abort(env, "Raised interrupt while not in I/O function");
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },

static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

    p1 = strchr(p, ',');
    if(cmp1(p,p1-p,"all")) {
        for(item = cpu_log_items; item->mask != 0; item++) {
    for(item = cpu_log_items; item->mask != 0; item++) {
        if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    fprintf(logfile, "qemu: fatal: ");
    vfprintf(logfile, fmt, ap2);
    fprintf(logfile, "\n");
    cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, logfile, fprintf, 0);

CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
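/* Added note: two bucket ranges are cleared because a TB may start on the
   page preceding 'addr' and spill into the flushed page; clearing the
   cache range for both addr and addr - TARGET_PAGE_SIZE drops every
   cached entry that could reference such a block. */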
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;

void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);

    tlb_flush_jmp_cache(env, addr);

    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
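/* Added note: marking the entry with TLB_NOTDIRTY forces subsequent guest
   writes to that page through the slow I/O path (notdirty_mem_*), which
   updates the dirty state in phys_ram_dirty before performing the store. */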
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
int cpu_physical_memory_set_dirty_tracking(int enable)
    r = kvm_physical_memory_set_dirty_tracking(enable);
    in_migration = enable;

int cpu_physical_memory_get_dirty_tracking(void)
    return in_migration;

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
    kvm_physical_sync_dirty_bitmap(start_addr, end_addr);

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    pd = IO_MEM_UNASSIGNED;
    pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p->region_offset;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
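/* Added note: the low bits of addr_read/addr_write/addr_code encode how
   an access must be handled (TLB_MMIO for device memory, TLB_NOTDIRTY for
   clean RAM pages), while te->addend converts a guest virtual address
   directly into a host pointer for the fast RAM case. */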
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)

/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0;j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                        start, end, end - start,
                        prot & PAGE_READ ? 'r' : '-',
                        prot & PAGE_WRITE ? 'w' : '-',
                        prot & PAGE_EXEC ? 'x' : '-');
int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
    if (start + len < start)
        /* we've wrapped around */
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !(p->flags & PAGE_VALID) )
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        /* if the page was really writable, then we change its
           protection back to writable */
        if (prot & PAGE_WRITE_ORG) {
            pindex = (address - host_start) >> TARGET_PAGE_BITS;
            if (!(p1[pindex].flags & PAGE_WRITE)) {
                mprotect((void *)g2h(host_start), qemu_host_page_size,
                         (prot & PAGE_BITS) | PAGE_WRITE);
                p1[pindex].flags |= PAGE_WRITE;
                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
                tb_invalidate_check(address);

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr) \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
            if (start_addr2 > 0) \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
            end_addr2 = TARGET_PAGE_SIZE - 1; \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1) \

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
2281 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2283 ram_addr_t phys_offset
,
2284 ram_addr_t region_offset
)
2286 target_phys_addr_t addr
, end_addr
;
2289 ram_addr_t orig_size
= size
;
2293 /* XXX: should not depend on cpu context */
2295 if (env
->kqemu_enabled
) {
2296 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2300 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2302 region_offset
&= TARGET_PAGE_MASK
;
2303 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2304 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2305 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           region_offset);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
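
/* Illustrative sketch, not part of the original file: board setup code is
 * assumed to combine qemu_ram_alloc() (defined further below) with the
 * registration call above.  The addresses and sizes are invented for the
 * example; cpu_register_physical_memory() is assumed to be the usual
 * wrapper that passes a region_offset of 0.
 */
#if 0
static void example_board_init(void)
{
    ram_addr_t ram_offset, bios_offset;

    /* back the first 4 MB of guest-physical address space with host RAM */
    ram_offset = qemu_ram_alloc(4 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 4 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    /* map a 256 KB ROM (read-only RAM) just below the 4 GB boundary */
    bios_offset = qemu_ram_alloc(256 * 1024);
    cpu_register_physical_memory(0xfffc0000, 256 * 1024,
                                 bios_offset | IO_MEM_ROM);
}
#endif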
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
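
/* Illustrative sketch, not part of the original file: a debug front end is
 * assumed to arm a watchpoint roughly like this; the watched page is then
 * remapped through io_mem_watch so that accesses funnel into the
 * watch_mem_* handlers below and finally into check_watchpoint().  The
 * exact cpu_watchpoint_insert() signature may differ between versions.
 */
#if 0
static void example_arm_watchpoint(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* watch 4 bytes at 'addr' for writes and stop before the access */
    cpu_watchpoint_insert(env, addr, 4,
                          BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp);
}
#endif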
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   The registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
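
/* Illustrative sketch, not part of the original file: a device model is
 * assumed to register its MMIO callbacks with cpu_register_io_memory()
 * and map the returned token with cpu_register_physical_memory().  The
 * device struct, callbacks and base address are invented for the example;
 * leaving the byte/word entries NULL makes the core mark the region
 * IO_MEM_SUBWIDTH, as in the loop above.
 */
#if 0
typedef struct ExampleDev {
    uint32_t reg;
} ExampleDev;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDev *d = opaque;
    return d->reg;                  /* same value for every offset */
}

static void example_writel(void *opaque, target_phys_addr_t addr,
                           uint32_t val)
{
    ExampleDev *d = opaque;
    d->reg = val;
}

static CPUReadMemoryFunc *example_read[3] = {
    NULL, NULL, example_readl,      /* only 32-bit accesses implemented */
};

static CPUWriteMemoryFunc *example_write[3] = {
    NULL, NULL, example_writel,
};

static void example_dev_init(target_phys_addr_t base, ExampleDev *d)
{
    int io;

    io = cpu_register_io_memory(0, example_read, example_write, d);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif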
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
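
/* Illustrative sketch, not part of the original file: DMA-style device code
 * is assumed to move guest-physical memory through the helper above, via
 * the usual cpu_physical_memory_read()/write() wrappers.  The bounce
 * buffer and chunking are invented for the example.
 */
#if 0
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t buf[4096];

    while (len > 0) {
        int l = len < (int)sizeof(buf) ? len : (int)sizeof(buf);
        cpu_physical_memory_read(src, buf, l);
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
}
#endif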
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

#ifdef __GNUC__
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
#else
#define likely(x)       x
#define unlikely(x)     x
#endif
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
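
/* Illustrative sketch, not part of the original file: the fixed-width
 * helpers above are typically used for small aligned accesses such as
 * updating a guest page-table or descriptor entry; the address and bit
 * layout are invented for the example.
 */
#if 0
static void example_touch_descriptor(target_phys_addr_t desc_addr)
{
    uint32_t lo = ldl_phys(desc_addr);        /* aligned 32-bit load */

    stl_phys(desc_addr, lo | 1);              /* set a 'present' bit */
    stl_phys_notdirty(desc_addr + 4, 0);      /* write without dirty marking */
}
#endif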
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
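
/* Illustrative sketch, not part of the original file: a gdb-stub style
 * caller is assumed to read guest-virtual memory through the debug helper
 * above; the wrapper name is invented for the example.
 */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    /* returns 0 on success, -1 if a page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif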
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif