2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
48 #if defined(CONFIG_USER_ONLY)
52 //#define DEBUG_TB_INVALIDATE
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #define MMAP_AREA_START 0x00000000
72 #define MMAP_AREA_END 0xa8000000
74 #if defined(TARGET_SPARC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 41
76 #elif defined(TARGET_SPARC)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #elif defined(TARGET_ALPHA)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #define TARGET_VIRT_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_PPC64)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
90 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
91 #define TARGET_PHYS_ADDR_SPACE_BITS 32
94 static TranslationBlock
*tbs
;
95 int code_gen_max_blocks
;
96 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
101 #if defined(__arm__) || defined(__sparc_v9__)
102 /* The prologue must be reachable with a direct jump. ARM and Sparc64
103 have limited branch ranges (possibly also PPC) so place it in a
104 section close to code segment. */
105 #define code_gen_section \
106 __attribute__((__section__(".gen_code"))) \
107 __attribute__((aligned (32)))
109 #define code_gen_section \
110 __attribute__((aligned (32)))
113 uint8_t code_gen_prologue
[1024] code_gen_section
;
114 static uint8_t *code_gen_buffer
;
115 static unsigned long code_gen_buffer_size
;
116 /* threshold to flush the translated code buffer */
117 static unsigned long code_gen_buffer_max_size
;
118 uint8_t *code_gen_ptr
;
120 #if !defined(CONFIG_USER_ONLY)
121 ram_addr_t phys_ram_size
;
123 uint8_t *phys_ram_base
;
124 uint8_t *phys_ram_dirty
;
126 static int in_migration
;
127 static ram_addr_t phys_ram_alloc_offset
= 0;
131 /* current CPU in the current thread. It is only valid inside
133 CPUState
*cpu_single_env
;
134 /* 0 = Do not count executed instructions.
135 1 = Precise instruction counting.
136 2 = Adaptive rate instruction counting. */
138 /* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
142 typedef struct PageDesc
{
143 /* list of TBs intersecting this ram page */
144 TranslationBlock
*first_tb
;
145 /* in order to optimize self modifying code, we count the number
146 of lookups we do to a given page to use a bitmap */
147 unsigned int code_write_count
;
148 uint8_t *code_bitmap
;
149 #if defined(CONFIG_USER_ONLY)
154 typedef struct PhysPageDesc
{
155 /* offset in host memory of the page + io_index in the low bits */
156 ram_addr_t phys_offset
;
160 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
161 /* XXX: this is a temporary hack for alpha target.
162 * In the future, this is to be replaced by a multi-level table
163 * to actually be able to handle the complete 64 bits address space.
165 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170 #define L1_SIZE (1 << L1_BITS)
171 #define L2_SIZE (1 << L2_BITS)
173 unsigned long qemu_real_host_page_size
;
174 unsigned long qemu_host_page_bits
;
175 unsigned long qemu_host_page_size
;
176 unsigned long qemu_host_page_mask
;
178 /* XXX: for system emulation, it could just be an array */
179 static PageDesc
*l1_map
[L1_SIZE
];
180 static PhysPageDesc
**l1_phys_map
;
182 #if !defined(CONFIG_USER_ONLY)
183 static void io_mem_init(void);
185 /* io memory support */
186 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
187 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
188 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
189 char io_mem_used
[IO_MEM_NB_ENTRIES
];
190 static int io_mem_watch
;
194 static const char *logfilename
= "/tmp/qemu.log";
197 static int log_append
= 0;
200 static int tlb_flush_count
;
201 static int tb_flush_count
;
202 static int tb_phys_invalidate_count
;
204 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
205 typedef struct subpage_t
{
206 target_phys_addr_t base
;
207 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
208 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
209 void *opaque
[TARGET_PAGE_SIZE
][2][4];
#ifdef _WIN32
/* Make the memory range [addr, addr+size) executable (Win32 build).
   The previous protection value is required by the API but discarded. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make the memory range [addr, addr+size) executable (POSIX build).
   The range is rounded out to host page boundaries before being
   mprotect()ed RWX; the mprotect return value is deliberately
   ignored (best-effort, matching the original behaviour). */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
238 static void page_init(void)
240 /* NOTE: we can always suppose that qemu_host_page_size >=
244 SYSTEM_INFO system_info
;
246 GetSystemInfo(&system_info
);
247 qemu_real_host_page_size
= system_info
.dwPageSize
;
250 qemu_real_host_page_size
= getpagesize();
252 if (qemu_host_page_size
== 0)
253 qemu_host_page_size
= qemu_real_host_page_size
;
254 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
255 qemu_host_page_size
= TARGET_PAGE_SIZE
;
256 qemu_host_page_bits
= 0;
257 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
258 qemu_host_page_bits
++;
259 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
260 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
261 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
263 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
265 long long startaddr
, endaddr
;
270 last_brk
= (unsigned long)sbrk(0);
271 f
= fopen("/proc/self/maps", "r");
274 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
276 startaddr
= MIN(startaddr
,
277 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
278 endaddr
= MIN(endaddr
,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
280 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
281 TARGET_PAGE_ALIGN(endaddr
),
292 static inline PageDesc
**page_l1_map(target_ulong index
)
294 #if TARGET_LONG_BITS > 32
295 /* Host memory outside guest VM. For 32-bit targets we have already
296 excluded high addresses. */
297 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
300 return &l1_map
[index
>> L2_BITS
];
303 static inline PageDesc
*page_find_alloc(target_ulong index
)
306 lp
= page_l1_map(index
);
312 /* allocate if not found */
313 #if defined(CONFIG_USER_ONLY)
315 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
316 /* Don't use qemu_malloc because it may recurse. */
317 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
318 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
321 if (addr
== (target_ulong
)addr
) {
322 page_set_flags(addr
& TARGET_PAGE_MASK
,
323 TARGET_PAGE_ALIGN(addr
+ len
),
327 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
331 return p
+ (index
& (L2_SIZE
- 1));
334 static inline PageDesc
*page_find(target_ulong index
)
337 lp
= page_l1_map(index
);
344 return p
+ (index
& (L2_SIZE
- 1));
347 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
352 p
= (void **)l1_phys_map
;
353 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
355 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
356 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
358 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
361 /* allocate if not found */
364 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
365 memset(p
, 0, sizeof(void *) * L1_SIZE
);
369 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
373 /* allocate if not found */
376 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
378 for (i
= 0; i
< L2_SIZE
; i
++)
379 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
381 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
384 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
386 return phys_page_find_alloc(index
, 0);
389 #if !defined(CONFIG_USER_ONLY)
390 static void tlb_protect_code(ram_addr_t ram_addr
);
391 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
393 #define mmap_lock() do { } while(0)
394 #define mmap_unlock() do { } while(0)
397 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399 #if defined(CONFIG_USER_ONLY)
400 /* Currently it is not recommanded to allocate big chunks of data in
401 user mode. It will change when a dedicated libc will be used */
402 #define USE_STATIC_CODE_GEN_BUFFER
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
406 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
409 static void code_gen_alloc(unsigned long tb_size
)
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 code_gen_buffer
= static_code_gen_buffer
;
416 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
417 map_exec(code_gen_buffer
, code_gen_buffer_size
);
419 code_gen_buffer_size
= tb_size
;
420 if (code_gen_buffer_size
== 0) {
421 #if defined(CONFIG_USER_ONLY)
422 /* in user mode, phys_ram_size is not meaningful */
423 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
425 /* XXX: needs ajustments */
426 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
429 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
430 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
431 /* The code gen buffer location may have constraints depending on
432 the host cpu and OS */
433 #if defined(__linux__)
438 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
439 #if defined(__x86_64__)
441 /* Cannot map more than that */
442 if (code_gen_buffer_size
> (800 * 1024 * 1024))
443 code_gen_buffer_size
= (800 * 1024 * 1024);
444 #elif defined(__sparc_v9__)
445 // Map the buffer below 2G, so we can use direct calls and branches
447 start
= (void *) 0x60000000UL
;
448 if (code_gen_buffer_size
> (512 * 1024 * 1024))
449 code_gen_buffer_size
= (512 * 1024 * 1024);
451 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
452 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
454 if (code_gen_buffer
== MAP_FAILED
) {
455 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
459 #elif defined(__FreeBSD__)
463 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
464 #if defined(__x86_64__)
465 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
466 * 0x40000000 is free */
468 addr
= (void *)0x40000000;
469 /* Cannot map more than that */
470 if (code_gen_buffer_size
> (800 * 1024 * 1024))
471 code_gen_buffer_size
= (800 * 1024 * 1024);
473 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
474 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
476 if (code_gen_buffer
== MAP_FAILED
) {
477 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
482 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
483 if (!code_gen_buffer
) {
484 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
487 map_exec(code_gen_buffer
, code_gen_buffer_size
);
489 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
490 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
491 code_gen_buffer_max_size
= code_gen_buffer_size
-
492 code_gen_max_block_size();
493 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
494 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
497 /* Must be called before using the QEMU cpus. 'tb_size' is the size
498 (in bytes) allocated to the translation buffer. Zero means default
500 void cpu_exec_init_all(unsigned long tb_size
)
503 code_gen_alloc(tb_size
);
504 code_gen_ptr
= code_gen_buffer
;
506 #if !defined(CONFIG_USER_ONLY)
511 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
513 #define CPU_COMMON_SAVE_VERSION 1
515 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
517 CPUState
*env
= opaque
;
519 qemu_put_be32s(f
, &env
->halted
);
520 qemu_put_be32s(f
, &env
->interrupt_request
);
523 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
525 CPUState
*env
= opaque
;
527 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
530 qemu_get_be32s(f
, &env
->halted
);
531 qemu_get_be32s(f
, &env
->interrupt_request
);
538 void cpu_exec_init(CPUState
*env
)
543 env
->next_cpu
= NULL
;
546 while (*penv
!= NULL
) {
547 penv
= (CPUState
**)&(*penv
)->next_cpu
;
550 env
->cpu_index
= cpu_index
;
551 TAILQ_INIT(&env
->breakpoints
);
552 TAILQ_INIT(&env
->watchpoints
);
554 env
->thread_id
= GetCurrentProcessId();
556 env
->thread_id
= getpid();
559 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
560 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
561 cpu_common_save
, cpu_common_load
, env
);
562 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
563 cpu_save
, cpu_load
, env
);
567 static inline void invalidate_page_bitmap(PageDesc
*p
)
569 if (p
->code_bitmap
) {
570 qemu_free(p
->code_bitmap
);
571 p
->code_bitmap
= NULL
;
573 p
->code_write_count
= 0;
576 /* set to NULL all the 'first_tb' fields in all PageDescs */
577 static void page_flush_tb(void)
582 for(i
= 0; i
< L1_SIZE
; i
++) {
585 for(j
= 0; j
< L2_SIZE
; j
++) {
587 invalidate_page_bitmap(p
);
594 /* flush all the translation blocks */
595 /* XXX: tb_flush is currently not thread safe */
596 void tb_flush(CPUState
*env1
)
599 #if defined(DEBUG_FLUSH)
600 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
601 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
603 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
605 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
606 cpu_abort(env1
, "Internal error: code buffer overflow\n");
610 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
611 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
614 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
617 code_gen_ptr
= code_gen_buffer
;
618 /* XXX: flush processor icache at this point if cache flush is
623 #ifdef DEBUG_TB_CHECK
625 static void tb_invalidate_check(target_ulong address
)
627 TranslationBlock
*tb
;
629 address
&= TARGET_PAGE_MASK
;
630 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
631 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
632 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
633 address
>= tb
->pc
+ tb
->size
)) {
634 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
635 address
, (long)tb
->pc
, tb
->size
);
641 /* verify that all the pages have correct rights for code */
642 static void tb_page_check(void)
644 TranslationBlock
*tb
;
645 int i
, flags1
, flags2
;
647 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
648 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
649 flags1
= page_get_flags(tb
->pc
);
650 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
651 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
652 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
653 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
659 static void tb_jmp_check(TranslationBlock
*tb
)
661 TranslationBlock
*tb1
;
664 /* suppress any remaining jumps to this TB */
668 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
671 tb1
= tb1
->jmp_next
[n1
];
673 /* check end of list */
675 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
681 /* invalidate one TB */
682 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
685 TranslationBlock
*tb1
;
689 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
692 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
696 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
698 TranslationBlock
*tb1
;
704 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
706 *ptb
= tb1
->page_next
[n1
];
709 ptb
= &tb1
->page_next
[n1
];
713 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
715 TranslationBlock
*tb1
, **ptb
;
718 ptb
= &tb
->jmp_next
[n
];
721 /* find tb(n) in circular list */
725 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
726 if (n1
== n
&& tb1
== tb
)
729 ptb
= &tb1
->jmp_first
;
731 ptb
= &tb1
->jmp_next
[n1
];
734 /* now we can suppress tb(n) from the list */
735 *ptb
= tb
->jmp_next
[n
];
737 tb
->jmp_next
[n
] = NULL
;
741 /* reset the jump entry 'n' of a TB so that it is not chained to
743 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
745 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
748 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
753 target_phys_addr_t phys_pc
;
754 TranslationBlock
*tb1
, *tb2
;
756 /* remove the TB from the hash list */
757 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
758 h
= tb_phys_hash_func(phys_pc
);
759 tb_remove(&tb_phys_hash
[h
], tb
,
760 offsetof(TranslationBlock
, phys_hash_next
));
762 /* remove the TB from the page list */
763 if (tb
->page_addr
[0] != page_addr
) {
764 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
765 tb_page_remove(&p
->first_tb
, tb
);
766 invalidate_page_bitmap(p
);
768 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
769 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
770 tb_page_remove(&p
->first_tb
, tb
);
771 invalidate_page_bitmap(p
);
774 tb_invalidated_flag
= 1;
776 /* remove the TB from the hash list */
777 h
= tb_jmp_cache_hash_func(tb
->pc
);
778 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
779 if (env
->tb_jmp_cache
[h
] == tb
)
780 env
->tb_jmp_cache
[h
] = NULL
;
783 /* suppress this TB from the two jump lists */
784 tb_jmp_remove(tb
, 0);
785 tb_jmp_remove(tb
, 1);
787 /* suppress any remaining jumps to this TB */
793 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
794 tb2
= tb1
->jmp_next
[n1
];
795 tb_reset_jump(tb1
, n1
);
796 tb1
->jmp_next
[n1
] = NULL
;
799 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
801 tb_phys_invalidate_count
++;
804 static inline void set_bits(uint8_t *tab
, int start
, int len
)
810 mask
= 0xff << (start
& 7);
811 if ((start
& ~7) == (end
& ~7)) {
813 mask
&= ~(0xff << (end
& 7));
818 start
= (start
+ 8) & ~7;
820 while (start
< end1
) {
825 mask
= ~(0xff << (end
& 7));
831 static void build_page_bitmap(PageDesc
*p
)
833 int n
, tb_start
, tb_end
;
834 TranslationBlock
*tb
;
836 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
843 tb
= (TranslationBlock
*)((long)tb
& ~3);
844 /* NOTE: this is subtle as a TB may span two physical pages */
846 /* NOTE: tb_end may be after the end of the page, but
847 it is not a problem */
848 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
849 tb_end
= tb_start
+ tb
->size
;
850 if (tb_end
> TARGET_PAGE_SIZE
)
851 tb_end
= TARGET_PAGE_SIZE
;
854 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
856 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
857 tb
= tb
->page_next
[n
];
861 TranslationBlock
*tb_gen_code(CPUState
*env
,
862 target_ulong pc
, target_ulong cs_base
,
863 int flags
, int cflags
)
865 TranslationBlock
*tb
;
867 target_ulong phys_pc
, phys_page2
, virt_page2
;
870 phys_pc
= get_phys_addr_code(env
, pc
);
873 /* flush must be done */
875 /* cannot fail at this point */
877 /* Don't forget to invalidate previous TB info. */
878 tb_invalidated_flag
= 1;
880 tc_ptr
= code_gen_ptr
;
882 tb
->cs_base
= cs_base
;
885 cpu_gen_code(env
, tb
, &code_gen_size
);
886 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
888 /* check next page if needed */
889 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
891 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
892 phys_page2
= get_phys_addr_code(env
, virt_page2
);
894 tb_link_phys(tb
, phys_pc
, phys_page2
);
898 /* invalidate all TBs which intersect with the target physical page
899 starting in range [start;end[. NOTE: start and end must refer to
900 the same physical page. 'is_cpu_write_access' should be true if called
901 from a real cpu write access: the virtual CPU will exit the current
902 TB if code is modified inside this TB. */
903 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
904 int is_cpu_write_access
)
906 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
907 CPUState
*env
= cpu_single_env
;
908 target_ulong tb_start
, tb_end
;
911 #ifdef TARGET_HAS_PRECISE_SMC
912 int current_tb_not_found
= is_cpu_write_access
;
913 TranslationBlock
*current_tb
= NULL
;
914 int current_tb_modified
= 0;
915 target_ulong current_pc
= 0;
916 target_ulong current_cs_base
= 0;
917 int current_flags
= 0;
918 #endif /* TARGET_HAS_PRECISE_SMC */
920 p
= page_find(start
>> TARGET_PAGE_BITS
);
923 if (!p
->code_bitmap
&&
924 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
925 is_cpu_write_access
) {
926 /* build code bitmap */
927 build_page_bitmap(p
);
930 /* we remove all the TBs in the range [start, end[ */
931 /* XXX: see if in some cases it could be faster to invalidate all the code */
935 tb
= (TranslationBlock
*)((long)tb
& ~3);
936 tb_next
= tb
->page_next
[n
];
937 /* NOTE: this is subtle as a TB may span two physical pages */
939 /* NOTE: tb_end may be after the end of the page, but
940 it is not a problem */
941 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
942 tb_end
= tb_start
+ tb
->size
;
944 tb_start
= tb
->page_addr
[1];
945 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
947 if (!(tb_end
<= start
|| tb_start
>= end
)) {
948 #ifdef TARGET_HAS_PRECISE_SMC
949 if (current_tb_not_found
) {
950 current_tb_not_found
= 0;
952 if (env
->mem_io_pc
) {
953 /* now we have a real cpu fault */
954 current_tb
= tb_find_pc(env
->mem_io_pc
);
957 if (current_tb
== tb
&&
958 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
959 /* If we are modifying the current TB, we must stop
960 its execution. We could be more precise by checking
961 that the modification is after the current PC, but it
962 would require a specialized function to partially
963 restore the CPU state */
965 current_tb_modified
= 1;
966 cpu_restore_state(current_tb
, env
,
967 env
->mem_io_pc
, NULL
);
968 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
971 #endif /* TARGET_HAS_PRECISE_SMC */
972 /* we need to do that to handle the case where a signal
973 occurs while doing tb_phys_invalidate() */
976 saved_tb
= env
->current_tb
;
977 env
->current_tb
= NULL
;
979 tb_phys_invalidate(tb
, -1);
981 env
->current_tb
= saved_tb
;
982 if (env
->interrupt_request
&& env
->current_tb
)
983 cpu_interrupt(env
, env
->interrupt_request
);
988 #if !defined(CONFIG_USER_ONLY)
989 /* if no code remaining, no need to continue to use slow writes */
991 invalidate_page_bitmap(p
);
992 if (is_cpu_write_access
) {
993 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
997 #ifdef TARGET_HAS_PRECISE_SMC
998 if (current_tb_modified
) {
999 /* we generate a block containing just the instruction
1000 modifying the memory. It will ensure that it cannot modify
1002 env
->current_tb
= NULL
;
1003 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1004 cpu_resume_from_signal(env
, NULL
);
1009 /* len must be <= 8 and start must be a multiple of len */
1010 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1017 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1018 cpu_single_env
->mem_io_vaddr
, len
,
1019 cpu_single_env
->eip
,
1020 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1024 p
= page_find(start
>> TARGET_PAGE_BITS
);
1027 if (p
->code_bitmap
) {
1028 offset
= start
& ~TARGET_PAGE_MASK
;
1029 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1030 if (b
& ((1 << len
) - 1))
1034 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1038 #if !defined(CONFIG_SOFTMMU)
1039 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1040 unsigned long pc
, void *puc
)
1042 TranslationBlock
*tb
;
1045 #ifdef TARGET_HAS_PRECISE_SMC
1046 TranslationBlock
*current_tb
= NULL
;
1047 CPUState
*env
= cpu_single_env
;
1048 int current_tb_modified
= 0;
1049 target_ulong current_pc
= 0;
1050 target_ulong current_cs_base
= 0;
1051 int current_flags
= 0;
1054 addr
&= TARGET_PAGE_MASK
;
1055 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1059 #ifdef TARGET_HAS_PRECISE_SMC
1060 if (tb
&& pc
!= 0) {
1061 current_tb
= tb_find_pc(pc
);
1064 while (tb
!= NULL
) {
1066 tb
= (TranslationBlock
*)((long)tb
& ~3);
1067 #ifdef TARGET_HAS_PRECISE_SMC
1068 if (current_tb
== tb
&&
1069 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1070 /* If we are modifying the current TB, we must stop
1071 its execution. We could be more precise by checking
1072 that the modification is after the current PC, but it
1073 would require a specialized function to partially
1074 restore the CPU state */
1076 current_tb_modified
= 1;
1077 cpu_restore_state(current_tb
, env
, pc
, puc
);
1078 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1081 #endif /* TARGET_HAS_PRECISE_SMC */
1082 tb_phys_invalidate(tb
, addr
);
1083 tb
= tb
->page_next
[n
];
1086 #ifdef TARGET_HAS_PRECISE_SMC
1087 if (current_tb_modified
) {
1088 /* we generate a block containing just the instruction
1089 modifying the memory. It will ensure that it cannot modify
1091 env
->current_tb
= NULL
;
1092 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1093 cpu_resume_from_signal(env
, puc
);
1099 /* add the tb in the target page and protect it if necessary */
1100 static inline void tb_alloc_page(TranslationBlock
*tb
,
1101 unsigned int n
, target_ulong page_addr
)
1104 TranslationBlock
*last_first_tb
;
1106 tb
->page_addr
[n
] = page_addr
;
1107 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1108 tb
->page_next
[n
] = p
->first_tb
;
1109 last_first_tb
= p
->first_tb
;
1110 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1111 invalidate_page_bitmap(p
);
1113 #if defined(TARGET_HAS_SMC) || 1
1115 #if defined(CONFIG_USER_ONLY)
1116 if (p
->flags
& PAGE_WRITE
) {
1121 /* force the host page as non writable (writes will have a
1122 page fault + mprotect overhead) */
1123 page_addr
&= qemu_host_page_mask
;
1125 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1126 addr
+= TARGET_PAGE_SIZE
) {
1128 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1132 p2
->flags
&= ~PAGE_WRITE
;
1133 page_get_flags(addr
);
1135 mprotect(g2h(page_addr
), qemu_host_page_size
,
1136 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1137 #ifdef DEBUG_TB_INVALIDATE
1138 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1143 /* if some code is already present, then the pages are already
1144 protected. So we handle the case where only the first TB is
1145 allocated in a physical page */
1146 if (!last_first_tb
) {
1147 tlb_protect_code(page_addr
);
1151 #endif /* TARGET_HAS_SMC */
1154 /* Allocate a new translation block. Flush the translation buffer if
1155 too many translation blocks or too much generated code. */
1156 TranslationBlock
*tb_alloc(target_ulong pc
)
1158 TranslationBlock
*tb
;
1160 if (nb_tbs
>= code_gen_max_blocks
||
1161 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1163 tb
= &tbs
[nb_tbs
++];
1169 void tb_free(TranslationBlock
*tb
)
1171 /* In practice this is mostly used for single use temporary TB
1172 Ignore the hard cases and just back up if this TB happens to
1173 be the last one generated. */
1174 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1175 code_gen_ptr
= tb
->tc_ptr
;
1180 /* add a new TB and link it to the physical page tables. phys_page2 is
1181 (-1) to indicate that only one page contains the TB. */
1182 void tb_link_phys(TranslationBlock
*tb
,
1183 target_ulong phys_pc
, target_ulong phys_page2
)
1186 TranslationBlock
**ptb
;
1188 /* Grab the mmap lock to stop another thread invalidating this TB
1189 before we are done. */
1191 /* add in the physical hash table */
1192 h
= tb_phys_hash_func(phys_pc
);
1193 ptb
= &tb_phys_hash
[h
];
1194 tb
->phys_hash_next
= *ptb
;
1197 /* add in the page list */
1198 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1199 if (phys_page2
!= -1)
1200 tb_alloc_page(tb
, 1, phys_page2
);
1202 tb
->page_addr
[1] = -1;
1204 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1205 tb
->jmp_next
[0] = NULL
;
1206 tb
->jmp_next
[1] = NULL
;
1208 /* init original jump addresses */
1209 if (tb
->tb_next_offset
[0] != 0xffff)
1210 tb_reset_jump(tb
, 0);
1211 if (tb
->tb_next_offset
[1] != 0xffff)
1212 tb_reset_jump(tb
, 1);
1214 #ifdef DEBUG_TB_CHECK
1220 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1221 tb[1].tc_ptr. Return NULL if not found */
1222 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1224 int m_min
, m_max
, m
;
1226 TranslationBlock
*tb
;
1230 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1231 tc_ptr
>= (unsigned long)code_gen_ptr
)
1233 /* binary search (cf Knuth) */
1236 while (m_min
<= m_max
) {
1237 m
= (m_min
+ m_max
) >> 1;
1239 v
= (unsigned long)tb
->tc_ptr
;
1242 else if (tc_ptr
< v
) {
1251 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1253 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1255 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1258 tb1
= tb
->jmp_next
[n
];
1260 /* find head of list */
1263 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1266 tb1
= tb1
->jmp_next
[n1
];
1268 /* we are now sure now that tb jumps to tb1 */
1271 /* remove tb from the jmp_first list */
1272 ptb
= &tb_next
->jmp_first
;
1276 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1277 if (n1
== n
&& tb1
== tb
)
1279 ptb
= &tb1
->jmp_next
[n1
];
1281 *ptb
= tb
->jmp_next
[n
];
1282 tb
->jmp_next
[n
] = NULL
;
1284 /* suppress the jump to next tb in generated code */
1285 tb_reset_jump(tb
, n
);
1287 /* suppress jumps in the tb on which we could have jumped */
1288 tb_reset_jump_recursive(tb_next
);
1292 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1294 tb_reset_jump_recursive2(tb
, 0);
1295 tb_reset_jump_recursive2(tb
, 1);
1298 #if defined(TARGET_HAS_ICE)
1299 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1301 target_phys_addr_t addr
;
1303 ram_addr_t ram_addr
;
1306 addr
= cpu_get_phys_page_debug(env
, pc
);
1307 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1309 pd
= IO_MEM_UNASSIGNED
;
1311 pd
= p
->phys_offset
;
1313 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1314 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1318 /* Add a watchpoint. */
1319 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1320 int flags
, CPUWatchpoint
**watchpoint
)
1322 target_ulong len_mask
= ~(len
- 1);
1325 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1326 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1327 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1328 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1331 wp
= qemu_malloc(sizeof(*wp
));
1336 wp
->len_mask
= len_mask
;
1339 /* keep all GDB-injected watchpoints in front */
1341 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1343 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1345 tlb_flush_page(env
, addr
);
1352 /* Remove a specific watchpoint. */
1353 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1356 target_ulong len_mask
= ~(len
- 1);
1359 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1360 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1361 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1362 cpu_watchpoint_remove_by_ref(env
, wp
);
1369 /* Remove a specific watchpoint by reference. */
1370 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1372 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1374 tlb_flush_page(env
, watchpoint
->vaddr
);
1376 qemu_free(watchpoint
);
1379 /* Remove all matching watchpoints. */
1380 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1382 CPUWatchpoint
*wp
, *next
;
1384 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1385 if (wp
->flags
& mask
)
1386 cpu_watchpoint_remove_by_ref(env
, wp
);
1390 /* Add a breakpoint. */
1391 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1392 CPUBreakpoint
**breakpoint
)
1394 #if defined(TARGET_HAS_ICE)
1397 bp
= qemu_malloc(sizeof(*bp
));
1404 /* keep all GDB-injected breakpoints in front */
1406 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1408 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1411 kvm_update_debugger(env
);
1413 breakpoint_invalidate(env
, pc
);
1423 /* Remove a specific breakpoint. */
1424 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1426 #if defined(TARGET_HAS_ICE)
1429 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1430 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1431 cpu_breakpoint_remove_by_ref(env
, bp
);
1441 /* Remove a specific breakpoint by reference. */
1442 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1444 #if defined(TARGET_HAS_ICE)
1445 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1448 kvm_update_debugger(env
);
1450 breakpoint_invalidate(env
, breakpoint
->pc
);
1452 qemu_free(breakpoint
);
1456 /* Remove all matching breakpoints. */
1457 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1459 #if defined(TARGET_HAS_ICE)
1460 CPUBreakpoint
*bp
, *next
;
1462 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1463 if (bp
->flags
& mask
)
1464 cpu_breakpoint_remove_by_ref(env
, bp
);
1469 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1470 CPU loop after each instruction */
1471 void cpu_single_step(CPUState
*env
, int enabled
)
1473 #if defined(TARGET_HAS_ICE)
1474 if (env
->singlestep_enabled
!= enabled
) {
1475 env
->singlestep_enabled
= enabled
;
1476 /* must flush all the translated code to avoid inconsistancies */
1477 /* XXX: only flush what is necessary */
1481 kvm_update_debugger(env
);
1485 /* enable or disable low levels log */
1486 void cpu_set_log(int log_flags
)
1488 loglevel
= log_flags
;
1489 if (loglevel
&& !logfile
) {
1490 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1492 perror(logfilename
);
1495 #if !defined(CONFIG_SOFTMMU)
1496 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1498 static char logfile_buf
[4096];
1499 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1502 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1506 if (!loglevel
&& logfile
) {
1512 void cpu_set_log_filename(const char *filename
)
1514 logfilename
= strdup(filename
);
1519 cpu_set_log(loglevel
);
1522 /* mask must never be zero, except for A20 change call */
1523 void cpu_interrupt(CPUState
*env
, int mask
)
1525 #if !defined(USE_NPTL)
1526 TranslationBlock
*tb
;
1527 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1531 old_mask
= env
->interrupt_request
;
1532 /* FIXME: This is probably not threadsafe. A different thread could
1533 be in the middle of a read-modify-write operation. */
1534 env
->interrupt_request
|= mask
;
1535 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1536 kvm_update_interrupt_request(env
);
1537 #if defined(USE_NPTL)
1538 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1539 problem and hope the cpu will stop of its own accord. For userspace
1540 emulation this often isn't actually as bad as it sounds. Often
1541 signals are used primarily to interrupt blocking syscalls. */
1544 env
->icount_decr
.u16
.high
= 0xffff;
1545 #ifndef CONFIG_USER_ONLY
1546 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1547 an async event happened and we need to process it. */
1549 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1550 cpu_abort(env
, "Raised interrupt while not in I/O function");
1554 tb
= env
->current_tb
;
1555 /* if the cpu is currently executing code, we must unlink it and
1556 all the potentially executing TB */
1557 if (tb
&& !testandset(&interrupt_lock
)) {
1558 env
->current_tb
= NULL
;
1559 tb_reset_jump_recursive(tb
);
1560 resetlock(&interrupt_lock
);
1566 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1568 env
->interrupt_request
&= ~mask
;
1571 const CPULogItem cpu_log_items
[] = {
1572 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1573 "show generated host assembly code for each compiled TB" },
1574 { CPU_LOG_TB_IN_ASM
, "in_asm",
1575 "show target assembly code for each compiled TB" },
1576 { CPU_LOG_TB_OP
, "op",
1577 "show micro ops for each compiled TB" },
1578 { CPU_LOG_TB_OP_OPT
, "op_opt",
1581 "before eflags optimization and "
1583 "after liveness analysis" },
1584 { CPU_LOG_INT
, "int",
1585 "show interrupts/exceptions in short format" },
1586 { CPU_LOG_EXEC
, "exec",
1587 "show trace before each executed TB (lots of logs)" },
1588 { CPU_LOG_TB_CPU
, "cpu",
1589 "show CPU state before block translation" },
1591 { CPU_LOG_PCALL
, "pcall",
1592 "show protected mode far calls/returns/exceptions" },
1595 { CPU_LOG_IOPORT
, "ioport",
1596 "show all i/o ports accesses" },
/* Compare the first 'n' characters of s1 against the NUL-terminated
   string s2; true only when s2 has exactly length n and the bytes match.
   (n is an int; callers pass small pointer differences.) */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1608 /* takes a comma separated list of log masks. Return 0 if error. */
1609 int cpu_str_to_log_mask(const char *str
)
1611 const CPULogItem
*item
;
1618 p1
= strchr(p
, ',');
1621 if(cmp1(p
,p1
-p
,"all")) {
1622 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1626 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1627 if (cmp1(p
, p1
- p
, item
->name
))
1641 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1648 fprintf(stderr
, "qemu: fatal: ");
1649 vfprintf(stderr
, fmt
, ap
);
1650 fprintf(stderr
, "\n");
1652 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1654 cpu_dump_state(env
, stderr
, fprintf
, 0);
1657 fprintf(logfile
, "qemu: fatal: ");
1658 vfprintf(logfile
, fmt
, ap2
);
1659 fprintf(logfile
, "\n");
1661 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1663 cpu_dump_state(env
, logfile
, fprintf
, 0);
1673 CPUState
*cpu_copy(CPUState
*env
)
1675 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1676 /* preserve chaining and index */
1677 CPUState
*next_cpu
= new_env
->next_cpu
;
1678 int cpu_index
= new_env
->cpu_index
;
1679 memcpy(new_env
, env
, sizeof(CPUState
));
1680 new_env
->next_cpu
= next_cpu
;
1681 new_env
->cpu_index
= cpu_index
;
1685 #if !defined(CONFIG_USER_ONLY)
1687 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1691 /* Discard jump cache entries for any tb which might potentially
1692 overlap the flushed page. */
1693 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1694 memset (&env
->tb_jmp_cache
[i
], 0,
1695 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1697 i
= tb_jmp_cache_hash_page(addr
);
1698 memset (&env
->tb_jmp_cache
[i
], 0,
1699 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1702 /* NOTE: if flush_global is true, also flush global entries (not
1704 void tlb_flush(CPUState
*env
, int flush_global
)
1708 #if defined(DEBUG_TLB)
1709 printf("tlb_flush:\n");
1711 /* must reset current TB so that interrupts cannot modify the
1712 links while we are modifying them */
1713 env
->current_tb
= NULL
;
1715 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1716 env
->tlb_table
[0][i
].addr_read
= -1;
1717 env
->tlb_table
[0][i
].addr_write
= -1;
1718 env
->tlb_table
[0][i
].addr_code
= -1;
1719 env
->tlb_table
[1][i
].addr_read
= -1;
1720 env
->tlb_table
[1][i
].addr_write
= -1;
1721 env
->tlb_table
[1][i
].addr_code
= -1;
1722 #if (NB_MMU_MODES >= 3)
1723 env
->tlb_table
[2][i
].addr_read
= -1;
1724 env
->tlb_table
[2][i
].addr_write
= -1;
1725 env
->tlb_table
[2][i
].addr_code
= -1;
1726 #if (NB_MMU_MODES == 4)
1727 env
->tlb_table
[3][i
].addr_read
= -1;
1728 env
->tlb_table
[3][i
].addr_write
= -1;
1729 env
->tlb_table
[3][i
].addr_code
= -1;
1734 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1737 if (env
->kqemu_enabled
) {
1738 kqemu_flush(env
, flush_global
);
1744 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1746 if (addr
== (tlb_entry
->addr_read
&
1747 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1748 addr
== (tlb_entry
->addr_write
&
1749 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1750 addr
== (tlb_entry
->addr_code
&
1751 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1752 tlb_entry
->addr_read
= -1;
1753 tlb_entry
->addr_write
= -1;
1754 tlb_entry
->addr_code
= -1;
1758 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1762 #if defined(DEBUG_TLB)
1763 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1765 /* must reset current TB so that interrupts cannot modify the
1766 links while we are modifying them */
1767 env
->current_tb
= NULL
;
1769 addr
&= TARGET_PAGE_MASK
;
1770 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1771 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1772 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1773 #if (NB_MMU_MODES >= 3)
1774 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1775 #if (NB_MMU_MODES == 4)
1776 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1780 tlb_flush_jmp_cache(env
, addr
);
1783 if (env
->kqemu_enabled
) {
1784 kqemu_flush_page(env
, addr
);
1789 /* update the TLBs so that writes to code in the virtual page 'addr'
1791 static void tlb_protect_code(ram_addr_t ram_addr
)
1793 cpu_physical_memory_reset_dirty(ram_addr
,
1794 ram_addr
+ TARGET_PAGE_SIZE
,
1798 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1799 tested for self modifying code */
1800 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1803 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1806 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1807 unsigned long start
, unsigned long length
)
1810 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1811 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1812 if ((addr
- start
) < length
) {
1813 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1818 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1822 unsigned long length
, start1
;
1826 start
&= TARGET_PAGE_MASK
;
1827 end
= TARGET_PAGE_ALIGN(end
);
1829 length
= end
- start
;
1832 len
= length
>> TARGET_PAGE_BITS
;
1834 /* XXX: should not depend on cpu context */
1836 if (env
->kqemu_enabled
) {
1839 for(i
= 0; i
< len
; i
++) {
1840 kqemu_set_notdirty(env
, addr
);
1841 addr
+= TARGET_PAGE_SIZE
;
1845 mask
= ~dirty_flags
;
1846 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1847 for(i
= 0; i
< len
; i
++)
1850 /* we modify the TLB cache so that the dirty bit will be set again
1851 when accessing the range */
1852 start1
= start
+ (unsigned long)phys_ram_base
;
1853 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1854 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1855 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1856 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1857 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1858 #if (NB_MMU_MODES >= 3)
1859 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1860 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1861 #if (NB_MMU_MODES == 4)
1862 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1863 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1869 int cpu_physical_memory_set_dirty_tracking(int enable
)
1874 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1875 in_migration
= enable
;
1879 int cpu_physical_memory_get_dirty_tracking(void)
1881 return in_migration
;
1884 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1887 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1890 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1892 ram_addr_t ram_addr
;
1894 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1895 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1896 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1897 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1898 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1903 /* update the TLB according to the current state of the dirty bits */
1904 void cpu_tlb_update_dirty(CPUState
*env
)
1907 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1908 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1909 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1910 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1911 #if (NB_MMU_MODES >= 3)
1912 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1913 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1914 #if (NB_MMU_MODES == 4)
1915 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1916 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1921 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1923 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1924 tlb_entry
->addr_write
= vaddr
;
1927 /* update the TLB corresponding to virtual page vaddr
1928 so that it is no longer dirty */
1929 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1933 vaddr
&= TARGET_PAGE_MASK
;
1934 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1935 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1936 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1937 #if (NB_MMU_MODES >= 3)
1938 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1939 #if (NB_MMU_MODES == 4)
1940 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1945 /* add a new TLB entry. At most one entry for a given virtual address
1946 is permitted. Return 0 if OK or 2 if the page could not be mapped
1947 (can only happen in non SOFTMMU mode for I/O pages or pages
1948 conflicting with the host address space). */
1949 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1950 target_phys_addr_t paddr
, int prot
,
1951 int mmu_idx
, int is_softmmu
)
1956 target_ulong address
;
1957 target_ulong code_address
;
1958 target_phys_addr_t addend
;
1962 target_phys_addr_t iotlb
;
1964 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1966 pd
= IO_MEM_UNASSIGNED
;
1968 pd
= p
->phys_offset
;
1970 #if defined(DEBUG_TLB)
1971 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1972 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1977 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1978 /* IO memory case (romd handled later) */
1979 address
|= TLB_MMIO
;
1981 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1982 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1984 iotlb
= pd
& TARGET_PAGE_MASK
;
1985 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1986 iotlb
|= IO_MEM_NOTDIRTY
;
1988 iotlb
|= IO_MEM_ROM
;
1990 /* IO handlers are currently passed a phsical address.
1991 It would be nice to pass an offset from the base address
1992 of that region. This would avoid having to special case RAM,
1993 and avoid full address decoding in every device.
1994 We can't use the high bits of pd for this because
1995 IO_MEM_ROMD uses these as a ram address. */
1996 iotlb
= (pd
& ~TARGET_PAGE_MASK
) + paddr
;
1999 code_address
= address
;
2000 /* Make accesses to pages with watchpoints go via the
2001 watchpoint trap routines. */
2002 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2003 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2004 iotlb
= io_mem_watch
+ paddr
;
2005 /* TODO: The memory case can be optimized by not trapping
2006 reads of pages with a write breakpoint. */
2007 address
|= TLB_MMIO
;
2011 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2012 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2013 te
= &env
->tlb_table
[mmu_idx
][index
];
2014 te
->addend
= addend
- vaddr
;
2015 if (prot
& PAGE_READ
) {
2016 te
->addr_read
= address
;
2021 if (prot
& PAGE_EXEC
) {
2022 te
->addr_code
= code_address
;
2026 if (prot
& PAGE_WRITE
) {
2027 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2028 (pd
& IO_MEM_ROMD
)) {
2029 /* Write access calls the I/O callback. */
2030 te
->addr_write
= address
| TLB_MMIO
;
2031 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2032 !cpu_physical_memory_is_dirty(pd
)) {
2033 te
->addr_write
= address
| TLB_NOTDIRTY
;
2035 te
->addr_write
= address
;
2038 te
->addr_write
= -1;
2045 void tlb_flush(CPUState
*env
, int flush_global
)
2049 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2053 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2054 target_phys_addr_t paddr
, int prot
,
2055 int mmu_idx
, int is_softmmu
)
2060 /* dump memory mappings */
2061 void page_dump(FILE *f
)
2063 unsigned long start
, end
;
2064 int i
, j
, prot
, prot1
;
2067 fprintf(f
, "%-8s %-8s %-8s %s\n",
2068 "start", "end", "size", "prot");
2072 for(i
= 0; i
<= L1_SIZE
; i
++) {
2077 for(j
= 0;j
< L2_SIZE
; j
++) {
2082 if (prot1
!= prot
) {
2083 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2085 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2086 start
, end
, end
- start
,
2087 prot
& PAGE_READ
? 'r' : '-',
2088 prot
& PAGE_WRITE
? 'w' : '-',
2089 prot
& PAGE_EXEC
? 'x' : '-');
2103 int page_get_flags(target_ulong address
)
2107 p
= page_find(address
>> TARGET_PAGE_BITS
);
2113 /* modify the flags of a page and invalidate the code if
2114 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2115 depending on PAGE_WRITE */
2116 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2121 /* mmap_lock should already be held. */
2122 start
= start
& TARGET_PAGE_MASK
;
2123 end
= TARGET_PAGE_ALIGN(end
);
2124 if (flags
& PAGE_WRITE
)
2125 flags
|= PAGE_WRITE_ORG
;
2126 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2127 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2128 /* We may be called for host regions that are outside guest
2132 /* if the write protection is set, then we invalidate the code
2134 if (!(p
->flags
& PAGE_WRITE
) &&
2135 (flags
& PAGE_WRITE
) &&
2137 tb_invalidate_phys_page(addr
, 0, NULL
);
2143 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2149 if (start
+ len
< start
)
2150 /* we've wrapped around */
2153 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2154 start
= start
& TARGET_PAGE_MASK
;
2156 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2157 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2160 if( !(p
->flags
& PAGE_VALID
) )
2163 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2165 if (flags
& PAGE_WRITE
) {
2166 if (!(p
->flags
& PAGE_WRITE_ORG
))
2168 /* unprotect the page if it was put read-only because it
2169 contains translated code */
2170 if (!(p
->flags
& PAGE_WRITE
)) {
2171 if (!page_unprotect(addr
, 0, NULL
))
2180 /* called from signal handler: invalidate the code and unprotect the
2181 page. Return TRUE if the fault was succesfully handled. */
2182 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2184 unsigned int page_index
, prot
, pindex
;
2186 target_ulong host_start
, host_end
, addr
;
2188 /* Technically this isn't safe inside a signal handler. However we
2189 know this only ever happens in a synchronous SEGV handler, so in
2190 practice it seems to be ok. */
2193 host_start
= address
& qemu_host_page_mask
;
2194 page_index
= host_start
>> TARGET_PAGE_BITS
;
2195 p1
= page_find(page_index
);
2200 host_end
= host_start
+ qemu_host_page_size
;
2203 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2207 /* if the page was really writable, then we change its
2208 protection back to writable */
2209 if (prot
& PAGE_WRITE_ORG
) {
2210 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2211 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2212 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2213 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2214 p1
[pindex
].flags
|= PAGE_WRITE
;
2215 /* and since the content will be modified, we must invalidate
2216 the corresponding translated code. */
2217 tb_invalidate_phys_page(address
, pc
, puc
);
2218 #ifdef DEBUG_TB_CHECK
2219 tb_invalidate_check(address
);
2229 static inline void tlb_set_dirty(CPUState
*env
,
2230 unsigned long addr
, target_ulong vaddr
)
2233 #endif /* defined(CONFIG_USER_ONLY) */
2235 #if !defined(CONFIG_USER_ONLY)
2236 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2238 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2239 ram_addr_t orig_memory
);
2240 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2243 if (addr > start_addr) \
2246 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2247 if (start_addr2 > 0) \
2251 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2252 end_addr2 = TARGET_PAGE_SIZE - 1; \
2254 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2255 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2260 /* register physical memory. 'size' must be a multiple of the target
2261 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2263 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2265 ram_addr_t phys_offset
)
2267 target_phys_addr_t addr
, end_addr
;
2270 ram_addr_t orig_size
= size
;
2274 /* XXX: should not depend on cpu context */
2276 if (env
->kqemu_enabled
) {
2277 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2281 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2283 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2284 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2285 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2286 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2287 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2288 ram_addr_t orig_memory
= p
->phys_offset
;
2289 target_phys_addr_t start_addr2
, end_addr2
;
2290 int need_subpage
= 0;
2292 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2294 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2295 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2296 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2297 &p
->phys_offset
, orig_memory
);
2299 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2302 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2304 p
->phys_offset
= phys_offset
;
2305 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2306 (phys_offset
& IO_MEM_ROMD
))
2307 phys_offset
+= TARGET_PAGE_SIZE
;
2310 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2311 p
->phys_offset
= phys_offset
;
2312 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2313 (phys_offset
& IO_MEM_ROMD
))
2314 phys_offset
+= TARGET_PAGE_SIZE
;
2316 target_phys_addr_t start_addr2
, end_addr2
;
2317 int need_subpage
= 0;
2319 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2320 end_addr2
, need_subpage
);
2322 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2323 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2324 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2325 subpage_register(subpage
, start_addr2
, end_addr2
,
2332 /* since each CPU stores ram addresses in its TLB cache, we must
2333 reset the modified entries */
2335 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2340 /* XXX: temporary until new memory mapping API */
2341 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2345 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2347 return IO_MEM_UNASSIGNED
;
2348 return p
->phys_offset
;
2351 /* XXX: better than nothing */
2352 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2355 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2356 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
")\n",
2357 (uint64_t)size
, (uint64_t)phys_ram_size
);
2360 addr
= phys_ram_alloc_offset
;
2361 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2365 void qemu_ram_free(ram_addr_t addr
)
2369 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2371 #ifdef DEBUG_UNASSIGNED
2372 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2374 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2375 do_unassigned_access(addr
, 0, 0, 0, 1);
2380 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
2382 #ifdef DEBUG_UNASSIGNED
2383 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2385 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2386 do_unassigned_access(addr
, 0, 0, 0, 2);
2391 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
2393 #ifdef DEBUG_UNASSIGNED
2394 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2396 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2397 do_unassigned_access(addr
, 0, 0, 0, 4);
2402 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2404 #ifdef DEBUG_UNASSIGNED
2405 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2407 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2408 do_unassigned_access(addr
, 1, 0, 0, 1);
2412 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2414 #ifdef DEBUG_UNASSIGNED
2415 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2417 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2418 do_unassigned_access(addr
, 1, 0, 0, 2);
2422 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2424 #ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2427 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2428 do_unassigned_access(addr
, 1, 0, 0, 4);
2432 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2433 unassigned_mem_readb
,
2434 unassigned_mem_readw
,
2435 unassigned_mem_readl
,
2438 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2439 unassigned_mem_writeb
,
2440 unassigned_mem_writew
,
2441 unassigned_mem_writel
,
2444 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2448 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2449 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2450 #if !defined(CONFIG_USER_ONLY)
2451 tb_invalidate_phys_page_fast(ram_addr
, 1);
2452 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2455 stb_p(phys_ram_base
+ ram_addr
, val
);
2457 if (cpu_single_env
->kqemu_enabled
&&
2458 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2459 kqemu_modify_page(cpu_single_env
, ram_addr
);
2461 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2462 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2463 /* we remove the notdirty callback only if the code has been
2465 if (dirty_flags
== 0xff)
2466 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2469 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2473 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2474 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2475 #if !defined(CONFIG_USER_ONLY)
2476 tb_invalidate_phys_page_fast(ram_addr
, 2);
2477 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2480 stw_p(phys_ram_base
+ ram_addr
, val
);
2482 if (cpu_single_env
->kqemu_enabled
&&
2483 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2484 kqemu_modify_page(cpu_single_env
, ram_addr
);
2486 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2487 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2488 /* we remove the notdirty callback only if the code has been
2490 if (dirty_flags
== 0xff)
2491 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2494 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2498 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2499 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2500 #if !defined(CONFIG_USER_ONLY)
2501 tb_invalidate_phys_page_fast(ram_addr
, 4);
2502 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2505 stl_p(phys_ram_base
+ ram_addr
, val
);
2507 if (cpu_single_env
->kqemu_enabled
&&
2508 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2509 kqemu_modify_page(cpu_single_env
, ram_addr
);
2511 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2512 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2513 /* we remove the notdirty callback only if the code has been
2515 if (dirty_flags
== 0xff)
2516 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2519 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2520 NULL
, /* never used */
2521 NULL
, /* never used */
2522 NULL
, /* never used */
2525 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2526 notdirty_mem_writeb
,
2527 notdirty_mem_writew
,
2528 notdirty_mem_writel
,
2531 /* Generate a debug exception if a watchpoint has been hit. */
2532 static void check_watchpoint(int offset
, int len_mask
, int flags
)
2534 CPUState
*env
= cpu_single_env
;
2535 target_ulong pc
, cs_base
;
2536 TranslationBlock
*tb
;
2541 if (env
->watchpoint_hit
) {
2542 /* We re-entered the check after replacing the TB. Now raise
2543 * the debug interrupt so that is will trigger after the
2544 * current instruction. */
2545 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2548 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2549 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2550 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
2551 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
2552 wp
->flags
|= BP_WATCHPOINT_HIT
;
2553 if (!env
->watchpoint_hit
) {
2554 env
->watchpoint_hit
= wp
;
2555 tb
= tb_find_pc(env
->mem_io_pc
);
2557 cpu_abort(env
, "check_watchpoint: could not find TB for "
2558 "pc=%p", (void *)env
->mem_io_pc
);
2560 cpu_restore_state(tb
, env
, env
->mem_io_pc
, NULL
);
2561 tb_phys_invalidate(tb
, -1);
2562 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2563 env
->exception_index
= EXCP_DEBUG
;
2565 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2566 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
2568 cpu_resume_from_signal(env
, NULL
);
2571 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2576 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2577 so these check for a hit then pass through to the normal out-of-line
2579 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2581 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
2582 return ldub_phys(addr
);
2585 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2587 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
2588 return lduw_phys(addr
);
2591 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2593 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
2594 return ldl_phys(addr
);
2597 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2600 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
2601 stb_phys(addr
, val
);
/* 16-bit watchpoint-checked write; forwards to the slow physical path.  */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
/* 32-bit watchpoint-checked write; forwards to the slow physical path.  */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
/* Read dispatch table for the watchpoint io slot: byte, word, long.
   NOTE(review): initializer entries were lost in extraction; reconstructed
   from the matching accessors above — confirm against original.  */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
/* Write dispatch table for the watchpoint io slot: byte, word, long.
   NOTE(review): initializer entries were lost in extraction; reconstructed
   from the matching accessors above — confirm against original.  */
static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
/* Dispatch a read of width 'len' (0=byte, 1=word, 2=long) to the handler
   registered for the subpage slot containing addr.  */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                      unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    /* Slot index is relative to the base of the subpage container.  */
    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    /* opaque[idx][0][len]: index 0 selects the read-side opaque pointer.  */
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}
/* Dispatch a write of width 'len' (0=byte, 1=word, 2=long) to the handler
   registered for the subpage slot containing addr.  */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    /* Slot index is relative to the base of the subpage container.  */
    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    /* opaque[idx][1][len]: index 1 selects the write-side opaque pointer.  */
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
/* Byte-wide subpage read entry point (len index 0).  */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}
/* Byte-wide subpage write entry point (len index 0).  */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}
/* Word-wide (16-bit) subpage read entry point (len index 1).  */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}
/* Word-wide (16-bit) subpage write entry point (len index 1).  */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}
/* Long-wide (32-bit) subpage read entry point (len index 2).  */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}
/* Long-wide (32-bit) subpage write entry point (len index 2).  */
static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
/* Read dispatch table exported to cpu_register_io_memory for subpages.
   NOTE(review): initializer entries were lost in extraction; reconstructed
   from the accessors above — confirm against original.  */
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
/* Write dispatch table exported to cpu_register_io_memory for subpages.
   NOTE(review): initializer entries were lost in extraction; reconstructed
   from the accessors above — confirm against original.  */
static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
/* Bind the io handlers of 'memory' to the subpage slots covering
   [start, end] (page-relative byte offsets) inside 'mmio'.
   Returns 0 on success, -1 if the range exceeds one target page.  */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Convert the io descriptor into a raw table index.  */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* i iterates over access widths; a NULL handler is left untouched
           so a previously-registered handler survives.  */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
/* Allocate and register a subpage container covering one target page at
   'base'.  *phys receives the new io descriptor tagged with IO_MEM_SUBPAGE;
   the whole page is initially backed by 'orig_memory'.  Returns the new
   subpage_t (also stored as the io slot's opaque pointer).  */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);

    return mmio;
}
2777 static int get_free_io_mem_idx(void)
2781 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
2782 if (!io_mem_used
[i
]) {
/* One-time setup of the io memory tables: install the fixed low slots
   (ROM, unassigned, not-dirty), the watchpoint slot, and the dirty bitmap.  */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* NOTE(review): reconstructed — reserve the fixed low entries so
       get_free_io_mem_idx() never hands them out; confirm count against
       original.  */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    /* All pages start fully dirty (0xff = every dirty flag set).  */
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* Allocate a fresh slot; propagate -1 when the table is full.  */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* Any missing width marks the zone as subwidth so accesses are
           split by the caller.  */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    /* Descriptor = slot index shifted up, tagged with the subwidth flag.  */
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
2841 void cpu_unregister_io_memory(int io_table_address
)
2844 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
2846 for (i
=0;i
< 3; i
++) {
2847 io_mem_read
[io_index
][i
] = unassigned_mem_read
[i
];
2848 io_mem_write
[io_index
][i
] = unassigned_mem_write
[i
];
2850 io_mem_opaque
[io_index
] = NULL
;
2851 io_mem_used
[io_index
] = 0;
2854 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2856 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2859 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2861 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2864 #endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    /* Process at most one target page per iteration.  */
    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        /* Unmapped page: silently stop (debug accessor semantics).  */
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* Softmmu slow-path physical memory access: splits the transfer at page
   boundaries, dispatching each chunk either to the io handlers (MMIO) or
   to a direct memcpy into guest RAM.  */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        /* Only RAM, ROM and ROM-device pages are writable here; anything
           else (plain MMIO, unassigned) is skipped without error.  */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    /* Anything above IO_MEM_ROM that is not a ROM device goes through the
       io handlers; RAM and ROM are read directly.  */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: performed as two 32-bit reads, ordered per target
           endianness.  XXX: This is broken when device endianness !=
           cpu endianness.  */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3112 uint32_t ldub_phys(target_phys_addr_t addr
)
3115 cpu_physical_memory_read(addr
, &val
, 1);
3120 uint32_t lduw_phys(target_phys_addr_t addr
)
3123 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
3124 return tswap16(val
);
/* Branch-prediction hints; no-ops without the GCC builtin.
   NOTE(review): the conditional guard lines were lost in extraction;
   reconstructed — confirm against original.  */
#if defined(__GNUC__)
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   x
#define unlikely(x) x
#endif
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        /* During migration the destination must still see the page as
           dirty, so dirty tracking is applied in that case only.  */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
/* 64-bit variant of stl_phys_notdirty: stores without dirty marking or
   code invalidation.  warning: addr must be aligned.  */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: split into two 32-bit stores ordered per target
           endianness.  */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
3237 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3240 cpu_physical_memory_write(addr
, &v
, 1);
3244 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3246 uint16_t v
= tswap16(val
);
3247 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3251 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3254 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    /* Translate and transfer one virtual page at a time.  */
    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the io insn to be the last one in the new TB.  */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
/* Print translation-buffer statistics (TB counts, sizes, jump chaining,
   flush counters) through the supplied fprintf-like callback.
   NOTE(review): format-string padding reconstructed from upstream — the
   extraction collapsed whitespace; confirm against original.  */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A second page address means the TB spans a page boundary.  */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks an unchained jump slot.  */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
3397 #if !defined(CONFIG_USER_ONLY)
3399 #define MMUSUFFIX _cmmu
3400 #define GETPC() NULL
3401 #define env cpu_single_env
3402 #define SOFTMMU_CODE_ACCESS
3405 #include "softmmu_template.h"
3408 #include "softmmu_template.h"
3411 #include "softmmu_template.h"
3414 #include "softmmu_template.h"