/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;

#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
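/*
 * Illustration (sketch, not part of the build): how a target page number is
 * split across the two-level l1_map[] table used by page_find_alloc() and
 * page_find() below.  The names 'addr', 'index' and 'level2' are only for
 * this example.
 *
 *   unsigned int index = addr >> TARGET_PAGE_BITS;       // target page number
 *   PageDesc *level2 = l1_map[index >> L2_BITS];          // L1 slot: one L2 block
 *   PageDesc *pd = level2 ? &level2[index & (L2_SIZE - 1)] : NULL;
 *
 * With a 32 bit target address space, L1_BITS + L2_BITS + TARGET_PAGE_BITS add
 * up to exactly 32, so every page number maps to a unique (L1, L2) pair.
 */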
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

char *logfilename = "/tmp/qemu.log";

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;

    VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                   PAGE_EXECUTE_READWRITE, &old_protect);

    qemu_real_host_page_size = getpagesize();
    unsigned long start, end;

    start = (unsigned long)code_gen_buffer;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
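/*
 * Worked example (assumed numbers, for illustration only): with a 4 KB target
 * page and a 64 KB host page (as on some PPC or IA-64 hosts), page_init()
 * leaves:
 *
 *   qemu_host_page_size = 0x10000;
 *   qemu_host_page_bits = 16;                 // smallest n with (1 << n) >= size
 *   qemu_host_page_mask = ~(0x10000 - 1);     // 0xffff0000
 *
 * so one host page covers 16 target pages; code such as tb_alloc_page() must
 * then protect the whole host page even when only one target page holds code.
 */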
static inline PageDesc *page_find_alloc(unsigned int index)
    lp = &l1_map[index >> L2_BITS];
    /* allocate if not found */
    p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
    memset(p, 0, sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(unsigned int index)
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    /* allocate if not found */
    p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
    memset(p, 0, sizeof(void *) * L1_SIZE);
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    /* allocate if not found */
    pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++)
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
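/*
 * Usage sketch (illustrative only): callers treat a missing PhysPageDesc as
 * unassigned memory, which is why phys_page_find() passes alloc=0:
 *
 *   PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
 *   unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
 *
 * This is the pattern used by tlb_set_page_exec() and the slow memory
 * accessors further down in this file.
 */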
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

void cpu_exec_init(CPUState *env)
    code_gen_ptr = code_gen_buffer;
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;

static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);

void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
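/*
 * Note on tb_remove() (sketch, not part of the build): the same helper can
 * unlink a TB from either of its singly linked lists because it is given the
 * byte offset of the relevant 'next' field, so removal from the physical hash
 * chain is spelled:
 *
 *   tb_remove(&tb_phys_hash[h], tb,
 *             offsetof(TranslationBlock, phys_hash_next));
 *
 * and the pointer to follow is recovered inside tb_remove() with
 *   *(TranslationBlock **)((char *)tb1 + next_offset)
 */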
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
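/*
 * Worked example for set_bits() (illustrative values): marking start=5, len=7
 * in an all-zero bitmap touches bytes 0 and 1 of 'tab':
 *
 *   end = 5 + 7 = 12
 *   byte 0: mask = 0xff << (5 & 7)  = 0xe0   -> bits 5..7 set
 *   byte 1: mask = ~(0xff << (12 & 7)) = 0x0f -> bits 8..11 set
 *
 * i.e. one partial leading byte, full middle bytes (none here), and one
 * partial trailing byte, matching the three cases handled above.
 */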
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
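/*
 * Example (sketch, assumed offsets): a TB whose pc sits 0x20 bytes before a
 * page boundary and whose size is 0x50 spans two pages.  For the first page's
 * bitmap the code above clamps tb_end to TARGET_PAGE_SIZE:
 *
 *   tb_start = TARGET_PAGE_SIZE - 0x20;
 *   tb_end   = TARGET_PAGE_SIZE;                          // clamped
 *
 * while for the second page it keeps only the tail of the TB:
 *
 *   tb_end   = (tb->pc + tb->size) & ~TARGET_PAGE_MASK;   // 0x30
 *
 * so each page's code_bitmap only covers the bytes of the TB that really lie
 * in that page.
 */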
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
        cpu_resume_from_signal(env, NULL);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
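/*
 * Sketch of the fast-path test above (illustrative values): for a 2-byte
 * write at offset 0x105 within the page,
 *
 *   offset = 0x105;
 *   b = p->code_bitmap[0x105 >> 3] >> (0x105 & 7);   // byte 0x20, shift by 5
 *   if (b & ((1 << 2) - 1)) ...                      // bits for 0x105..0x106
 *
 * Only when one of the touched bits is set (translated code lives at those
 * guest addresses) does it fall back to the slow
 * tb_invalidate_phys_page_range() path.
 */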
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    current_tb_modified = 0;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
        current_tb = tb_find_pc(pc);
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);

#endif /* TARGET_HAS_SMC */
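/*
 * Note on the tagged pointers used above (sketch): p->first_tb stores the
 * page slot number (0 or 1) in the low bits of the TB pointer, which is safe
 * because TranslationBlock structures are at least 4-byte aligned.  Walking
 * such a list therefore looks like:
 *
 *   TranslationBlock *tb = p->first_tb;
 *   while (tb != NULL) {
 *       int n = (long)tb & 3;                       // which page_addr[] slot
 *       tb = (TranslationBlock *)((long)tb & ~3);   // real pointer
 *       ... use tb ...
 *       tb = tb->page_next[n];                      // next TB in this page
 *   }
 *
 * The same encoding is used for jmp_first/jmp_next, with the value 2 acting
 * as the end-of-list marker.
 */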
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
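/*
 * Sketch of the lookup above (illustrative, consistent with the visible
 * fragments): tbs[] entries are laid out in generation order, so their tc_ptr
 * values are increasing and a standard binary search applies:
 *
 *   m_min = 0; m_max = nb_tbs - 1;
 *   while (m_min <= m_max) {
 *       m = (m_min + m_max) >> 1;
 *       v = (unsigned long)tbs[m].tc_ptr;
 *       if (v == tc_ptr)      return &tbs[m];
 *       else if (tc_ptr < v)  m_max = m - 1;
 *       else                  m_min = m + 1;
 *   }
 *
 * The TB containing tc_ptr is then the last one whose tc_ptr is <= tc_ptr.
 */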
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_ulong addr, pd;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        setvbuf(logfile, NULL, _IOLBF, 0);

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
        p1 = strchr(p, ',');
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
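/*
 * Note on the comparison above (sketch): each addr_* field keeps the page
 * address in its high bits and status bits (I/O index, TLB_INVALID_MASK,
 * IO_MEM_NOTDIRTY, ...) in the low bits, so matching a page is done with
 *
 *   (tlb_entry->addr_write & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) == addr
 *
 * Including TLB_INVALID_MASK in the mask means an entry already set to -1
 * never compares equal to a page-aligned addr and is simply left untouched.
 */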
void tlb_flush_page(CPUState *env, target_ulong addr)
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
        for(i = 0; i < L1_SIZE; i++) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                    addr += TARGET_PAGE_SIZE;

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
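/*
 * Sketch of the dirty-bit round trip (illustrative): a RAM page starts with
 * all dirty bits set.  When cpu_physical_memory_reset_dirty() clears them,
 * tlb_reset_dirty_range() rewrites matching write entries from IO_MEM_RAM to
 * IO_MEM_NOTDIRTY, so the next guest store goes through notdirty_mem_write*()
 * instead of a direct host store:
 *
 *   write hits IO_MEM_NOTDIRTY
 *     -> tb_invalidate_phys_page_fast()     // flush any translated code there
 *     -> phys_ram_dirty[page] updated       // page is dirty again
 *     -> tlb_set_dirty()                    // entry switched back to IO_MEM_RAM
 *
 * tlb_update_dirty()/cpu_tlb_update_dirty() above re-apply the NOTDIRTY
 * marking after the dirty bitmap has been changed behind the TLB's back.
 */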
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
    target_ulong address;
    target_phys_addr_t addend;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);

#if !defined(CONFIG_SOFTMMU)
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            /* standard memory */
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
                te->addr_write = address;
            te->addr_write = -1;
#if !defined(CONFIG_SOFTMMU)
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
            if (vaddr >= MMAP_AREA_END) {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
#if !defined(CONFIG_SOFTMMU)

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
    if (!(vp->prot & PAGE_WRITE))
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)

/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0;j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');

int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
    spin_unlock(&tb_lock);
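/*
 * Usage sketch (user-mode emulation, illustrative values only): an emulated
 * mprotect() that makes a region writable again would be forwarded as
 *
 *   page_set_flags(start, start + len,
 *                  PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
 *
 * page_set_flags() then adds PAGE_WRITE_ORG, and if a page that is becoming
 * writable previously held translated code, the check above invalidates that
 * code.
 */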
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user. */
void page_unprotect_range(target_ulong data, target_ulong data_size)
    target_ulong start, end, addr;

    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)

#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long phys_offset)
    target_phys_addr_t addr, end_addr;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
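/*
 * Usage sketch (illustrative names and values only): a board model typically
 * registers its RAM and ROM regions like
 *
 *   cpu_register_physical_memory(0x00000000, ram_size,
 *                                ram_offset | IO_MEM_RAM);
 *   cpu_register_physical_memory(0xfff00000, bios_size,
 *                                bios_offset | IO_MEM_ROM);
 *
 * where ram_offset/bios_offset are offsets inside phys_ram_base.  A
 * phys_offset whose low bits select an I/O slot (as returned by
 * cpu_register_io_memory()) registers MMIO instead of RAM.
 */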
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stb_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stw_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stl_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,

static void io_mem_init(void)
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
        io_index = io_mem_nb++;
        if (io_index >= IO_MEM_NB_ENTRIES)

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
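/*
 * Usage sketch (illustrative only; the mydev_* callbacks are hypothetical): a
 * device supplies three read and three write callbacks (byte, word, long),
 * registers them, and maps the returned value with
 * cpu_register_physical_memory():
 *
 *   static CPUReadMemoryFunc *mydev_read[3] = {
 *       mydev_readb, mydev_readw, mydev_readl,
 *   };
 *   static CPUWriteMemoryFunc *mydev_write[3] = {
 *       mydev_writeb, mydev_writew, mydev_writel,
 *   };
 *   int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
 *   cpu_register_physical_memory(base_addr, 0x1000, io);
 *
 * Passing io_index == 0 allocates a fresh slot; a non-zero index overwrites
 * an existing one, as described in the comment above.
 */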
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
    return io_mem_write[io_index >> IO_MEM_SHIFT];

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
    return io_mem_read[io_index >> IO_MEM_SHIFT];

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
            if (!(flags & PAGE_READ))
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid ... */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    /* 8 bit write access */
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);

uint32_t ldub_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, &val, 1);

uint32_t lduw_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);

void stb_phys(target_phys_addr_t addr, uint32_t val)
    cpu_physical_memory_write(addr, &v, 1);

void stw_phys(target_phys_addr_t addr, uint32_t val)
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);

void stq_phys(target_phys_addr_t addr, uint64_t val)
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
    target_ulong page, phys_addr;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
        if (tb->tb_next_offset[0] != 0xffff) {
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;

    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"