2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
37 #if defined(CONFIG_USER_ONLY)
41 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 #if !defined(CONFIG_USER_ONLY)
51 /* TB consistency checks only implemented for usermode emulation. */
55 /* threshold to flush the translated code buffer */
56 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
58 #define SMC_BITMAP_USE_THRESHOLD 10
60 #define MMAP_AREA_START 0x00000000
61 #define MMAP_AREA_END 0xa8000000
63 #if defined(TARGET_SPARC64)
64 #define TARGET_PHYS_ADDR_SPACE_BITS 41
65 #elif defined(TARGET_PPC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 42
68 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69 #define TARGET_PHYS_ADDR_SPACE_BITS 32
73 extern int kvm_allowed;
76 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
77 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
79 /* any access to the tbs or the page table must use this lock */
80 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
82 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
83 uint8_t *code_gen_ptr;
87 uint8_t *phys_ram_base;
88 uint8_t *phys_ram_dirty;
90 static int in_migration;
93 /* current CPU in the current thread. It is only valid inside
95 CPUState *cpu_single_env;
97 typedef struct PageDesc {
98 /* list of TBs intersecting this ram page */
99 TranslationBlock *first_tb;
100 /* in order to optimize self modifying code, we count the number
101 of lookups we do to a given page to use a bitmap */
102 unsigned int code_write_count;
103 uint8_t *code_bitmap;
104 #if defined(CONFIG_USER_ONLY)
109 typedef struct PhysPageDesc {
110 /* offset in host memory of the page + io_index in the low 12 bits */
111 uint32_t phys_offset;
115 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
117 #define L1_SIZE (1 << L1_BITS)
118 #define L2_SIZE (1 << L2_BITS)
120 static void io_mem_init(void);
122 unsigned long qemu_real_host_page_size;
123 unsigned long qemu_host_page_bits;
124 unsigned long qemu_host_page_size;
125 unsigned long qemu_host_page_mask;
127 /* XXX: for system emulation, it could just be an array */
128 static PageDesc *l1_map[L1_SIZE];
129 PhysPageDesc **l1_phys_map;
131 /* io memory support */
132 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
133 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
134 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
135 static int io_mem_nb;
138 char *logfilename = "/tmp/qemu.log";
143 static int tlb_flush_count;
144 static int tb_flush_count;
145 static int tb_phys_invalidate_count;
147 static void page_init(void)
149 /* NOTE: we can always suppose that qemu_host_page_size >=
153 SYSTEM_INFO system_info;
156 GetSystemInfo(&system_info);
157 qemu_real_host_page_size = system_info.dwPageSize;
159 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
160 PAGE_EXECUTE_READWRITE, &old_protect);
163 qemu_real_host_page_size = getpagesize();
165 unsigned long start, end;
167 start = (unsigned long)code_gen_buffer;
168 start &= ~(qemu_real_host_page_size - 1);
170 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
171 end += qemu_real_host_page_size - 1;
172 end &= ~(qemu_real_host_page_size - 1);
174 mprotect((void *)start, end - start,
175 PROT_READ | PROT_WRITE | PROT_EXEC);
179 if (qemu_host_page_size == 0)
180 qemu_host_page_size = qemu_real_host_page_size;
181 if (qemu_host_page_size < TARGET_PAGE_SIZE)
182 qemu_host_page_size = TARGET_PAGE_SIZE;
183 qemu_host_page_bits = 0;
184 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
185 qemu_host_page_bits++;
186 qemu_host_page_mask = ~(qemu_host_page_size - 1);
187 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
188 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
191 static inline PageDesc *page_find_alloc(unsigned int index)
195 lp = &l1_map[index >> L2_BITS];
198 /* allocate if not found */
199 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
200 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
203 return p + (index & (L2_SIZE - 1));
206 static inline PageDesc *page_find(unsigned int index)
210 p = l1_map[index >> L2_BITS];
213 return p + (index & (L2_SIZE - 1));
216 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
221 p = (void **)l1_phys_map;
222 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
224 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
225 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
227 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
230 /* allocate if not found */
233 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
234 memset(p, 0, sizeof(void *) * L1_SIZE);
238 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
242 /* allocate if not found */
245 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
247 for (i = 0; i < L2_SIZE; i++)
248 pd[i].phys_offset = IO_MEM_UNASSIGNED;
250 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
253 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
255 return phys_page_find_alloc(index, 0);
258 #if !defined(CONFIG_USER_ONLY)
259 static void tlb_protect_code(ram_addr_t ram_addr);
260 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
264 void cpu_exec_init(CPUState *env)
270 code_gen_ptr = code_gen_buffer;
274 env->next_cpu = NULL;
277 while (*penv != NULL) {
278 penv = (CPUState **)&(*penv)->next_cpu;
281 env->cpu_index = cpu_index;
285 static inline void invalidate_page_bitmap(PageDesc *p)
287 if (p->code_bitmap) {
288 qemu_free(p->code_bitmap);
289 p->code_bitmap = NULL;
291 p->code_write_count = 0;
294 /* set to NULL all the 'first_tb' fields in all PageDescs */
295 static void page_flush_tb(void)
300 for(i = 0; i < L1_SIZE; i++) {
303 for(j = 0; j < L2_SIZE; j++) {
305 invalidate_page_bitmap(p);
312 /* flush all the translation blocks */
313 /* XXX: tb_flush is currently not thread safe */
314 void tb_flush(CPUState *env1)
317 #if defined(DEBUG_FLUSH)
318 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
319 code_gen_ptr - code_gen_buffer,
321 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
325 for(env = first_cpu; env != NULL; env = env->next_cpu) {
326 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
329 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
332 code_gen_ptr = code_gen_buffer;
333 /* XXX: flush processor icache at this point if cache flush is
338 #ifdef DEBUG_TB_CHECK
340 static void tb_invalidate_check(unsigned long address)
342 TranslationBlock *tb;
344 address &= TARGET_PAGE_MASK;
345 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
346 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
347 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
348 address >= tb->pc + tb->size)) {
349 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
350 address, (long)tb->pc, tb->size);
356 /* verify that all the pages have correct rights for code */
357 static void tb_page_check(void)
359 TranslationBlock *tb;
360 int i, flags1, flags2;
362 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
363 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
364 flags1 = page_get_flags(tb->pc);
365 flags2 = page_get_flags(tb->pc + tb->size - 1);
366 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
367 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
368 (long)tb->pc, tb->size, flags1, flags2);
374 void tb_jmp_check(TranslationBlock *tb)
376 TranslationBlock *tb1;
379 /* suppress any remaining jumps to this TB */
383 tb1 = (TranslationBlock *)((long)tb1 & ~3);
386 tb1 = tb1->jmp_next[n1];
388 /* check end of list */
390 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
396 /* invalidate one TB */
397 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
400 TranslationBlock *tb1;
404 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
407 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
411 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
413 TranslationBlock *tb1;
419 tb1 = (TranslationBlock *)((long)tb1 & ~3);
421 *ptb = tb1->page_next[n1];
424 ptb = &tb1->page_next[n1];
428 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
430 TranslationBlock *tb1, **ptb;
433 ptb = &tb->jmp_next[n];
436 /* find tb(n) in circular list */
440 tb1 = (TranslationBlock *)((long)tb1 & ~3);
441 if (n1 == n && tb1 == tb)
444 ptb = &tb1->jmp_first;
446 ptb = &tb1->jmp_next[n1];
449 /* now we can suppress tb(n) from the list */
450 *ptb = tb->jmp_next[n];
452 tb->jmp_next[n] = NULL;
456 /* reset the jump entry 'n' of a TB so that it is not chained to
458 static inline void tb_reset_jump(TranslationBlock *tb, int n)
460 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
463 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
468 target_ulong phys_pc;
469 TranslationBlock *tb1, *tb2;
471 /* remove the TB from the hash list */
472 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
473 h = tb_phys_hash_func(phys_pc);
474 tb_remove(&tb_phys_hash[h], tb,
475 offsetof(TranslationBlock, phys_hash_next));
477 /* remove the TB from the page list */
478 if (tb->page_addr[0] != page_addr) {
479 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
480 tb_page_remove(&p->first_tb, tb);
481 invalidate_page_bitmap(p);
483 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
484 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
485 tb_page_remove(&p->first_tb, tb);
486 invalidate_page_bitmap(p);
489 tb_invalidated_flag = 1;
491 /* remove the TB from the hash list */
492 h = tb_jmp_cache_hash_func(tb->pc);
493 for(env = first_cpu; env != NULL; env = env->next_cpu) {
494 if (env->tb_jmp_cache[h] == tb)
495 env->tb_jmp_cache[h] = NULL;
498 /* suppress this TB from the two jump lists */
499 tb_jmp_remove(tb, 0);
500 tb_jmp_remove(tb, 1);
502 /* suppress any remaining jumps to this TB */
508 tb1 = (TranslationBlock *)((long)tb1 & ~3);
509 tb2 = tb1->jmp_next[n1];
510 tb_reset_jump(tb1, n1);
511 tb1->jmp_next[n1] = NULL;
514 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
516 tb_phys_invalidate_count++;
519 static inline void set_bits(uint8_t *tab, int start, int len)
525 mask = 0xff << (start & 7);
526 if ((start & ~7) == (end & ~7)) {
528 mask &= ~(0xff << (end & 7));
533 start = (start + 8) & ~7;
535 while (start < end1) {
540 mask = ~(0xff << (end & 7));
546 static void build_page_bitmap(PageDesc *p)
548 int n, tb_start, tb_end;
549 TranslationBlock *tb;
551 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
554 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
559 tb = (TranslationBlock *)((long)tb & ~3);
560 /* NOTE: this is subtle as a TB may span two physical pages */
562 /* NOTE: tb_end may be after the end of the page, but
563 it is not a problem */
564 tb_start = tb->pc & ~TARGET_PAGE_MASK;
565 tb_end = tb_start + tb->size;
566 if (tb_end > TARGET_PAGE_SIZE)
567 tb_end = TARGET_PAGE_SIZE;
570 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
572 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
573 tb = tb->page_next[n];
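/* Illustrative sketch of the bitmap convention used by set_bits() and
   build_page_bitmap(): one bit per byte of the target page, set when that
   byte is covered by translated code.  The offsets below are hypothetical. */
static void code_bitmap_sketch(PageDesc *p)
{
    /* a TB whose code starts at byte 5 of the page and is 9 bytes long
       marks bits 5..13, exactly as the per-TB loop above does */
    set_bits(p->code_bitmap, 5, 9);
}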
577 #ifdef TARGET_HAS_PRECISE_SMC
579 static void tb_gen_code(CPUState *env,
580 target_ulong pc, target_ulong cs_base, int flags,
583 TranslationBlock *tb;
585 target_ulong phys_pc, phys_page2, virt_page2;
588 phys_pc = get_phys_addr_code(env, pc);
591 /* flush must be done */
593 /* cannot fail at this point */
596 tc_ptr = code_gen_ptr;
598 tb->cs_base = cs_base;
601 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
602 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
604 /* check next page if needed */
605 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
607 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
608 phys_page2 = get_phys_addr_code(env, virt_page2);
610 tb_link_phys(tb, phys_pc, phys_page2);
614 /* invalidate all TBs which intersect with the target physical page
615 starting in range [start;end[. NOTE: start and end must refer to
616 the same physical page. 'is_cpu_write_access' should be true if called
617 from a real cpu write access: the virtual CPU will exit the current
618 TB if code is modified inside this TB. */
619 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
620 int is_cpu_write_access)
622 int n, current_tb_modified, current_tb_not_found, current_flags;
623 CPUState *env = cpu_single_env;
625 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
626 target_ulong tb_start, tb_end;
627 target_ulong current_pc, current_cs_base;
629 p = page_find(start >> TARGET_PAGE_BITS);
632 if (!p->code_bitmap &&
633 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
634 is_cpu_write_access) {
635 /* build code bitmap */
636 build_page_bitmap(p);
639 /* we remove all the TBs in the range [start, end[ */
640 /* XXX: see if in some cases it could be faster to invalidate all the code */
641 current_tb_not_found = is_cpu_write_access;
642 current_tb_modified = 0;
643 current_tb = NULL; /* avoid warning */
644 current_pc = 0; /* avoid warning */
645 current_cs_base = 0; /* avoid warning */
646 current_flags = 0; /* avoid warning */
650 tb = (TranslationBlock *)((long)tb & ~3);
651 tb_next = tb->page_next[n];
652 /* NOTE: this is subtle as a TB may span two physical pages */
654 /* NOTE: tb_end may be after the end of the page, but
655 it is not a problem */
656 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
657 tb_end = tb_start + tb->size;
659 tb_start = tb->page_addr[1];
660 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
662 if (!(tb_end <= start || tb_start >= end)) {
663 #ifdef TARGET_HAS_PRECISE_SMC
664 if (current_tb_not_found) {
665 current_tb_not_found = 0;
667 if (env->mem_write_pc) {
668 /* now we have a real cpu fault */
669 current_tb = tb_find_pc(env->mem_write_pc);
672 if (current_tb == tb &&
673 !(current_tb->cflags & CF_SINGLE_INSN)) {
674 /* If we are modifying the current TB, we must stop
675 its execution. We could be more precise by checking
676 that the modification is after the current PC, but it
677 would require a specialized function to partially
678 restore the CPU state */
680 current_tb_modified = 1;
681 cpu_restore_state(current_tb, env,
682 env->mem_write_pc, NULL);
683 #if defined(TARGET_I386)
684 current_flags = env->hflags;
685 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
686 current_cs_base = (target_ulong)env->segs[R_CS].base;
687 current_pc = current_cs_base + env->eip;
689 #error unsupported CPU
692 #endif /* TARGET_HAS_PRECISE_SMC */
693 /* we need to do that to handle the case where a signal
694 occurs while doing tb_phys_invalidate() */
697 saved_tb = env->current_tb;
698 env->current_tb = NULL;
700 tb_phys_invalidate(tb, -1);
702 env->current_tb = saved_tb;
703 if (env->interrupt_request && env->current_tb)
704 cpu_interrupt(env, env->interrupt_request);
709 #if !defined(CONFIG_USER_ONLY)
710 /* if no code remaining, no need to continue to use slow writes */
712 invalidate_page_bitmap(p);
713 if (is_cpu_write_access) {
714 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
718 #ifdef TARGET_HAS_PRECISE_SMC
719 if (current_tb_modified) {
720 /* we generate a block containing just the instruction
721 modifying the memory. It will ensure that it cannot modify
723 env->current_tb = NULL;
724 tb_gen_code(env, current_pc, current_cs_base, current_flags,
726 cpu_resume_from_signal(env, NULL);
731 /* len must be <= 8 and start must be a multiple of len */
732 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
739 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
740 cpu_single_env->mem_write_vaddr, len,
742 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
746 p = page_find(start >> TARGET_PAGE_BITS);
749 if (p->code_bitmap) {
750 offset = start & ~TARGET_PAGE_MASK;
751 b = p->code_bitmap[offset >> 3] >> (offset & 7);
752 if (b & ((1 << len) - 1))
756 tb_invalidate_phys_page_range(start, start + len, 1);
760 #if !defined(CONFIG_SOFTMMU)
761 static void tb_invalidate_phys_page(target_ulong addr,
762 unsigned long pc, void *puc)
764 int n, current_flags, current_tb_modified;
765 target_ulong current_pc, current_cs_base;
767 TranslationBlock *tb, *current_tb;
768 #ifdef TARGET_HAS_PRECISE_SMC
769 CPUState *env = cpu_single_env;
772 addr &= TARGET_PAGE_MASK;
773 p = page_find(addr >> TARGET_PAGE_BITS);
777 current_tb_modified = 0;
779 current_pc = 0; /* avoid warning */
780 current_cs_base = 0; /* avoid warning */
781 current_flags = 0; /* avoid warning */
782 #ifdef TARGET_HAS_PRECISE_SMC
784 current_tb = tb_find_pc(pc);
789 tb = (TranslationBlock *)((long)tb & ~3);
790 #ifdef TARGET_HAS_PRECISE_SMC
791 if (current_tb == tb &&
792 !(current_tb->cflags & CF_SINGLE_INSN)) {
793 /* If we are modifying the current TB, we must stop
794 its execution. We could be more precise by checking
795 that the modification is after the current PC, but it
796 would require a specialized function to partially
797 restore the CPU state */
799 current_tb_modified = 1;
800 cpu_restore_state(current_tb, env, pc, puc);
801 #if defined(TARGET_I386)
802 current_flags = env->hflags;
803 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
804 current_cs_base = (target_ulong)env->segs[R_CS].base;
805 current_pc = current_cs_base + env->eip;
807 #error unsupported CPU
810 #endif /* TARGET_HAS_PRECISE_SMC */
811 tb_phys_invalidate(tb, addr);
812 tb = tb->page_next[n];
815 #ifdef TARGET_HAS_PRECISE_SMC
816 if (current_tb_modified) {
817 /* we generate a block containing just the instruction
818 modifying the memory. It will ensure that it cannot modify
820 env->current_tb = NULL;
821 tb_gen_code(env, current_pc, current_cs_base, current_flags,
823 cpu_resume_from_signal(env, puc);
829 /* add the tb in the target page and protect it if necessary */
830 static inline void tb_alloc_page(TranslationBlock *tb,
831 unsigned int n, target_ulong page_addr)
834 TranslationBlock *last_first_tb;
836 tb->page_addr[n] = page_addr;
837 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
838 tb->page_next[n] = p->first_tb;
839 last_first_tb = p->first_tb;
840 p->first_tb = (TranslationBlock *)((long)tb | n);
841 invalidate_page_bitmap(p);
843 #if defined(TARGET_HAS_SMC) || 1
845 #if defined(CONFIG_USER_ONLY)
846 if (p->flags & PAGE_WRITE) {
851 /* force the host page as non writable (writes will have a
852 page fault + mprotect overhead) */
853 page_addr &= qemu_host_page_mask;
855 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
856 addr += TARGET_PAGE_SIZE) {
858 p2 = page_find (addr >> TARGET_PAGE_BITS);
862 p2->flags &= ~PAGE_WRITE;
863 page_get_flags(addr);
865 mprotect(g2h(page_addr), qemu_host_page_size,
866 (prot & PAGE_BITS) & ~PAGE_WRITE);
867 #ifdef DEBUG_TB_INVALIDATE
868 printf("protecting code page: 0x%08lx\n",
873 /* if some code is already present, then the pages are already
874 protected. So we handle the case where only the first TB is
875 allocated in a physical page */
876 if (!last_first_tb) {
877 tlb_protect_code(page_addr);
881 #endif /* TARGET_HAS_SMC */
884 /* Allocate a new translation block. Flush the translation buffer if
885 too many translation blocks or too much generated code. */
886 TranslationBlock *tb_alloc(target_ulong pc)
888 TranslationBlock *tb;
890 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
891 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
899 /* add a new TB and link it to the physical page tables. phys_page2 is
900 (-1) to indicate that only one page contains the TB. */
901 void tb_link_phys(TranslationBlock *tb,
902 target_ulong phys_pc, target_ulong phys_page2)
905 TranslationBlock **ptb;
907 /* add in the physical hash table */
908 h = tb_phys_hash_func(phys_pc);
909 ptb = &tb_phys_hash[h];
910 tb->phys_hash_next = *ptb;
913 /* add in the page list */
914 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
915 if (phys_page2 != -1)
916 tb_alloc_page(tb, 1, phys_page2);
918 tb->page_addr[1] = -1;
920 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
921 tb->jmp_next[0] = NULL;
922 tb->jmp_next[1] = NULL;
924 tb->cflags &= ~CF_FP_USED;
925 if (tb->cflags & CF_TB_FP_USED)
926 tb->cflags |= CF_FP_USED;
929 /* init original jump addresses */
930 if (tb->tb_next_offset[0] != 0xffff)
931 tb_reset_jump(tb, 0);
932 if (tb->tb_next_offset[1] != 0xffff)
933 tb_reset_jump(tb, 1);
935 #ifdef DEBUG_TB_CHECK
940 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
941 tb[1].tc_ptr. Return NULL if not found */
942 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
946 TranslationBlock *tb;
950 if (tc_ptr < (unsigned long)code_gen_buffer ||
951 tc_ptr >= (unsigned long)code_gen_ptr)
953 /* binary search (cf Knuth) */
956 while (m_min <= m_max) {
957 m = (m_min + m_max) >> 1;
959 v = (unsigned long)tb->tc_ptr;
962 else if (tc_ptr < v) {
971 static void tb_reset_jump_recursive(TranslationBlock *tb);
973 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
975 TranslationBlock *tb1, *tb_next, **ptb;
978 tb1 = tb->jmp_next[n];
980 /* find head of list */
983 tb1 = (TranslationBlock *)((long)tb1 & ~3);
986 tb1 = tb1->jmp_next[n1];
988 /* we are now sure that tb jumps to tb1 */
991 /* remove tb from the jmp_first list */
992 ptb = &tb_next->jmp_first;
996 tb1 = (TranslationBlock *)((long)tb1 & ~3);
997 if (n1 == n && tb1 == tb)
999 ptb = &tb1->jmp_next[n1];
1001 *ptb = tb->jmp_next[n];
1002 tb->jmp_next[n] = NULL;
1004 /* suppress the jump to next tb in generated code */
1005 tb_reset_jump(tb, n);
1007 /* suppress jumps in the tb on which we could have jumped */
1008 tb_reset_jump_recursive(tb_next);
1012 static void tb_reset_jump_recursive(TranslationBlock *tb)
1014 tb_reset_jump_recursive2(tb, 0);
1015 tb_reset_jump_recursive2(tb, 1);
1018 #if defined(TARGET_HAS_ICE)
1019 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1021 target_ulong addr, pd;
1022 ram_addr_t ram_addr;
1025 addr = cpu_get_phys_page_debug(env, pc);
1026 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1028 pd = IO_MEM_UNASSIGNED;
1030 pd = p->phys_offset;
1032 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1033 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1037 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1038 breakpoint is reached */
1039 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1041 #if defined(TARGET_HAS_ICE)
1044 for(i = 0; i < env->nb_breakpoints; i++) {
1045 if (env->breakpoints[i] == pc)
1049 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1051 env->breakpoints[env->nb_breakpoints++] = pc;
1055 kvm_update_debugger(env);
1058 breakpoint_invalidate(env, pc);
1065 /* remove a breakpoint */
1066 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1068 #if defined(TARGET_HAS_ICE)
1070 for(i = 0; i < env->nb_breakpoints; i++) {
1071 if (env->breakpoints[i] == pc)
1076 env->nb_breakpoints--;
1077 if (i < env->nb_breakpoints)
1078 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1082 kvm_update_debugger(env);
1085 breakpoint_invalidate(env, pc);
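/* Illustrative usage sketch (hypothetical debugger stub): the two functions
   above are the whole public breakpoint interface, so toggling a breakpoint
   at a guest PC is just insert/remove. */
static void toggle_breakpoint_sketch(CPUState *env, target_ulong pc, int enable)
{
    if (enable)
        cpu_breakpoint_insert(env, pc);
    else
        cpu_breakpoint_remove(env, pc);
}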
1092 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1093 CPU loop after each instruction */
1094 void cpu_single_step(CPUState *env, int enabled)
1096 #if defined(TARGET_HAS_ICE)
1097 if (env->singlestep_enabled != enabled) {
1098 env->singlestep_enabled = enabled;
1099 /* must flush all the translated code to avoid inconsistencies */
1100 /* XXX: only flush what is necessary */
1105 kvm_update_debugger(env);
1110 /* enable or disable low level logging */
1111 void cpu_set_log(int log_flags)
1113 loglevel = log_flags;
1114 if (loglevel && !logfile) {
1115 logfile = fopen(logfilename, "w");
1117 perror(logfilename);
1120 #if !defined(CONFIG_SOFTMMU)
1121 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1123 static uint8_t logfile_buf[4096];
1124 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1127 setvbuf(logfile, NULL, _IOLBF, 0);
1132 void cpu_set_log_filename(const char *filename)
1134 logfilename = strdup(filename);
1137 /* mask must never be zero, except for A20 change call */
1138 void cpu_interrupt(CPUState *env, int mask)
1140 TranslationBlock *tb;
1141 static int interrupt_lock;
1143 env->interrupt_request |= mask;
1144 /* if the cpu is currently executing code, we must unlink it and
1145 all the potentially executing TB */
1146 tb = env->current_tb;
1147 if (tb && !testandset(&interrupt_lock)) {
1148 env->current_tb = NULL;
1149 tb_reset_jump_recursive(tb);
1154 void cpu_reset_interrupt(CPUState *env, int mask)
1156 env->interrupt_request &= ~mask;
1159 CPULogItem cpu_log_items[] = {
1160 { CPU_LOG_TB_OUT_ASM, "out_asm",
1161 "show generated host assembly code for each compiled TB" },
1162 { CPU_LOG_TB_IN_ASM, "in_asm",
1163 "show target assembly code for each compiled TB" },
1164 { CPU_LOG_TB_OP, "op",
1165 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1167 { CPU_LOG_TB_OP_OPT, "op_opt",
1168 "show micro ops after optimization for each compiled TB" },
1170 { CPU_LOG_INT, "int",
1171 "show interrupts/exceptions in short format" },
1172 { CPU_LOG_EXEC, "exec",
1173 "show trace before each executed TB (lots of logs)" },
1174 { CPU_LOG_TB_CPU, "cpu",
1175 "show CPU state before block translation" },
1177 { CPU_LOG_PCALL, "pcall",
1178 "show protected mode far calls/returns/exceptions" },
1181 { CPU_LOG_IOPORT, "ioport",
1182 "show all I/O port accesses" },
1187 static int cmp1(const char *s1, int n, const char *s2)
1189 if (strlen(s2) != n)
1191 return memcmp(s1, s2, n) == 0;
1194 /* takes a comma separated list of log masks. Return 0 if error. */
1195 int cpu_str_to_log_mask(const char *str)
1204 p1 = strchr(p, ',');
1207 if(cmp1(p,p1-p,"all")) {
1208 for(item = cpu_log_items; item->mask != 0; item++) {
1212 for(item = cpu_log_items; item->mask != 0; item++) {
1213 if (cmp1(p, p1 - p, item->name))
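/* Illustrative usage sketch (hypothetical command line handler): parse a
   comma separated "-d" argument with cpu_str_to_log_mask() and enable the
   matching log items with cpu_set_log(). */
static void handle_d_option_sketch(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. "in_asm,op,exec" */
    if (!mask) {
        /* unknown item: a real caller would list the cpu_log_items table */
        return;
    }
    cpu_set_log(mask);
}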
1227 void cpu_abort(CPUState *env, const char *fmt, ...)
1232 fprintf(stderr, "qemu: fatal: ");
1233 vfprintf(stderr, fmt, ap);
1234 fprintf(stderr, "\n");
1236 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1238 cpu_dump_state(env, stderr, fprintf, 0);
1244 #if !defined(CONFIG_USER_ONLY)
1246 /* NOTE: if flush_global is true, also flush global entries (not
1248 void tlb_flush(CPUState *env, int flush_global)
1252 #if defined(DEBUG_TLB)
1253 printf("tlb_flush:\n");
1255 /* must reset current TB so that interrupts cannot modify the
1256 links while we are modifying them */
1257 env->current_tb = NULL;
1259 for(i = 0; i < CPU_TLB_SIZE; i++) {
1260 env->tlb_table[0][i].addr_read = -1;
1261 env->tlb_table[0][i].addr_write = -1;
1262 env->tlb_table[0][i].addr_code = -1;
1263 env->tlb_table[1][i].addr_read = -1;
1264 env->tlb_table[1][i].addr_write = -1;
1265 env->tlb_table[1][i].addr_code = -1;
1268 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1270 #if !defined(CONFIG_SOFTMMU)
1271 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1274 if (env->kqemu_enabled) {
1275 kqemu_flush(env, flush_global);
1281 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1283 if (addr == (tlb_entry->addr_read &
1284 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1285 addr == (tlb_entry->addr_write &
1286 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1287 addr == (tlb_entry->addr_code &
1288 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1289 tlb_entry->addr_read = -1;
1290 tlb_entry->addr_write = -1;
1291 tlb_entry->addr_code = -1;
1295 void tlb_flush_page(CPUState *env, target_ulong addr)
1298 TranslationBlock *tb;
1300 #if defined(DEBUG_TLB)
1301 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1303 /* must reset current TB so that interrupts cannot modify the
1304 links while we are modifying them */
1305 env->current_tb = NULL;
1307 addr &= TARGET_PAGE_MASK;
1308 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1309 tlb_flush_entry(&env->tlb_table[0][i], addr);
1310 tlb_flush_entry(&env->tlb_table[1][i], addr);
1312 /* Discard jump cache entries for any tb which might potentially
1313 overlap the flushed page. */
1314 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1315 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1317 i = tb_jmp_cache_hash_page(addr);
1318 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1320 #if !defined(CONFIG_SOFTMMU)
1321 if (addr < MMAP_AREA_END)
1322 munmap((void *)addr, TARGET_PAGE_SIZE);
1325 if (env->kqemu_enabled) {
1326 kqemu_flush_page(env, addr);
1331 /* update the TLBs so that writes to code in the virtual page 'addr'
1333 static void tlb_protect_code(ram_addr_t ram_addr)
1335 cpu_physical_memory_reset_dirty(ram_addr,
1336 ram_addr + TARGET_PAGE_SIZE,
1340 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1341 tested for self modifying code */
1342 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1345 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1348 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1349 unsigned long start, unsigned long length)
1352 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1353 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1354 if ((addr - start) < length) {
1355 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1360 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1364 unsigned long length, start1;
1368 start &= TARGET_PAGE_MASK;
1369 end = TARGET_PAGE_ALIGN(end);
1371 length = end - start;
1374 len = length >> TARGET_PAGE_BITS;
1376 /* XXX: should not depend on cpu context */
1378 if (env->kqemu_enabled) {
1381 for(i = 0; i < len; i++) {
1382 kqemu_set_notdirty(env, addr);
1383 addr += TARGET_PAGE_SIZE;
1387 mask = ~dirty_flags;
1388 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1389 for(i = 0; i < len; i++)
1392 /* we modify the TLB cache so that the dirty bit will be set again
1393 when accessing the range */
1394 start1 = start + (unsigned long)phys_ram_base;
1395 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1396 for(i = 0; i < CPU_TLB_SIZE; i++)
1397 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1398 for(i = 0; i < CPU_TLB_SIZE; i++)
1399 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1402 #if !defined(CONFIG_SOFTMMU)
1403 /* XXX: this is expensive */
1409 for(i = 0; i < L1_SIZE; i++) {
1412 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1413 for(j = 0; j < L2_SIZE; j++) {
1414 if (p->valid_tag == virt_valid_tag &&
1415 p->phys_addr >= start && p->phys_addr < end &&
1416 (p->prot & PROT_WRITE)) {
1417 if (addr < MMAP_AREA_END) {
1418 mprotect((void *)addr, TARGET_PAGE_SIZE,
1419 p->prot & ~PROT_WRITE);
1422 addr += TARGET_PAGE_SIZE;
1431 int cpu_physical_memory_set_dirty_tracking(int enable)
1436 r = kvm_physical_memory_set_dirty_tracking(enable);
1438 in_migration = enable;
1442 int cpu_physical_memory_get_dirty_tracking(void)
1444 return in_migration;
1447 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1449 ram_addr_t ram_addr;
1451 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1452 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1453 tlb_entry->addend - (unsigned long)phys_ram_base;
1454 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1455 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1460 /* update the TLB according to the current state of the dirty bits */
1461 void cpu_tlb_update_dirty(CPUState *env)
1464 for(i = 0; i < CPU_TLB_SIZE; i++)
1465 tlb_update_dirty(&env->tlb_table[0][i]);
1466 for(i = 0; i < CPU_TLB_SIZE; i++)
1467 tlb_update_dirty(&env->tlb_table[1][i]);
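/* Illustrative sketch (hypothetical caller such as a display refresh loop):
   consume a page's dirty state and rearm it.  vram_offset is a placeholder
   ram offset and VGA_DIRTY_FLAG is assumed to be one of the dirty flag bits
   defined in the public headers; neither is defined in this file. */
static void consume_dirty_page_sketch(ram_addr_t vram_offset)
{
    if (cpu_physical_memory_is_dirty(vram_offset)) {
        /* ... redraw the page ..., then clear its dirty bits so the next
           guest write is trapped again */
        cpu_physical_memory_reset_dirty(vram_offset,
                                        vram_offset + TARGET_PAGE_SIZE,
                                        VGA_DIRTY_FLAG);
    }
}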
1470 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1471 unsigned long start)
1474 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1475 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1476 if (addr == start) {
1477 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1482 /* update the TLB corresponding to virtual page vaddr and phys addr
1483 addr so that it is no longer dirty */
1484 static inline void tlb_set_dirty(CPUState *env,
1485 unsigned long addr, target_ulong vaddr)
1489 addr &= TARGET_PAGE_MASK;
1490 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1491 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1492 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1495 /* add a new TLB entry. At most one entry for a given virtual address
1496 is permitted. Return 0 if OK or 2 if the page could not be mapped
1497 (can only happen in non SOFTMMU mode for I/O pages or pages
1498 conflicting with the host address space). */
1499 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1500 target_phys_addr_t paddr, int prot,
1501 int is_user, int is_softmmu)
1506 target_ulong address;
1507 target_phys_addr_t addend;
1511 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1513 pd = IO_MEM_UNASSIGNED;
1515 pd = p->phys_offset;
1517 #if defined(DEBUG_TLB)
1518 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1519 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1523 #if !defined(CONFIG_SOFTMMU)
1527 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1528 /* IO memory case */
1529 address = vaddr | pd;
1532 /* standard memory */
1534 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1537 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1539 te = &env->tlb_table[is_user][index];
1540 te->addend = addend;
1541 if (prot & PAGE_READ) {
1542 te->addr_read = address;
1546 if (prot & PAGE_EXEC) {
1547 te->addr_code = address;
1551 if (prot & PAGE_WRITE) {
1552 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1553 (pd & IO_MEM_ROMD)) {
1554 /* write access calls the I/O callback */
1555 te->addr_write = vaddr |
1556 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1557 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1558 !cpu_physical_memory_is_dirty(pd)) {
1559 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1561 te->addr_write = address;
1564 te->addr_write = -1;
1567 #if !defined(CONFIG_SOFTMMU)
1569 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1570 /* IO access: no mapping is done as it will be handled by the
1572 if (!(env->hflags & HF_SOFTMMU_MASK))
1577 if (vaddr >= MMAP_AREA_END) {
1580 if (prot & PROT_WRITE) {
1581 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1582 #if defined(TARGET_HAS_SMC) || 1
1585 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1586 !cpu_physical_memory_is_dirty(pd))) {
1587 /* ROM: we do as if code was inside */
1588 /* if code is present, we only map as read only and save the
1592 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1595 vp->valid_tag = virt_valid_tag;
1596 prot &= ~PAGE_WRITE;
1599 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1600 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1601 if (map_addr == MAP_FAILED) {
1602 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1612 /* called from signal handler: invalidate the code and unprotect the
1613 page. Return TRUE if the fault was successfully handled. */
1614 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1616 #if !defined(CONFIG_SOFTMMU)
1619 #if defined(DEBUG_TLB)
1620 printf("page_unprotect: addr=0x%08x\n", addr);
1622 addr &= TARGET_PAGE_MASK;
1624 /* if it is not mapped, no need to worry here */
1625 if (addr >= MMAP_AREA_END)
1627 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1630 /* NOTE: in this case, validate_tag is _not_ tested as it
1631 validates only the code TLB */
1632 if (vp->valid_tag != virt_valid_tag)
1634 if (!(vp->prot & PAGE_WRITE))
1636 #if defined(DEBUG_TLB)
1637 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1638 addr, vp->phys_addr, vp->prot);
1640 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1641 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1642 (unsigned long)addr, vp->prot);
1643 /* set the dirty bit */
1644 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1645 /* flush the code inside */
1646 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1655 void tlb_flush(CPUState *env, int flush_global)
1659 void tlb_flush_page(CPUState *env, target_ulong addr)
1663 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1664 target_phys_addr_t paddr, int prot,
1665 int is_user, int is_softmmu)
1670 /* dump memory mappings */
1671 void page_dump(FILE *f)
1673 unsigned long start, end;
1674 int i, j, prot, prot1;
1677 fprintf(f, "%-8s %-8s %-8s %s\n",
1678 "start", "end", "size", "prot");
1682 for(i = 0; i <= L1_SIZE; i++) {
1687 for(j = 0;j < L2_SIZE; j++) {
1692 if (prot1 != prot) {
1693 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1695 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1696 start, end, end - start,
1697 prot & PAGE_READ ? 'r' : '-',
1698 prot & PAGE_WRITE ? 'w' : '-',
1699 prot & PAGE_EXEC ? 'x' : '-');
1713 int page_get_flags(target_ulong address)
1717 p = page_find(address >> TARGET_PAGE_BITS);
1723 /* modify the flags of a page and invalidate the code if
1724 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1725 depending on PAGE_WRITE */
1726 void page_set_flags(target_ulong start, target_ulong end, int flags)
1731 start = start & TARGET_PAGE_MASK;
1732 end = TARGET_PAGE_ALIGN(end);
1733 if (flags & PAGE_WRITE)
1734 flags |= PAGE_WRITE_ORG;
1735 spin_lock(&tb_lock);
1736 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1737 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1738 /* if the write protection is set, then we invalidate the code
1740 if (!(p->flags & PAGE_WRITE) &&
1741 (flags & PAGE_WRITE) &&
1743 tb_invalidate_phys_page(addr, 0, NULL);
1747 spin_unlock(&tb_lock);
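/* Illustrative usage sketch (hypothetical target mmap emulation): after
   mapping a new guest region, record its protection so the translator and
   the SMC machinery above see it.  PAGE_VALID marks the range as mapped. */
static void target_mmap_flags_sketch(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
}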
1750 /* called from signal handler: invalidate the code and unprotect the
1751 page. Return TRUE if the fault was successfully handled. */
1752 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1754 unsigned int page_index, prot, pindex;
1756 target_ulong host_start, host_end, addr;
1758 host_start = address & qemu_host_page_mask;
1759 page_index = host_start >> TARGET_PAGE_BITS;
1760 p1 = page_find(page_index);
1763 host_end = host_start + qemu_host_page_size;
1766 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1770 /* if the page was really writable, then we change its
1771 protection back to writable */
1772 if (prot & PAGE_WRITE_ORG) {
1773 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1774 if (!(p1[pindex].flags & PAGE_WRITE)) {
1775 mprotect((void *)g2h(host_start), qemu_host_page_size,
1776 (prot & PAGE_BITS) | PAGE_WRITE);
1777 p1[pindex].flags |= PAGE_WRITE;
1778 /* and since the content will be modified, we must invalidate
1779 the corresponding translated code. */
1780 tb_invalidate_phys_page(address, pc, puc);
1781 #ifdef DEBUG_TB_CHECK
1782 tb_invalidate_check(address);
1790 /* call this function when system calls directly modify a memory area */
1791 /* ??? This should be redundant now we have lock_user. */
1792 void page_unprotect_range(target_ulong data, target_ulong data_size)
1794 target_ulong start, end, addr;
1797 end = start + data_size;
1798 start &= TARGET_PAGE_MASK;
1799 end = TARGET_PAGE_ALIGN(end);
1800 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1801 page_unprotect(addr, 0, NULL);
1805 static inline void tlb_set_dirty(CPUState *env,
1806 unsigned long addr, target_ulong vaddr)
1809 #endif /* defined(CONFIG_USER_ONLY) */
1811 /* register physical memory. 'size' must be a multiple of the target
1812 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1814 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1816 unsigned long phys_offset)
1818 target_phys_addr_t addr, end_addr;
1822 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1823 end_addr = start_addr + size;
1824 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1825 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1826 p->phys_offset = phys_offset;
1827 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1828 (phys_offset & IO_MEM_ROMD))
1829 phys_offset += TARGET_PAGE_SIZE;
1832 /* since each CPU stores ram addresses in its TLB cache, we must
1833 reset the modified entries */
1835 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1840 /* XXX: temporary until new memory mapping API */
1841 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1845 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1847 return IO_MEM_UNASSIGNED;
1848 return p->phys_offset;
1851 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1853 #ifdef DEBUG_UNASSIGNED
1854 printf("Unassigned mem read 0x%08x\n", (int)addr);
1859 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1861 #ifdef DEBUG_UNASSIGNED
1862 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1866 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1867 unassigned_mem_readb,
1868 unassigned_mem_readb,
1869 unassigned_mem_readb,
1872 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1873 unassigned_mem_writeb,
1874 unassigned_mem_writeb,
1875 unassigned_mem_writeb,
1878 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1880 unsigned long ram_addr;
1882 ram_addr = addr - (unsigned long)phys_ram_base;
1883 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1884 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1885 #if !defined(CONFIG_USER_ONLY)
1886 tb_invalidate_phys_page_fast(ram_addr, 1);
1887 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1890 stb_p((uint8_t *)(long)addr, val);
1892 if (cpu_single_env->kqemu_enabled &&
1893 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1894 kqemu_modify_page(cpu_single_env, ram_addr);
1896 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1897 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1898 /* we remove the notdirty callback only if the code has been
1900 if (dirty_flags == 0xff)
1901 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1904 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1906 unsigned long ram_addr;
1908 ram_addr = addr - (unsigned long)phys_ram_base;
1909 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1910 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1911 #if !defined(CONFIG_USER_ONLY)
1912 tb_invalidate_phys_page_fast(ram_addr, 2);
1913 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1916 stw_p((uint8_t *)(long)addr, val);
1918 if (cpu_single_env->kqemu_enabled &&
1919 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1920 kqemu_modify_page(cpu_single_env, ram_addr);
1922 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1923 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1924 /* we remove the notdirty callback only if the code has been
1926 if (dirty_flags == 0xff)
1927 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1930 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1932 unsigned long ram_addr;
1934 ram_addr = addr - (unsigned long)phys_ram_base;
1935 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1936 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1937 #if !defined(CONFIG_USER_ONLY)
1938 tb_invalidate_phys_page_fast(ram_addr, 4);
1939 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1942 stl_p((uint8_t *)(long)addr, val);
1944 if (cpu_single_env->kqemu_enabled &&
1945 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1946 kqemu_modify_page(cpu_single_env, ram_addr);
1948 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1949 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1950 /* we remove the notdirty callback only if the code has been
1952 if (dirty_flags == 0xff)
1953 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1956 static CPUReadMemoryFunc *error_mem_read[3] = {
1957 NULL, /* never used */
1958 NULL, /* never used */
1959 NULL, /* never used */
1962 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1963 notdirty_mem_writeb,
1964 notdirty_mem_writew,
1965 notdirty_mem_writel,
1968 static void io_mem_init(void)
1970 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1971 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1972 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1975 /* alloc dirty bits array */
1976 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1977 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1980 /* mem_read and mem_write are arrays of functions containing the
1981 function to access byte (index 0), word (index 1) and dword (index
1982 2). All functions must be supplied. If io_index is non zero, the
1983 corresponding io zone is modified. If it is zero, a new io zone is
1984 allocated. The return value can be used with
1985 cpu_register_physical_memory(). (-1) is returned if error. */
1986 int cpu_register_io_memory(int io_index,
1987 CPUReadMemoryFunc **mem_read,
1988 CPUWriteMemoryFunc **mem_write,
1993 if (io_index <= 0) {
1994 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1996 io_index = io_mem_nb++;
1998 if (io_index >= IO_MEM_NB_ENTRIES)
2002 for(i = 0;i < 3; i++) {
2003 io_mem_read[io_index][i] = mem_read[i];
2004 io_mem_write[io_index][i] = mem_write[i];
2006 io_mem_opaque[io_index] = opaque;
2007 return io_index << IO_MEM_SHIFT;
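/* Illustrative usage sketch: allocate a new I/O slot (io_index == 0 means
   "pick a free entry") and map one page of guest physical address space to
   it, as described in the comment above cpu_register_io_memory().  The
   handler tables reused here are the unassigned-memory ones defined earlier;
   the 0xd0000000 base address is a hypothetical example. */
static void io_mem_register_sketch(void)
{
    int iomemtype = cpu_register_io_memory(0, unassigned_mem_read,
                                           unassigned_mem_write, NULL);
    cpu_register_physical_memory(0xd0000000, TARGET_PAGE_SIZE, iomemtype);
}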
2010 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2012 return io_mem_write[io_index >> IO_MEM_SHIFT];
2015 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2017 return io_mem_read[io_index >> IO_MEM_SHIFT];
2020 /* physical memory access (slow version, mainly for debug) */
2021 #if defined(CONFIG_USER_ONLY)
2022 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2023 int len, int is_write)
2030 page = addr & TARGET_PAGE_MASK;
2031 l = (page + TARGET_PAGE_SIZE) - addr;
2034 flags = page_get_flags(page);
2035 if (!(flags & PAGE_VALID))
2038 if (!(flags & PAGE_WRITE))
2040 p = lock_user(addr, len, 0);
2041 memcpy(p, buf, len);
2042 unlock_user(p, addr, len);
2044 if (!(flags & PAGE_READ))
2046 p = lock_user(addr, len, 1);
2047 memcpy(buf, p, len);
2048 unlock_user(p, addr, 0);
2057 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2058 int len, int is_write)
2063 target_phys_addr_t page;
2068 page = addr & TARGET_PAGE_MASK;
2069 l = (page + TARGET_PAGE_SIZE) - addr;
2072 p = phys_page_find(page >> TARGET_PAGE_BITS);
2074 pd = IO_MEM_UNASSIGNED;
2076 pd = p->phys_offset;
2080 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2081 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2082 /* XXX: could force cpu_single_env to NULL to avoid
2084 if (l >= 4 && ((addr & 3) == 0)) {
2085 /* 32 bit write access */
2087 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2089 } else if (l >= 2 && ((addr & 1) == 0)) {
2090 /* 16 bit write access */
2092 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2095 /* 8 bit write access */
2097 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2101 unsigned long addr1;
2102 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2104 ptr = phys_ram_base + addr1;
2105 memcpy(ptr, buf, l);
2106 if (!cpu_physical_memory_is_dirty(addr1)) {
2107 /* invalidate code */
2108 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2110 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2111 (0xff & ~CODE_DIRTY_FLAG);
2115 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2116 !(pd & IO_MEM_ROMD)) {
2118 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2119 if (l >= 4 && ((addr & 3) == 0)) {
2120 /* 32 bit read access */
2121 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2124 } else if (l >= 2 && ((addr & 1) == 0)) {
2125 /* 16 bit read access */
2126 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2130 /* 8 bit read access */
2131 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2137 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2138 (addr & ~TARGET_PAGE_MASK);
2139 memcpy(buf, ptr, l);
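/* Illustrative usage sketch: the is_write argument selects the direction,
   and the loop above transparently splits the access at page boundaries and
   routes I/O pages through the registered callbacks.  The 0x1000 guest
   physical address is a hypothetical example. */
static void phys_rw_sketch(void)
{
    uint8_t buf[16];
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);   /* read  */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);   /* write */
}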
2148 /* used for ROM loading : can write in RAM and ROM */
2149 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2150 const uint8_t *buf, int len)
2154 target_phys_addr_t page;
2159 page = addr & TARGET_PAGE_MASK;
2160 l = (page + TARGET_PAGE_SIZE) - addr;
2163 p = phys_page_find(page >> TARGET_PAGE_BITS);
2165 pd = IO_MEM_UNASSIGNED;
2167 pd = p->phys_offset;
2170 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2171 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2172 !(pd & IO_MEM_ROMD)) {
2175 unsigned long addr1;
2176 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2178 ptr = phys_ram_base + addr1;
2179 memcpy(ptr, buf, l);
2188 /* warning: addr must be aligned */
2189 uint32_t ldl_phys(target_phys_addr_t addr)
2197 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2199 pd = IO_MEM_UNASSIGNED;
2201 pd = p->phys_offset;
2204 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2205 !(pd & IO_MEM_ROMD)) {
2207 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2208 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2211 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2212 (addr & ~TARGET_PAGE_MASK);
2218 /* warning: addr must be aligned */
2219 uint64_t ldq_phys(target_phys_addr_t addr)
2227 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2229 pd = IO_MEM_UNASSIGNED;
2231 pd = p->phys_offset;
2234 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2235 !(pd & IO_MEM_ROMD)) {
2237 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2238 #ifdef TARGET_WORDS_BIGENDIAN
2239 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2240 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2242 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2243 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2247 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2248 (addr & ~TARGET_PAGE_MASK);
2255 uint32_t ldub_phys(target_phys_addr_t addr)
2258 cpu_physical_memory_read(addr, &val, 1);
2263 uint32_t lduw_phys(target_phys_addr_t addr)
2266 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2267 return tswap16(val);
2271 #define likely(x) __builtin_expect(!!(x), 1)
2272 #define unlikely(x) __builtin_expect(!!(x), 0)
2275 #define unlikely(x) x
2278 /* warning: addr must be aligned. The ram page is not masked as dirty
2279 and the code inside is not invalidated. It is useful if the dirty
2280 bits are used to track modified PTEs */
2281 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2288 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2290 pd = IO_MEM_UNASSIGNED;
2292 pd = p->phys_offset;
2295 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2296 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2297 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2299 unsigned long addr1;
2300 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2302 ptr = phys_ram_base + addr1;
2305 if (unlikely(in_migration)) {
2306 if (!cpu_physical_memory_is_dirty(addr1)) {
2307 /* invalidate code */
2308 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2310 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2311 (0xff & ~CODE_DIRTY_FLAG);
2317 /* warning: addr must be aligned */
2318 void stl_phys(target_phys_addr_t addr, uint32_t val)
2325 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2327 pd = IO_MEM_UNASSIGNED;
2329 pd = p->phys_offset;
2332 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2333 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2334 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2336 unsigned long addr1;
2337 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2339 ptr = phys_ram_base + addr1;
2341 if (!cpu_physical_memory_is_dirty(addr1)) {
2342 /* invalidate code */
2343 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2345 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2346 (0xff & ~CODE_DIRTY_FLAG);
2352 void stb_phys(target_phys_addr_t addr, uint32_t val)
2355 cpu_physical_memory_write(addr, &v, 1);
2359 void stw_phys(target_phys_addr_t addr, uint32_t val)
2361 uint16_t v = tswap16(val);
2362 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2366 void stq_phys(target_phys_addr_t addr, uint64_t val)
2369 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2374 /* virtual memory access for debug */
2375 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2376 uint8_t *buf, int len, int is_write)
2379 target_ulong page, phys_addr;
2382 page = addr & TARGET_PAGE_MASK;
2383 phys_addr = cpu_get_phys_page_debug(env, page);
2384 /* if no physical page mapped, return an error */
2385 if (phys_addr == -1)
2387 l = (page + TARGET_PAGE_SIZE) - addr;
2390 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2399 void dump_exec_info(FILE *f,
2400 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2402 int i, target_code_size, max_target_code_size;
2403 int direct_jmp_count, direct_jmp2_count, cross_page;
2404 TranslationBlock *tb;
2406 target_code_size = 0;
2407 max_target_code_size = 0;
2409 direct_jmp_count = 0;
2410 direct_jmp2_count = 0;
2411 for(i = 0; i < nb_tbs; i++) {
2413 target_code_size += tb->size;
2414 if (tb->size > max_target_code_size)
2415 max_target_code_size = tb->size;
2416 if (tb->page_addr[1] != -1)
2418 if (tb->tb_next_offset[0] != 0xffff) {
2420 if (tb->tb_next_offset[1] != 0xffff) {
2421 direct_jmp2_count++;
2425 /* XXX: avoid using doubles ? */
2426 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2427 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2428 nb_tbs ? target_code_size / nb_tbs : 0,
2429 max_target_code_size);
2430 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2431 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2432 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2433 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2435 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2436 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2438 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2440 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2441 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2442 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2443 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2446 #if !defined(CONFIG_USER_ONLY)
2448 #define MMUSUFFIX _cmmu
2449 #define GETPC() NULL
2450 #define env cpu_single_env
2451 #define SOFTMMU_CODE_ACCESS
2454 #include "softmmu_template.h"
2457 #include "softmmu_template.h"
2460 #include "softmmu_template.h"
2463 #include "softmmu_template.h"