2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
37 #if defined(CONFIG_USER_ONLY)
41 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
52 #if !defined(CONFIG_USER_ONLY)
53 /* TB consistency checks only implemented for usermode emulation. */
57 /* threshold to flush the translated code buffer */
58 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
60 #define SMC_BITMAP_USE_THRESHOLD 10
62 #define MMAP_AREA_START 0x00000000
63 #define MMAP_AREA_END 0xa8000000
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_ALPHA)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 42
69 #define TARGET_VIRT_ADDR_SPACE_BITS 42
70 #elif defined(TARGET_PPC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
74 #define TARGET_PHYS_ADDR_SPACE_BITS 32
77 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
78 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
80 /* any access to the tbs or the page table must use this lock */
81 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
83 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
84 uint8_t *code_gen_ptr
;
88 uint8_t *phys_ram_base
;
89 uint8_t *phys_ram_dirty
;
90 static ram_addr_t phys_ram_alloc_offset
= 0;
93 /* current CPU in the current thread. It is only valid inside
95 CPUState
*cpu_single_env
;
97 typedef struct PageDesc
{
98 /* list of TBs intersecting this ram page */
99 TranslationBlock
*first_tb
;
100 /* in order to optimize self modifying code, we count the number
101 of lookups we do to a given page to use a bitmap */
102 unsigned int code_write_count
;
103 uint8_t *code_bitmap
;
104 #if defined(CONFIG_USER_ONLY)
109 typedef struct PhysPageDesc
{
110 /* offset in host memory of the page + io_index in the low 12 bits */
111 uint32_t phys_offset
;
115 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
116 /* XXX: this is a temporary hack for alpha target.
117 * In the future, this is to be replaced by a multi-level table
118 * to actually be able to handle the complete 64 bits address space.
120 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
122 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
125 #define L1_SIZE (1 << L1_BITS)
126 #define L2_SIZE (1 << L2_BITS)
128 static void io_mem_init(void);
130 unsigned long qemu_real_host_page_size
;
131 unsigned long qemu_host_page_bits
;
132 unsigned long qemu_host_page_size
;
133 unsigned long qemu_host_page_mask
;
135 /* XXX: for system emulation, it could just be an array */
136 static PageDesc
*l1_map
[L1_SIZE
];
137 PhysPageDesc
**l1_phys_map
;
139 /* io memory support */
140 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
141 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
142 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
143 static int io_mem_nb
;
144 #if defined(CONFIG_SOFTMMU)
145 static int io_mem_watch
;
149 char *logfilename
= "/tmp/qemu.log";
154 static int tlb_flush_count
;
155 static int tb_flush_count
;
156 static int tb_phys_invalidate_count
;
158 static void page_init(void)
160 /* NOTE: we can always suppose that qemu_host_page_size >=
164 SYSTEM_INFO system_info
;
167 GetSystemInfo(&system_info
);
168 qemu_real_host_page_size
= system_info
.dwPageSize
;
170 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
171 PAGE_EXECUTE_READWRITE
, &old_protect
);
174 qemu_real_host_page_size
= getpagesize();
176 unsigned long start
, end
;
178 start
= (unsigned long)code_gen_buffer
;
179 start
&= ~(qemu_real_host_page_size
- 1);
181 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
182 end
+= qemu_real_host_page_size
- 1;
183 end
&= ~(qemu_real_host_page_size
- 1);
185 mprotect((void *)start
, end
- start
,
186 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
190 if (qemu_host_page_size
== 0)
191 qemu_host_page_size
= qemu_real_host_page_size
;
192 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
193 qemu_host_page_size
= TARGET_PAGE_SIZE
;
194 qemu_host_page_bits
= 0;
195 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
196 qemu_host_page_bits
++;
197 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
198 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
199 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
202 static inline PageDesc
*page_find_alloc(unsigned int index
)
206 lp
= &l1_map
[index
>> L2_BITS
];
209 /* allocate if not found */
210 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
211 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
214 return p
+ (index
& (L2_SIZE
- 1));
217 static inline PageDesc
*page_find(unsigned int index
)
221 p
= l1_map
[index
>> L2_BITS
];
224 return p
+ (index
& (L2_SIZE
- 1));
227 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
232 p
= (void **)l1_phys_map
;
233 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
235 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
236 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
238 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
241 /* allocate if not found */
244 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
245 memset(p
, 0, sizeof(void *) * L1_SIZE
);
249 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
253 /* allocate if not found */
256 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
258 for (i
= 0; i
< L2_SIZE
; i
++)
259 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
261 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
264 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
266 return phys_page_find_alloc(index
, 0);
269 #if !defined(CONFIG_USER_ONLY)
270 static void tlb_protect_code(ram_addr_t ram_addr
);
271 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
275 void cpu_exec_init(CPUState
*env
)
281 code_gen_ptr
= code_gen_buffer
;
285 env
->next_cpu
= NULL
;
288 while (*penv
!= NULL
) {
289 penv
= (CPUState
**)&(*penv
)->next_cpu
;
292 env
->cpu_index
= cpu_index
;
293 env
->nb_watchpoints
= 0;
297 static inline void invalidate_page_bitmap(PageDesc
*p
)
299 if (p
->code_bitmap
) {
300 qemu_free(p
->code_bitmap
);
301 p
->code_bitmap
= NULL
;
303 p
->code_write_count
= 0;
306 /* set to NULL all the 'first_tb' fields in all PageDescs */
307 static void page_flush_tb(void)
312 for(i
= 0; i
< L1_SIZE
; i
++) {
315 for(j
= 0; j
< L2_SIZE
; j
++) {
317 invalidate_page_bitmap(p
);
324 /* flush all the translation blocks */
325 /* XXX: tb_flush is currently not thread safe */
326 void tb_flush(CPUState
*env1
)
329 #if defined(DEBUG_FLUSH)
330 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
331 code_gen_ptr
- code_gen_buffer
,
333 nb_tbs
> 0 ? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0);
337 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
338 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
341 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
344 code_gen_ptr
= code_gen_buffer
;
345 /* XXX: flush processor icache at this point if cache flush is
350 #ifdef DEBUG_TB_CHECK
352 static void tb_invalidate_check(target_ulong address
)
354 TranslationBlock
*tb
;
356 address
&= TARGET_PAGE_MASK
;
357 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
358 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
359 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
360 address
>= tb
->pc
+ tb
->size
)) {
361 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
362 address
, (long)tb
->pc
, tb
->size
);
368 /* verify that all the pages have correct rights for code */
369 static void tb_page_check(void)
371 TranslationBlock
*tb
;
372 int i
, flags1
, flags2
;
374 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
375 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
376 flags1
= page_get_flags(tb
->pc
);
377 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
378 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
379 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
380 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
386 void tb_jmp_check(TranslationBlock
*tb
)
388 TranslationBlock
*tb1
;
391 /* suppress any remaining jumps to this TB */
395 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
398 tb1
= tb1
->jmp_next
[n1
];
400 /* check end of list */
402 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
408 /* invalidate one TB */
409 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
412 TranslationBlock
*tb1
;
416 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
419 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
423 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
425 TranslationBlock
*tb1
;
431 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
433 *ptb
= tb1
->page_next
[n1
];
436 ptb
= &tb1
->page_next
[n1
];
440 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
442 TranslationBlock
*tb1
, **ptb
;
445 ptb
= &tb
->jmp_next
[n
];
448 /* find tb(n) in circular list */
452 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
453 if (n1
== n
&& tb1
== tb
)
456 ptb
= &tb1
->jmp_first
;
458 ptb
= &tb1
->jmp_next
[n1
];
461 /* now we can suppress tb(n) from the list */
462 *ptb
= tb
->jmp_next
[n
];
464 tb
->jmp_next
[n
] = NULL
;
468 /* reset the jump entry 'n' of a TB so that it is not chained to
470 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
472 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
475 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
480 target_ulong phys_pc
;
481 TranslationBlock
*tb1
, *tb2
;
483 /* remove the TB from the hash list */
484 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
485 h
= tb_phys_hash_func(phys_pc
);
486 tb_remove(&tb_phys_hash
[h
], tb
,
487 offsetof(TranslationBlock
, phys_hash_next
));
489 /* remove the TB from the page list */
490 if (tb
->page_addr
[0] != page_addr
) {
491 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
492 tb_page_remove(&p
->first_tb
, tb
);
493 invalidate_page_bitmap(p
);
495 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
496 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
497 tb_page_remove(&p
->first_tb
, tb
);
498 invalidate_page_bitmap(p
);
501 tb_invalidated_flag
= 1;
503 /* remove the TB from the hash list */
504 h
= tb_jmp_cache_hash_func(tb
->pc
);
505 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
506 if (env
->tb_jmp_cache
[h
] == tb
)
507 env
->tb_jmp_cache
[h
] = NULL
;
510 /* suppress this TB from the two jump lists */
511 tb_jmp_remove(tb
, 0);
512 tb_jmp_remove(tb
, 1);
514 /* suppress any remaining jumps to this TB */
520 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
521 tb2
= tb1
->jmp_next
[n1
];
522 tb_reset_jump(tb1
, n1
);
523 tb1
->jmp_next
[n1
] = NULL
;
526 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
528 tb_phys_invalidate_count
++;
/* Set 'len' consecutive bits starting at bit index 'start' in the byte
   array 'tab' (bit 0 of each byte is the least significant bit).  Used
   to mark translated-instruction ranges in a page's SMC code bitmap. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* the range begins and ends inside the same byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        /* head: partial first byte */
        *tab++ |= mask;
        start = (start + 8) & ~7;
        /* middle: whole bytes */
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        /* tail: partial last byte */
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
558 static void build_page_bitmap(PageDesc
*p
)
560 int n
, tb_start
, tb_end
;
561 TranslationBlock
*tb
;
563 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
566 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
571 tb
= (TranslationBlock
*)((long)tb
& ~3);
572 /* NOTE: this is subtle as a TB may span two physical pages */
574 /* NOTE: tb_end may be after the end of the page, but
575 it is not a problem */
576 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
577 tb_end
= tb_start
+ tb
->size
;
578 if (tb_end
> TARGET_PAGE_SIZE
)
579 tb_end
= TARGET_PAGE_SIZE
;
582 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
584 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
585 tb
= tb
->page_next
[n
];
589 #ifdef TARGET_HAS_PRECISE_SMC
591 static void tb_gen_code(CPUState
*env
,
592 target_ulong pc
, target_ulong cs_base
, int flags
,
595 TranslationBlock
*tb
;
597 target_ulong phys_pc
, phys_page2
, virt_page2
;
600 phys_pc
= get_phys_addr_code(env
, pc
);
603 /* flush must be done */
605 /* cannot fail at this point */
608 tc_ptr
= code_gen_ptr
;
610 tb
->cs_base
= cs_base
;
613 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
614 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
616 /* check next page if needed */
617 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
619 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
620 phys_page2
= get_phys_addr_code(env
, virt_page2
);
622 tb_link_phys(tb
, phys_pc
, phys_page2
);
626 /* invalidate all TBs which intersect with the target physical page
627 starting in range [start;end[. NOTE: start and end must refer to
628 the same physical page. 'is_cpu_write_access' should be true if called
629 from a real cpu write access: the virtual CPU will exit the current
630 TB if code is modified inside this TB. */
631 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
632 int is_cpu_write_access
)
634 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
635 CPUState
*env
= cpu_single_env
;
637 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
638 target_ulong tb_start
, tb_end
;
639 target_ulong current_pc
, current_cs_base
;
641 p
= page_find(start
>> TARGET_PAGE_BITS
);
644 if (!p
->code_bitmap
&&
645 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
646 is_cpu_write_access
) {
647 /* build code bitmap */
648 build_page_bitmap(p
);
651 /* we remove all the TBs in the range [start, end[ */
652 /* XXX: see if in some cases it could be faster to invalidate all the code */
653 current_tb_not_found
= is_cpu_write_access
;
654 current_tb_modified
= 0;
655 current_tb
= NULL
; /* avoid warning */
656 current_pc
= 0; /* avoid warning */
657 current_cs_base
= 0; /* avoid warning */
658 current_flags
= 0; /* avoid warning */
662 tb
= (TranslationBlock
*)((long)tb
& ~3);
663 tb_next
= tb
->page_next
[n
];
664 /* NOTE: this is subtle as a TB may span two physical pages */
666 /* NOTE: tb_end may be after the end of the page, but
667 it is not a problem */
668 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
669 tb_end
= tb_start
+ tb
->size
;
671 tb_start
= tb
->page_addr
[1];
672 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
674 if (!(tb_end
<= start
|| tb_start
>= end
)) {
675 #ifdef TARGET_HAS_PRECISE_SMC
676 if (current_tb_not_found
) {
677 current_tb_not_found
= 0;
679 if (env
->mem_write_pc
) {
680 /* now we have a real cpu fault */
681 current_tb
= tb_find_pc(env
->mem_write_pc
);
684 if (current_tb
== tb
&&
685 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
686 /* If we are modifying the current TB, we must stop
687 its execution. We could be more precise by checking
688 that the modification is after the current PC, but it
689 would require a specialized function to partially
690 restore the CPU state */
692 current_tb_modified
= 1;
693 cpu_restore_state(current_tb
, env
,
694 env
->mem_write_pc
, NULL
);
695 #if defined(TARGET_I386)
696 current_flags
= env
->hflags
;
697 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
698 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
699 current_pc
= current_cs_base
+ env
->eip
;
701 #error unsupported CPU
704 #endif /* TARGET_HAS_PRECISE_SMC */
705 /* we need to do that to handle the case where a signal
706 occurs while doing tb_phys_invalidate() */
709 saved_tb
= env
->current_tb
;
710 env
->current_tb
= NULL
;
712 tb_phys_invalidate(tb
, -1);
714 env
->current_tb
= saved_tb
;
715 if (env
->interrupt_request
&& env
->current_tb
)
716 cpu_interrupt(env
, env
->interrupt_request
);
721 #if !defined(CONFIG_USER_ONLY)
722 /* if no code remaining, no need to continue to use slow writes */
724 invalidate_page_bitmap(p
);
725 if (is_cpu_write_access
) {
726 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
730 #ifdef TARGET_HAS_PRECISE_SMC
731 if (current_tb_modified
) {
732 /* we generate a block containing just the instruction
733 modifying the memory. It will ensure that it cannot modify
735 env
->current_tb
= NULL
;
736 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
738 cpu_resume_from_signal(env
, NULL
);
743 /* len must be <= 8 and start must be a multiple of len */
744 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
751 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
752 cpu_single_env
->mem_write_vaddr
, len
,
754 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
758 p
= page_find(start
>> TARGET_PAGE_BITS
);
761 if (p
->code_bitmap
) {
762 offset
= start
& ~TARGET_PAGE_MASK
;
763 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
764 if (b
& ((1 << len
) - 1))
768 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
772 #if !defined(CONFIG_SOFTMMU)
773 static void tb_invalidate_phys_page(target_ulong addr
,
774 unsigned long pc
, void *puc
)
776 int n
, current_flags
, current_tb_modified
;
777 target_ulong current_pc
, current_cs_base
;
779 TranslationBlock
*tb
, *current_tb
;
780 #ifdef TARGET_HAS_PRECISE_SMC
781 CPUState
*env
= cpu_single_env
;
784 addr
&= TARGET_PAGE_MASK
;
785 p
= page_find(addr
>> TARGET_PAGE_BITS
);
789 current_tb_modified
= 0;
791 current_pc
= 0; /* avoid warning */
792 current_cs_base
= 0; /* avoid warning */
793 current_flags
= 0; /* avoid warning */
794 #ifdef TARGET_HAS_PRECISE_SMC
796 current_tb
= tb_find_pc(pc
);
801 tb
= (TranslationBlock
*)((long)tb
& ~3);
802 #ifdef TARGET_HAS_PRECISE_SMC
803 if (current_tb
== tb
&&
804 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
805 /* If we are modifying the current TB, we must stop
806 its execution. We could be more precise by checking
807 that the modification is after the current PC, but it
808 would require a specialized function to partially
809 restore the CPU state */
811 current_tb_modified
= 1;
812 cpu_restore_state(current_tb
, env
, pc
, puc
);
813 #if defined(TARGET_I386)
814 current_flags
= env
->hflags
;
815 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
816 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
817 current_pc
= current_cs_base
+ env
->eip
;
819 #error unsupported CPU
822 #endif /* TARGET_HAS_PRECISE_SMC */
823 tb_phys_invalidate(tb
, addr
);
824 tb
= tb
->page_next
[n
];
827 #ifdef TARGET_HAS_PRECISE_SMC
828 if (current_tb_modified
) {
829 /* we generate a block containing just the instruction
830 modifying the memory. It will ensure that it cannot modify
832 env
->current_tb
= NULL
;
833 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
835 cpu_resume_from_signal(env
, puc
);
841 /* add the tb in the target page and protect it if necessary */
842 static inline void tb_alloc_page(TranslationBlock
*tb
,
843 unsigned int n
, target_ulong page_addr
)
846 TranslationBlock
*last_first_tb
;
848 tb
->page_addr
[n
] = page_addr
;
849 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
850 tb
->page_next
[n
] = p
->first_tb
;
851 last_first_tb
= p
->first_tb
;
852 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
853 invalidate_page_bitmap(p
);
855 #if defined(TARGET_HAS_SMC) || 1
857 #if defined(CONFIG_USER_ONLY)
858 if (p
->flags
& PAGE_WRITE
) {
863 /* force the host page as non writable (writes will have a
864 page fault + mprotect overhead) */
865 page_addr
&= qemu_host_page_mask
;
867 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
868 addr
+= TARGET_PAGE_SIZE
) {
870 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
874 p2
->flags
&= ~PAGE_WRITE
;
875 page_get_flags(addr
);
877 mprotect(g2h(page_addr
), qemu_host_page_size
,
878 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
879 #ifdef DEBUG_TB_INVALIDATE
880 printf("protecting code page: 0x%08lx\n",
885 /* if some code is already present, then the pages are already
886 protected. So we handle the case where only the first TB is
887 allocated in a physical page */
888 if (!last_first_tb
) {
889 tlb_protect_code(page_addr
);
893 #endif /* TARGET_HAS_SMC */
896 /* Allocate a new translation block. Flush the translation buffer if
897 too many translation blocks or too much generated code. */
898 TranslationBlock
*tb_alloc(target_ulong pc
)
900 TranslationBlock
*tb
;
902 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
903 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
911 /* add a new TB and link it to the physical page tables. phys_page2 is
912 (-1) to indicate that only one page contains the TB. */
913 void tb_link_phys(TranslationBlock
*tb
,
914 target_ulong phys_pc
, target_ulong phys_page2
)
917 TranslationBlock
**ptb
;
919 /* add in the physical hash table */
920 h
= tb_phys_hash_func(phys_pc
);
921 ptb
= &tb_phys_hash
[h
];
922 tb
->phys_hash_next
= *ptb
;
925 /* add in the page list */
926 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
927 if (phys_page2
!= -1)
928 tb_alloc_page(tb
, 1, phys_page2
);
930 tb
->page_addr
[1] = -1;
932 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
933 tb
->jmp_next
[0] = NULL
;
934 tb
->jmp_next
[1] = NULL
;
936 tb
->cflags
&= ~CF_FP_USED
;
937 if (tb
->cflags
& CF_TB_FP_USED
)
938 tb
->cflags
|= CF_FP_USED
;
941 /* init original jump addresses */
942 if (tb
->tb_next_offset
[0] != 0xffff)
943 tb_reset_jump(tb
, 0);
944 if (tb
->tb_next_offset
[1] != 0xffff)
945 tb_reset_jump(tb
, 1);
947 #ifdef DEBUG_TB_CHECK
952 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
953 tb[1].tc_ptr. Return NULL if not found */
954 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
958 TranslationBlock
*tb
;
962 if (tc_ptr
< (unsigned long)code_gen_buffer
||
963 tc_ptr
>= (unsigned long)code_gen_ptr
)
965 /* binary search (cf Knuth) */
968 while (m_min
<= m_max
) {
969 m
= (m_min
+ m_max
) >> 1;
971 v
= (unsigned long)tb
->tc_ptr
;
974 else if (tc_ptr
< v
) {
983 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
985 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
987 TranslationBlock
*tb1
, *tb_next
, **ptb
;
990 tb1
= tb
->jmp_next
[n
];
992 /* find head of list */
995 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
998 tb1
= tb1
->jmp_next
[n1
];
1000 /* we are now sure now that tb jumps to tb1 */
1003 /* remove tb from the jmp_first list */
1004 ptb
= &tb_next
->jmp_first
;
1008 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1009 if (n1
== n
&& tb1
== tb
)
1011 ptb
= &tb1
->jmp_next
[n1
];
1013 *ptb
= tb
->jmp_next
[n
];
1014 tb
->jmp_next
[n
] = NULL
;
1016 /* suppress the jump to next tb in generated code */
1017 tb_reset_jump(tb
, n
);
1019 /* suppress jumps in the tb on which we could have jumped */
1020 tb_reset_jump_recursive(tb_next
);
1024 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1026 tb_reset_jump_recursive2(tb
, 0);
1027 tb_reset_jump_recursive2(tb
, 1);
1030 #if defined(TARGET_HAS_ICE)
1031 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1033 target_phys_addr_t addr
;
1035 ram_addr_t ram_addr
;
1038 addr
= cpu_get_phys_page_debug(env
, pc
);
1039 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1041 pd
= IO_MEM_UNASSIGNED
;
1043 pd
= p
->phys_offset
;
1045 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1046 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1050 /* Add a watchpoint. */
1051 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1055 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1056 if (addr
== env
->watchpoint
[i
].vaddr
)
1059 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1062 i
= env
->nb_watchpoints
++;
1063 env
->watchpoint
[i
].vaddr
= addr
;
1064 tlb_flush_page(env
, addr
);
1065 /* FIXME: This flush is needed because of the hack to make memory ops
1066 terminate the TB. It can be removed once the proper IO trap and
1067 re-execute bits are in. */
1072 /* Remove a watchpoint. */
1073 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1077 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1078 if (addr
== env
->watchpoint
[i
].vaddr
) {
1079 env
->nb_watchpoints
--;
1080 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1081 tlb_flush_page(env
, addr
);
1088 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1089 breakpoint is reached */
1090 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1092 #if defined(TARGET_HAS_ICE)
1095 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1096 if (env
->breakpoints
[i
] == pc
)
1100 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1102 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1104 breakpoint_invalidate(env
, pc
);
1111 /* remove a breakpoint */
1112 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1114 #if defined(TARGET_HAS_ICE)
1116 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1117 if (env
->breakpoints
[i
] == pc
)
1122 env
->nb_breakpoints
--;
1123 if (i
< env
->nb_breakpoints
)
1124 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1126 breakpoint_invalidate(env
, pc
);
1133 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1134 CPU loop after each instruction */
1135 void cpu_single_step(CPUState
*env
, int enabled
)
1137 #if defined(TARGET_HAS_ICE)
1138 if (env
->singlestep_enabled
!= enabled
) {
1139 env
->singlestep_enabled
= enabled
;
1140 /* must flush all the translated code to avoid inconsistancies */
1141 /* XXX: only flush what is necessary */
1147 /* enable or disable low levels log */
1148 void cpu_set_log(int log_flags
)
1150 loglevel
= log_flags
;
1151 if (loglevel
&& !logfile
) {
1152 logfile
= fopen(logfilename
, "w");
1154 perror(logfilename
);
1157 #if !defined(CONFIG_SOFTMMU)
1158 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1160 static uint8_t logfile_buf
[4096];
1161 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1164 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1169 void cpu_set_log_filename(const char *filename
)
1171 logfilename
= strdup(filename
);
1174 /* mask must never be zero, except for A20 change call */
1175 void cpu_interrupt(CPUState
*env
, int mask
)
1177 TranslationBlock
*tb
;
1178 static int interrupt_lock
;
1180 env
->interrupt_request
|= mask
;
1181 /* if the cpu is currently executing code, we must unlink it and
1182 all the potentially executing TB */
1183 tb
= env
->current_tb
;
1184 if (tb
&& !testandset(&interrupt_lock
)) {
1185 env
->current_tb
= NULL
;
1186 tb_reset_jump_recursive(tb
);
1191 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1193 env
->interrupt_request
&= ~mask
;
1196 CPULogItem cpu_log_items
[] = {
1197 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1198 "show generated host assembly code for each compiled TB" },
1199 { CPU_LOG_TB_IN_ASM
, "in_asm",
1200 "show target assembly code for each compiled TB" },
1201 { CPU_LOG_TB_OP
, "op",
1202 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1204 { CPU_LOG_TB_OP_OPT
, "op_opt",
1205 "show micro ops after optimization for each compiled TB" },
1207 { CPU_LOG_INT
, "int",
1208 "show interrupts/exceptions in short format" },
1209 { CPU_LOG_EXEC
, "exec",
1210 "show trace before each executed TB (lots of logs)" },
1211 { CPU_LOG_TB_CPU
, "cpu",
1212 "show CPU state before bloc translation" },
1214 { CPU_LOG_PCALL
, "pcall",
1215 "show protected mode far calls/returns/exceptions" },
1218 { CPU_LOG_IOPORT
, "ioport",
1219 "show all i/o ports accesses" },
/* Return 1 if the first 'n' characters of 's1' match 's2' exactly and
   's2' is exactly 'n' characters long, else 0.  Used to match one
   comma-separated token of a log-mask string against an item name. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1231 /* takes a comma separated list of log masks. Return 0 if error. */
1232 int cpu_str_to_log_mask(const char *str
)
1241 p1
= strchr(p
, ',');
1244 if(cmp1(p
,p1
-p
,"all")) {
1245 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1249 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1250 if (cmp1(p
, p1
- p
, item
->name
))
1264 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1269 fprintf(stderr
, "qemu: fatal: ");
1270 vfprintf(stderr
, fmt
, ap
);
1271 fprintf(stderr
, "\n");
1273 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1275 cpu_dump_state(env
, stderr
, fprintf
, 0);
1281 CPUState
*cpu_copy(CPUState
*env
)
1283 CPUState
*new_env
= cpu_init();
1284 /* preserve chaining and index */
1285 CPUState
*next_cpu
= new_env
->next_cpu
;
1286 int cpu_index
= new_env
->cpu_index
;
1287 memcpy(new_env
, env
, sizeof(CPUState
));
1288 new_env
->next_cpu
= next_cpu
;
1289 new_env
->cpu_index
= cpu_index
;
1293 #if !defined(CONFIG_USER_ONLY)
1295 /* NOTE: if flush_global is true, also flush global entries (not
1297 void tlb_flush(CPUState
*env
, int flush_global
)
1301 #if defined(DEBUG_TLB)
1302 printf("tlb_flush:\n");
1304 /* must reset current TB so that interrupts cannot modify the
1305 links while we are modifying them */
1306 env
->current_tb
= NULL
;
1308 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1309 env
->tlb_table
[0][i
].addr_read
= -1;
1310 env
->tlb_table
[0][i
].addr_write
= -1;
1311 env
->tlb_table
[0][i
].addr_code
= -1;
1312 env
->tlb_table
[1][i
].addr_read
= -1;
1313 env
->tlb_table
[1][i
].addr_write
= -1;
1314 env
->tlb_table
[1][i
].addr_code
= -1;
1315 #if (NB_MMU_MODES >= 3)
1316 env
->tlb_table
[2][i
].addr_read
= -1;
1317 env
->tlb_table
[2][i
].addr_write
= -1;
1318 env
->tlb_table
[2][i
].addr_code
= -1;
1319 #if (NB_MMU_MODES == 4)
1320 env
->tlb_table
[3][i
].addr_read
= -1;
1321 env
->tlb_table
[3][i
].addr_write
= -1;
1322 env
->tlb_table
[3][i
].addr_code
= -1;
1327 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1329 #if !defined(CONFIG_SOFTMMU)
1330 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1333 if (env
->kqemu_enabled
) {
1334 kqemu_flush(env
, flush_global
);
1340 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1342 if (addr
== (tlb_entry
->addr_read
&
1343 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1344 addr
== (tlb_entry
->addr_write
&
1345 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1346 addr
== (tlb_entry
->addr_code
&
1347 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1348 tlb_entry
->addr_read
= -1;
1349 tlb_entry
->addr_write
= -1;
1350 tlb_entry
->addr_code
= -1;
1354 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1357 TranslationBlock
*tb
;
1359 #if defined(DEBUG_TLB)
1360 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1362 /* must reset current TB so that interrupts cannot modify the
1363 links while we are modifying them */
1364 env
->current_tb
= NULL
;
1366 addr
&= TARGET_PAGE_MASK
;
1367 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1368 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1369 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1370 #if (NB_MMU_MODES >= 3)
1371 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1372 #if (NB_MMU_MODES == 4)
1373 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1377 /* Discard jump cache entries for any tb which might potentially
1378 overlap the flushed page. */
1379 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1380 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1382 i
= tb_jmp_cache_hash_page(addr
);
1383 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1385 #if !defined(CONFIG_SOFTMMU)
1386 if (addr
< MMAP_AREA_END
)
1387 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1390 if (env
->kqemu_enabled
) {
1391 kqemu_flush_page(env
, addr
);
1396 /* update the TLBs so that writes to code in the virtual page 'addr'
1398 static void tlb_protect_code(ram_addr_t ram_addr
)
1400 cpu_physical_memory_reset_dirty(ram_addr
,
1401 ram_addr
+ TARGET_PAGE_SIZE
,
1405 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1406 tested for self modifying code */
1407 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1410 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1413 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1414 unsigned long start
, unsigned long length
)
1417 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1418 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1419 if ((addr
- start
) < length
) {
1420 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1425 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1429 unsigned long length
, start1
;
1433 start
&= TARGET_PAGE_MASK
;
1434 end
= TARGET_PAGE_ALIGN(end
);
1436 length
= end
- start
;
1439 len
= length
>> TARGET_PAGE_BITS
;
1441 /* XXX: should not depend on cpu context */
1443 if (env
->kqemu_enabled
) {
1446 for(i
= 0; i
< len
; i
++) {
1447 kqemu_set_notdirty(env
, addr
);
1448 addr
+= TARGET_PAGE_SIZE
;
1452 mask
= ~dirty_flags
;
1453 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1454 for(i
= 0; i
< len
; i
++)
1457 /* we modify the TLB cache so that the dirty bit will be set again
1458 when accessing the range */
1459 start1
= start
+ (unsigned long)phys_ram_base
;
1460 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1461 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1462 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1463 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1464 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1465 #if (NB_MMU_MODES >= 3)
1466 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1467 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1468 #if (NB_MMU_MODES == 4)
1469 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1470 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1475 #if !defined(CONFIG_SOFTMMU)
1476 /* XXX: this is expensive */
1482 for(i
= 0; i
< L1_SIZE
; i
++) {
1485 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1486 for(j
= 0; j
< L2_SIZE
; j
++) {
1487 if (p
->valid_tag
== virt_valid_tag
&&
1488 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1489 (p
->prot
& PROT_WRITE
)) {
1490 if (addr
< MMAP_AREA_END
) {
1491 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1492 p
->prot
& ~PROT_WRITE
);
1495 addr
+= TARGET_PAGE_SIZE
;
1504 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1506 ram_addr_t ram_addr
;
1508 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1509 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1510 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1511 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1512 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1517 /* update the TLB according to the current state of the dirty bits */
1518 void cpu_tlb_update_dirty(CPUState
*env
)
1521 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1522 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1523 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1524 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1525 #if (NB_MMU_MODES >= 3)
1526 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1527 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1528 #if (NB_MMU_MODES == 4)
1529 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1530 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1535 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1536 unsigned long start
)
1539 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1540 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1541 if (addr
== start
) {
1542 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1547 /* update the TLB corresponding to virtual page vaddr and phys addr
1548 addr so that it is no longer dirty */
1549 static inline void tlb_set_dirty(CPUState
*env
,
1550 unsigned long addr
, target_ulong vaddr
)
1554 addr
&= TARGET_PAGE_MASK
;
1555 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1556 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1557 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1558 #if (NB_MMU_MODES >= 3)
1559 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1560 #if (NB_MMU_MODES == 4)
1561 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1566 /* add a new TLB entry. At most one entry for a given virtual address
1567 is permitted. Return 0 if OK or 2 if the page could not be mapped
1568 (can only happen in non SOFTMMU mode for I/O pages or pages
1569 conflicting with the host address space). */
1570 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1571 target_phys_addr_t paddr
, int prot
,
1572 int is_user
, int is_softmmu
)
1577 target_ulong address
;
1578 target_phys_addr_t addend
;
1583 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1585 pd
= IO_MEM_UNASSIGNED
;
1587 pd
= p
->phys_offset
;
1589 #if defined(DEBUG_TLB)
1590 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1591 vaddr
, (int)paddr
, prot
, is_user
, is_softmmu
, pd
);
1595 #if !defined(CONFIG_SOFTMMU)
1599 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1600 /* IO memory case */
1601 address
= vaddr
| pd
;
1604 /* standard memory */
1606 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1609 /* Make accesses to pages with watchpoints go via the
1610 watchpoint trap routines. */
1611 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1612 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1613 if (address
& ~TARGET_PAGE_MASK
) {
1614 env
->watchpoint
[i
].is_ram
= 0;
1615 address
= vaddr
| io_mem_watch
;
1617 env
->watchpoint
[i
].is_ram
= 1;
1618 /* TODO: Figure out how to make read watchpoints coexist
1620 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1625 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1627 te
= &env
->tlb_table
[is_user
][index
];
1628 te
->addend
= addend
;
1629 if (prot
& PAGE_READ
) {
1630 te
->addr_read
= address
;
1634 if (prot
& PAGE_EXEC
) {
1635 te
->addr_code
= address
;
1639 if (prot
& PAGE_WRITE
) {
1640 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1641 (pd
& IO_MEM_ROMD
)) {
1642 /* write access calls the I/O callback */
1643 te
->addr_write
= vaddr
|
1644 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1645 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1646 !cpu_physical_memory_is_dirty(pd
)) {
1647 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1649 te
->addr_write
= address
;
1652 te
->addr_write
= -1;
1655 #if !defined(CONFIG_SOFTMMU)
1657 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1658 /* IO access: no mapping is done as it will be handled by the
1660 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1665 if (vaddr
>= MMAP_AREA_END
) {
1668 if (prot
& PROT_WRITE
) {
1669 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1670 #if defined(TARGET_HAS_SMC) || 1
1673 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1674 !cpu_physical_memory_is_dirty(pd
))) {
1675 /* ROM: we do as if code was inside */
1676 /* if code is present, we only map as read only and save the
1680 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1683 vp
->valid_tag
= virt_valid_tag
;
1684 prot
&= ~PAGE_WRITE
;
1687 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1688 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1689 if (map_addr
== MAP_FAILED
) {
1690 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1700 /* called from signal handler: invalidate the code and unprotect the
1701 page. Return TRUE if the fault was succesfully handled. */
1702 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1704 #if !defined(CONFIG_SOFTMMU)
1707 #if defined(DEBUG_TLB)
1708 printf("page_unprotect: addr=0x%08x\n", addr
);
1710 addr
&= TARGET_PAGE_MASK
;
1712 /* if it is not mapped, no need to worry here */
1713 if (addr
>= MMAP_AREA_END
)
1715 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1718 /* NOTE: in this case, validate_tag is _not_ tested as it
1719 validates only the code TLB */
1720 if (vp
->valid_tag
!= virt_valid_tag
)
1722 if (!(vp
->prot
& PAGE_WRITE
))
1724 #if defined(DEBUG_TLB)
1725 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1726 addr
, vp
->phys_addr
, vp
->prot
);
1728 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1729 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1730 (unsigned long)addr
, vp
->prot
);
1731 /* set the dirty bit */
1732 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1733 /* flush the code inside */
1734 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1743 void tlb_flush(CPUState
*env
, int flush_global
)
1747 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1751 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1752 target_phys_addr_t paddr
, int prot
,
1753 int is_user
, int is_softmmu
)
1758 /* dump memory mappings */
1759 void page_dump(FILE *f
)
1761 unsigned long start
, end
;
1762 int i
, j
, prot
, prot1
;
1765 fprintf(f
, "%-8s %-8s %-8s %s\n",
1766 "start", "end", "size", "prot");
1770 for(i
= 0; i
<= L1_SIZE
; i
++) {
1775 for(j
= 0;j
< L2_SIZE
; j
++) {
1780 if (prot1
!= prot
) {
1781 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1783 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1784 start
, end
, end
- start
,
1785 prot
& PAGE_READ
? 'r' : '-',
1786 prot
& PAGE_WRITE
? 'w' : '-',
1787 prot
& PAGE_EXEC
? 'x' : '-');
1801 int page_get_flags(target_ulong address
)
1805 p
= page_find(address
>> TARGET_PAGE_BITS
);
1811 /* modify the flags of a page and invalidate the code if
1812 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1813 depending on PAGE_WRITE */
1814 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1819 start
= start
& TARGET_PAGE_MASK
;
1820 end
= TARGET_PAGE_ALIGN(end
);
1821 if (flags
& PAGE_WRITE
)
1822 flags
|= PAGE_WRITE_ORG
;
1823 spin_lock(&tb_lock
);
1824 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1825 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1826 /* if the write protection is set, then we invalidate the code
1828 if (!(p
->flags
& PAGE_WRITE
) &&
1829 (flags
& PAGE_WRITE
) &&
1831 tb_invalidate_phys_page(addr
, 0, NULL
);
1835 spin_unlock(&tb_lock
);
1838 /* called from signal handler: invalidate the code and unprotect the
1839 page. Return TRUE if the fault was succesfully handled. */
1840 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1842 unsigned int page_index
, prot
, pindex
;
1844 target_ulong host_start
, host_end
, addr
;
1846 host_start
= address
& qemu_host_page_mask
;
1847 page_index
= host_start
>> TARGET_PAGE_BITS
;
1848 p1
= page_find(page_index
);
1851 host_end
= host_start
+ qemu_host_page_size
;
1854 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1858 /* if the page was really writable, then we change its
1859 protection back to writable */
1860 if (prot
& PAGE_WRITE_ORG
) {
1861 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1862 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1863 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1864 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1865 p1
[pindex
].flags
|= PAGE_WRITE
;
1866 /* and since the content will be modified, we must invalidate
1867 the corresponding translated code. */
1868 tb_invalidate_phys_page(address
, pc
, puc
);
1869 #ifdef DEBUG_TB_CHECK
1870 tb_invalidate_check(address
);
1878 /* call this function when system calls directly modify a memory area */
1879 /* ??? This should be redundant now we have lock_user. */
1880 void page_unprotect_range(target_ulong data
, target_ulong data_size
)
1882 target_ulong start
, end
, addr
;
1885 end
= start
+ data_size
;
1886 start
&= TARGET_PAGE_MASK
;
1887 end
= TARGET_PAGE_ALIGN(end
);
1888 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1889 page_unprotect(addr
, 0, NULL
);
1893 static inline void tlb_set_dirty(CPUState
*env
,
1894 unsigned long addr
, target_ulong vaddr
)
1897 #endif /* defined(CONFIG_USER_ONLY) */
1899 /* register physical memory. 'size' must be a multiple of the target
1900 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1902 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
1904 unsigned long phys_offset
)
1906 target_phys_addr_t addr
, end_addr
;
1910 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
1911 end_addr
= start_addr
+ size
;
1912 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
1913 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1914 p
->phys_offset
= phys_offset
;
1915 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
1916 (phys_offset
& IO_MEM_ROMD
))
1917 phys_offset
+= TARGET_PAGE_SIZE
;
1920 /* since each CPU stores ram addresses in its TLB cache, we must
1921 reset the modified entries */
1923 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1928 /* XXX: temporary until new memory mapping API */
1929 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
1933 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1935 return IO_MEM_UNASSIGNED
;
1936 return p
->phys_offset
;
1939 /* XXX: better than nothing */
1940 ram_addr_t
qemu_ram_alloc(unsigned int size
)
1943 if ((phys_ram_alloc_offset
+ size
) >= phys_ram_size
) {
1944 fprintf(stderr
, "Not enough memory (requested_size = %u, max memory = %d)\n",
1945 size
, phys_ram_size
);
1948 addr
= phys_ram_alloc_offset
;
1949 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
1953 void qemu_ram_free(ram_addr_t addr
)
1957 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
1959 #ifdef DEBUG_UNASSIGNED
1960 printf("Unassigned mem read 0x%08x\n", (int)addr
);
1965 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1967 #ifdef DEBUG_UNASSIGNED
1968 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr
, val
);
1972 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
1973 unassigned_mem_readb
,
1974 unassigned_mem_readb
,
1975 unassigned_mem_readb
,
1978 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
1979 unassigned_mem_writeb
,
1980 unassigned_mem_writeb
,
1981 unassigned_mem_writeb
,
1984 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1986 unsigned long ram_addr
;
1988 ram_addr
= addr
- (unsigned long)phys_ram_base
;
1989 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1990 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1991 #if !defined(CONFIG_USER_ONLY)
1992 tb_invalidate_phys_page_fast(ram_addr
, 1);
1993 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1996 stb_p((uint8_t *)(long)addr
, val
);
1998 if (cpu_single_env
->kqemu_enabled
&&
1999 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2000 kqemu_modify_page(cpu_single_env
, ram_addr
);
2002 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2003 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2004 /* we remove the notdirty callback only if the code has been
2006 if (dirty_flags
== 0xff)
2007 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2010 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2012 unsigned long ram_addr
;
2014 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2015 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2016 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2017 #if !defined(CONFIG_USER_ONLY)
2018 tb_invalidate_phys_page_fast(ram_addr
, 2);
2019 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2022 stw_p((uint8_t *)(long)addr
, val
);
2024 if (cpu_single_env
->kqemu_enabled
&&
2025 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2026 kqemu_modify_page(cpu_single_env
, ram_addr
);
2028 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2029 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2030 /* we remove the notdirty callback only if the code has been
2032 if (dirty_flags
== 0xff)
2033 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2036 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2038 unsigned long ram_addr
;
2040 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2041 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2042 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2043 #if !defined(CONFIG_USER_ONLY)
2044 tb_invalidate_phys_page_fast(ram_addr
, 4);
2045 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2048 stl_p((uint8_t *)(long)addr
, val
);
2050 if (cpu_single_env
->kqemu_enabled
&&
2051 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2052 kqemu_modify_page(cpu_single_env
, ram_addr
);
2054 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2055 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2056 /* we remove the notdirty callback only if the code has been
2058 if (dirty_flags
== 0xff)
2059 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2062 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2063 NULL
, /* never used */
2064 NULL
, /* never used */
2065 NULL
, /* never used */
2068 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2069 notdirty_mem_writeb
,
2070 notdirty_mem_writew
,
2071 notdirty_mem_writel
,
2074 #if defined(CONFIG_SOFTMMU)
2075 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2076 so these check for a hit then pass through to the normal out-of-line
2078 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2080 return ldub_phys(addr
);
2083 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2085 return lduw_phys(addr
);
2088 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2090 return ldl_phys(addr
);
2093 /* Generate a debug exception if a watchpoint has been hit.
2094 Returns the real physical address of the access. addr will be a host
2095 address in the is_ram case. */
2096 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2098 CPUState
*env
= cpu_single_env
;
2100 target_ulong retaddr
;
2104 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2105 watch
= env
->watchpoint
[i
].vaddr
;
2106 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2107 if (env
->watchpoint
[i
].is_ram
)
2108 retaddr
= addr
- (unsigned long)phys_ram_base
;
2109 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2110 cpu_single_env
->watchpoint_hit
= i
+ 1;
2111 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2119 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2122 addr
= check_watchpoint(addr
);
2123 stb_phys(addr
, val
);
2126 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2129 addr
= check_watchpoint(addr
);
2130 stw_phys(addr
, val
);
2133 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2136 addr
= check_watchpoint(addr
);
2137 stl_phys(addr
, val
);
2140 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2146 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2153 static void io_mem_init(void)
2155 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2156 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2157 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2160 #if defined(CONFIG_SOFTMMU)
2161 io_mem_watch
= cpu_register_io_memory(-1, watch_mem_read
,
2162 watch_mem_write
, NULL
);
2164 /* alloc dirty bits array */
2165 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2166 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2169 /* mem_read and mem_write are arrays of functions containing the
2170 function to access byte (index 0), word (index 1) and dword (index
2171 2). All functions must be supplied. If io_index is non zero, the
2172 corresponding io zone is modified. If it is zero, a new io zone is
2173 allocated. The return value can be used with
2174 cpu_register_physical_memory(). (-1) is returned if error. */
2175 int cpu_register_io_memory(int io_index
,
2176 CPUReadMemoryFunc
**mem_read
,
2177 CPUWriteMemoryFunc
**mem_write
,
2182 if (io_index
<= 0) {
2183 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2185 io_index
= io_mem_nb
++;
2187 if (io_index
>= IO_MEM_NB_ENTRIES
)
2191 for(i
= 0;i
< 3; i
++) {
2192 io_mem_read
[io_index
][i
] = mem_read
[i
];
2193 io_mem_write
[io_index
][i
] = mem_write
[i
];
2195 io_mem_opaque
[io_index
] = opaque
;
2196 return io_index
<< IO_MEM_SHIFT
;
2199 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2201 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2204 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2206 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2209 /* physical memory access (slow version, mainly for debug) */
2210 #if defined(CONFIG_USER_ONLY)
2211 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2212 int len
, int is_write
)
2219 page
= addr
& TARGET_PAGE_MASK
;
2220 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2223 flags
= page_get_flags(page
);
2224 if (!(flags
& PAGE_VALID
))
2227 if (!(flags
& PAGE_WRITE
))
2229 p
= lock_user(addr
, len
, 0);
2230 memcpy(p
, buf
, len
);
2231 unlock_user(p
, addr
, len
);
2233 if (!(flags
& PAGE_READ
))
2235 p
= lock_user(addr
, len
, 1);
2236 memcpy(buf
, p
, len
);
2237 unlock_user(p
, addr
, 0);
2246 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2247 int len
, int is_write
)
2252 target_phys_addr_t page
;
2257 page
= addr
& TARGET_PAGE_MASK
;
2258 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2261 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2263 pd
= IO_MEM_UNASSIGNED
;
2265 pd
= p
->phys_offset
;
2269 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2270 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2271 /* XXX: could force cpu_single_env to NULL to avoid
2273 if (l
>= 4 && ((addr
& 3) == 0)) {
2274 /* 32 bit write access */
2276 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2278 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2279 /* 16 bit write access */
2281 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2284 /* 8 bit write access */
2286 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2290 unsigned long addr1
;
2291 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2293 ptr
= phys_ram_base
+ addr1
;
2294 memcpy(ptr
, buf
, l
);
2295 if (!cpu_physical_memory_is_dirty(addr1
)) {
2296 /* invalidate code */
2297 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2299 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2300 (0xff & ~CODE_DIRTY_FLAG
);
2304 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2305 !(pd
& IO_MEM_ROMD
)) {
2307 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2308 if (l
>= 4 && ((addr
& 3) == 0)) {
2309 /* 32 bit read access */
2310 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2313 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2314 /* 16 bit read access */
2315 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2319 /* 8 bit read access */
2320 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2326 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2327 (addr
& ~TARGET_PAGE_MASK
);
2328 memcpy(buf
, ptr
, l
);
2337 /* used for ROM loading : can write in RAM and ROM */
2338 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2339 const uint8_t *buf
, int len
)
2343 target_phys_addr_t page
;
2348 page
= addr
& TARGET_PAGE_MASK
;
2349 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2352 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2354 pd
= IO_MEM_UNASSIGNED
;
2356 pd
= p
->phys_offset
;
2359 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2360 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2361 !(pd
& IO_MEM_ROMD
)) {
2364 unsigned long addr1
;
2365 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2367 ptr
= phys_ram_base
+ addr1
;
2368 memcpy(ptr
, buf
, l
);
2377 /* warning: addr must be aligned */
2378 uint32_t ldl_phys(target_phys_addr_t addr
)
2386 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2388 pd
= IO_MEM_UNASSIGNED
;
2390 pd
= p
->phys_offset
;
2393 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2394 !(pd
& IO_MEM_ROMD
)) {
2396 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2397 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2400 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2401 (addr
& ~TARGET_PAGE_MASK
);
2407 /* warning: addr must be aligned */
2408 uint64_t ldq_phys(target_phys_addr_t addr
)
2416 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2418 pd
= IO_MEM_UNASSIGNED
;
2420 pd
= p
->phys_offset
;
2423 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2424 !(pd
& IO_MEM_ROMD
)) {
2426 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2427 #ifdef TARGET_WORDS_BIGENDIAN
2428 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2429 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
2431 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2432 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2436 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2437 (addr
& ~TARGET_PAGE_MASK
);
2444 uint32_t ldub_phys(target_phys_addr_t addr
)
2447 cpu_physical_memory_read(addr
, &val
, 1);
2452 uint32_t lduw_phys(target_phys_addr_t addr
)
2455 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2456 return tswap16(val
);
2459 /* warning: addr must be aligned. The ram page is not masked as dirty
2460 and the code inside is not invalidated. It is useful if the dirty
2461 bits are used to track modified PTEs */
2462 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2469 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2471 pd
= IO_MEM_UNASSIGNED
;
2473 pd
= p
->phys_offset
;
2476 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2477 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2478 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2480 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2481 (addr
& ~TARGET_PAGE_MASK
);
2486 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
2493 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2495 pd
= IO_MEM_UNASSIGNED
;
2497 pd
= p
->phys_offset
;
2500 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2501 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2502 #ifdef TARGET_WORDS_BIGENDIAN
2503 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
2504 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
2506 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2507 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
2510 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2511 (addr
& ~TARGET_PAGE_MASK
);
2516 /* warning: addr must be aligned */
2517 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2524 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2526 pd
= IO_MEM_UNASSIGNED
;
2528 pd
= p
->phys_offset
;
2531 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2532 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2533 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2535 unsigned long addr1
;
2536 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2538 ptr
= phys_ram_base
+ addr1
;
2540 if (!cpu_physical_memory_is_dirty(addr1
)) {
2541 /* invalidate code */
2542 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2544 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2545 (0xff & ~CODE_DIRTY_FLAG
);
2551 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
2554 cpu_physical_memory_write(addr
, &v
, 1);
2558 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
2560 uint16_t v
= tswap16(val
);
2561 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
2565 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
2568 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
2573 /* virtual memory access for debug */
2574 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2575 uint8_t *buf
, int len
, int is_write
)
2578 target_phys_addr_t phys_addr
;
2582 page
= addr
& TARGET_PAGE_MASK
;
2583 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2584 /* if no physical page mapped, return an error */
2585 if (phys_addr
== -1)
2587 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2590 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2599 void dump_exec_info(FILE *f
,
2600 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2602 int i
, target_code_size
, max_target_code_size
;
2603 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2604 TranslationBlock
*tb
;
2606 target_code_size
= 0;
2607 max_target_code_size
= 0;
2609 direct_jmp_count
= 0;
2610 direct_jmp2_count
= 0;
2611 for(i
= 0; i
< nb_tbs
; i
++) {
2613 target_code_size
+= tb
->size
;
2614 if (tb
->size
> max_target_code_size
)
2615 max_target_code_size
= tb
->size
;
2616 if (tb
->page_addr
[1] != -1)
2618 if (tb
->tb_next_offset
[0] != 0xffff) {
2620 if (tb
->tb_next_offset
[1] != 0xffff) {
2621 direct_jmp2_count
++;
2625 /* XXX: avoid using doubles ? */
2626 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2627 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2628 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2629 max_target_code_size
);
2630 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2631 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2632 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2633 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2635 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2636 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2638 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2640 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2641 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2642 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2643 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
2646 #if !defined(CONFIG_USER_ONLY)
2648 #define MMUSUFFIX _cmmu
2649 #define GETPC() NULL
2650 #define env cpu_single_env
2651 #define SOFTMMU_CODE_ACCESS
2654 #include "softmmu_template.h"
2657 #include "softmmu_template.h"
2660 #include "softmmu_template.h"
2663 #include "softmmu_template.h"