/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
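
/*
 * Illustrative note (not in the original source): a virtual page index is
 * split into an L1 slot and an L2 slot for the two-level tables above.
 * Assuming, for example, TARGET_PAGE_BITS == 12 and L2_BITS == 10, a 32-bit
 * address decomposes as:
 *
 *   page_index = addr >> TARGET_PAGE_BITS;       // 20 useful bits
 *   l1_slot    = page_index >> L2_BITS;          // indexes l1_map[]
 *   l2_slot    = page_index & (L2_SIZE - 1);     // indexes the PageDesc array
 *
 * page_find_alloc() below follows exactly this split.
 */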
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

char *logfilename = "/tmp/qemu.log";

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
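
/*
 * Illustrative sketch (not in the original source): resolving the memory
 * type of a guest physical address goes through the table above; pages
 * without a PhysPageDesc are treated as IO_MEM_UNASSIGNED:
 *
 *   PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
 *   unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
 *   // low bits of pd select the io handler, high bits the page offset
 *
 * The same pattern is used by tlb_set_page_exec() and the
 * cpu_physical_memory_rw() helpers later in this file.
 */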
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
339 #ifdef DEBUG_TB_CHECK
341 static void tb_invalidate_check(unsigned long address
)
343 TranslationBlock
*tb
;
345 address
&= TARGET_PAGE_MASK
;
346 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
347 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
348 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
349 address
>= tb
->pc
+ tb
->size
)) {
350 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
351 address
, (long)tb
->pc
, tb
->size
);
357 /* verify that all the pages have correct rights for code */
358 static void tb_page_check(void)
360 TranslationBlock
*tb
;
361 int i
, flags1
, flags2
;
363 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
364 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
365 flags1
= page_get_flags(tb
->pc
);
366 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
367 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
368 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
369 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
375 void tb_jmp_check(TranslationBlock
*tb
)
377 TranslationBlock
*tb1
;
380 /* suppress any remaining jumps to this TB */
384 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
387 tb1
= tb1
->jmp_next
[n1
];
389 /* check end of list */
391 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
397 /* invalidate one TB */
398 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
401 TranslationBlock
*tb1
;
405 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
408 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
412 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
414 TranslationBlock
*tb1
;
420 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
422 *ptb
= tb1
->page_next
[n1
];
425 ptb
= &tb1
->page_next
[n1
];
429 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
431 TranslationBlock
*tb1
, **ptb
;
434 ptb
= &tb
->jmp_next
[n
];
437 /* find tb(n) in circular list */
441 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
442 if (n1
== n
&& tb1
== tb
)
445 ptb
= &tb1
->jmp_first
;
447 ptb
= &tb1
->jmp_next
[n1
];
450 /* now we can suppress tb(n) from the list */
451 *ptb
= tb
->jmp_next
[n
];
453 tb
->jmp_next
[n
] = NULL
;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
464 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
469 target_ulong phys_pc
;
470 TranslationBlock
*tb1
, *tb2
;
472 /* remove the TB from the hash list */
473 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
474 h
= tb_phys_hash_func(phys_pc
);
475 tb_remove(&tb_phys_hash
[h
], tb
,
476 offsetof(TranslationBlock
, phys_hash_next
));
478 /* remove the TB from the page list */
479 if (tb
->page_addr
[0] != page_addr
) {
480 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
481 tb_page_remove(&p
->first_tb
, tb
);
482 invalidate_page_bitmap(p
);
484 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
485 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
486 tb_page_remove(&p
->first_tb
, tb
);
487 invalidate_page_bitmap(p
);
490 tb_invalidated_flag
= 1;
492 /* remove the TB from the hash list */
493 h
= tb_jmp_cache_hash_func(tb
->pc
);
494 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
495 if (env
->tb_jmp_cache
[h
] == tb
)
496 env
->tb_jmp_cache
[h
] = NULL
;
499 /* suppress this TB from the two jump lists */
500 tb_jmp_remove(tb
, 0);
501 tb_jmp_remove(tb
, 1);
503 /* suppress any remaining jumps to this TB */
509 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
510 tb2
= tb1
->jmp_next
[n1
];
511 tb_reset_jump(tb1
, n1
);
512 tb1
->jmp_next
[n1
] = NULL
;
515 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
517 tb_phys_invalidate_count
++;
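
/*
 * Illustrative note (not in the original source): the jmp_first/jmp_next
 * lists store, in the two low bits of each pointer, which jump slot of the
 * referencing TB the link comes from.  The sentinel ((long)tb | 2), as set
 * above, marks the end of the circular list.  Walking an entry therefore
 * uses the masking pattern seen throughout this file:
 *
 *   n1  = (long)tb1 & 3;                         // slot number, 2 == end of list
 *   tb1 = (TranslationBlock *)((long)tb1 & ~3);  // strip the tag bits
 */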
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
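
/*
 * Illustrative example (not in the original source): set_bits() marks the
 * bit range [start, start + len) in a byte-addressed bitmap, e.g.
 *
 *   uint8_t bitmap[TARGET_PAGE_SIZE / 8];
 *   memset(bitmap, 0, sizeof(bitmap));
 *   set_bits(bitmap, 10, 4);   // sets bits 10..13, i.e. bitmap[1] == 0x3c
 *
 * build_page_bitmap() below uses it to record which bytes of a page are
 * covered by translated code.
 */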
547 static void build_page_bitmap(PageDesc
*p
)
549 int n
, tb_start
, tb_end
;
550 TranslationBlock
*tb
;
552 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
555 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
560 tb
= (TranslationBlock
*)((long)tb
& ~3);
561 /* NOTE: this is subtle as a TB may span two physical pages */
563 /* NOTE: tb_end may be after the end of the page, but
564 it is not a problem */
565 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
566 tb_end
= tb_start
+ tb
->size
;
567 if (tb_end
> TARGET_PAGE_SIZE
)
568 tb_end
= TARGET_PAGE_SIZE
;
571 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
573 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
574 tb
= tb
->page_next
[n
];
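
/*
 * Illustrative note (not in the original source): the bitmap is only built
 * after a page has seen SMC_BITMAP_USE_THRESHOLD write faults (see
 * tb_invalidate_phys_page_range below); after that,
 * tb_invalidate_phys_page_fast() can test a write of 'len' bytes with
 *
 *   offset = start & ~TARGET_PAGE_MASK;
 *   b = p->code_bitmap[offset >> 3] >> (offset & 7);
 *   if (b & ((1 << len) - 1))
 *       ...  // the write really hits translated code
 *
 * instead of scanning every TB on the page.
 */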
578 #ifdef TARGET_HAS_PRECISE_SMC
580 static void tb_gen_code(CPUState
*env
,
581 target_ulong pc
, target_ulong cs_base
, int flags
,
584 TranslationBlock
*tb
;
586 target_ulong phys_pc
, phys_page2
, virt_page2
;
589 phys_pc
= get_phys_addr_code(env
, pc
);
592 /* flush must be done */
594 /* cannot fail at this point */
597 tc_ptr
= code_gen_ptr
;
599 tb
->cs_base
= cs_base
;
602 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
603 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
605 /* check next page if needed */
606 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
608 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
609 phys_page2
= get_phys_addr_code(env
, virt_page2
);
611 tb_link_phys(tb
, phys_pc
, phys_page2
);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
623 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
624 CPUState
*env
= cpu_single_env
;
626 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
627 target_ulong tb_start
, tb_end
;
628 target_ulong current_pc
, current_cs_base
;
630 p
= page_find(start
>> TARGET_PAGE_BITS
);
633 if (!p
->code_bitmap
&&
634 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
635 is_cpu_write_access
) {
636 /* build code bitmap */
637 build_page_bitmap(p
);
640 /* we remove all the TBs in the range [start, end[ */
641 /* XXX: see if in some cases it could be faster to invalidate all the code */
642 current_tb_not_found
= is_cpu_write_access
;
643 current_tb_modified
= 0;
644 current_tb
= NULL
; /* avoid warning */
645 current_pc
= 0; /* avoid warning */
646 current_cs_base
= 0; /* avoid warning */
647 current_flags
= 0; /* avoid warning */
651 tb
= (TranslationBlock
*)((long)tb
& ~3);
652 tb_next
= tb
->page_next
[n
];
653 /* NOTE: this is subtle as a TB may span two physical pages */
655 /* NOTE: tb_end may be after the end of the page, but
656 it is not a problem */
657 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
658 tb_end
= tb_start
+ tb
->size
;
660 tb_start
= tb
->page_addr
[1];
661 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
663 if (!(tb_end
<= start
|| tb_start
>= end
)) {
664 #ifdef TARGET_HAS_PRECISE_SMC
665 if (current_tb_not_found
) {
666 current_tb_not_found
= 0;
668 if (env
->mem_write_pc
) {
669 /* now we have a real cpu fault */
670 current_tb
= tb_find_pc(env
->mem_write_pc
);
673 if (current_tb
== tb
&&
674 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
675 /* If we are modifying the current TB, we must stop
676 its execution. We could be more precise by checking
677 that the modification is after the current PC, but it
678 would require a specialized function to partially
679 restore the CPU state */
681 current_tb_modified
= 1;
682 cpu_restore_state(current_tb
, env
,
683 env
->mem_write_pc
, NULL
);
684 #if defined(TARGET_I386)
685 current_flags
= env
->hflags
;
686 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
687 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
688 current_pc
= current_cs_base
+ env
->eip
;
690 #error unsupported CPU
693 #endif /* TARGET_HAS_PRECISE_SMC */
694 /* we need to do that to handle the case where a signal
695 occurs while doing tb_phys_invalidate() */
698 saved_tb
= env
->current_tb
;
699 env
->current_tb
= NULL
;
701 tb_phys_invalidate(tb
, -1);
703 env
->current_tb
= saved_tb
;
704 if (env
->interrupt_request
&& env
->current_tb
)
705 cpu_interrupt(env
, env
->interrupt_request
);
710 #if !defined(CONFIG_USER_ONLY)
711 /* if no code remaining, no need to continue to use slow writes */
713 invalidate_page_bitmap(p
);
714 if (is_cpu_write_access
) {
715 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
719 #ifdef TARGET_HAS_PRECISE_SMC
720 if (current_tb_modified
) {
721 /* we generate a block containing just the instruction
722 modifying the memory. It will ensure that it cannot modify
724 env
->current_tb
= NULL
;
725 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
727 cpu_resume_from_signal(env
, NULL
);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
740 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
741 cpu_single_env
->mem_write_vaddr
, len
,
743 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
747 p
= page_find(start
>> TARGET_PAGE_BITS
);
750 if (p
->code_bitmap
) {
751 offset
= start
& ~TARGET_PAGE_MASK
;
752 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
753 if (b
& ((1 << len
) - 1))
757 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
761 #if !defined(CONFIG_SOFTMMU)
762 static void tb_invalidate_phys_page(target_ulong addr
,
763 unsigned long pc
, void *puc
)
765 int n
, current_flags
, current_tb_modified
;
766 target_ulong current_pc
, current_cs_base
;
768 TranslationBlock
*tb
, *current_tb
;
769 #ifdef TARGET_HAS_PRECISE_SMC
770 CPUState
*env
= cpu_single_env
;
773 addr
&= TARGET_PAGE_MASK
;
774 p
= page_find(addr
>> TARGET_PAGE_BITS
);
778 current_tb_modified
= 0;
780 current_pc
= 0; /* avoid warning */
781 current_cs_base
= 0; /* avoid warning */
782 current_flags
= 0; /* avoid warning */
783 #ifdef TARGET_HAS_PRECISE_SMC
785 current_tb
= tb_find_pc(pc
);
790 tb
= (TranslationBlock
*)((long)tb
& ~3);
791 #ifdef TARGET_HAS_PRECISE_SMC
792 if (current_tb
== tb
&&
793 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
794 /* If we are modifying the current TB, we must stop
795 its execution. We could be more precise by checking
796 that the modification is after the current PC, but it
797 would require a specialized function to partially
798 restore the CPU state */
800 current_tb_modified
= 1;
801 cpu_restore_state(current_tb
, env
, pc
, puc
);
802 #if defined(TARGET_I386)
803 current_flags
= env
->hflags
;
804 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
805 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
806 current_pc
= current_cs_base
+ env
->eip
;
808 #error unsupported CPU
811 #endif /* TARGET_HAS_PRECISE_SMC */
812 tb_phys_invalidate(tb
, addr
);
813 tb
= tb
->page_next
[n
];
816 #ifdef TARGET_HAS_PRECISE_SMC
817 if (current_tb_modified
) {
818 /* we generate a block containing just the instruction
819 modifying the memory. It will ensure that it cannot modify
821 env
->current_tb
= NULL
;
822 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
824 cpu_resume_from_signal(env
, puc
);
830 /* add the tb in the target page and protect it if necessary */
831 static inline void tb_alloc_page(TranslationBlock
*tb
,
832 unsigned int n
, target_ulong page_addr
)
835 TranslationBlock
*last_first_tb
;
837 tb
->page_addr
[n
] = page_addr
;
838 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
839 tb
->page_next
[n
] = p
->first_tb
;
840 last_first_tb
= p
->first_tb
;
841 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
842 invalidate_page_bitmap(p
);
844 #if defined(TARGET_HAS_SMC) || 1
846 #if defined(CONFIG_USER_ONLY)
847 if (p
->flags
& PAGE_WRITE
) {
852 /* force the host page as non writable (writes will have a
853 page fault + mprotect overhead) */
854 page_addr
&= qemu_host_page_mask
;
856 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
857 addr
+= TARGET_PAGE_SIZE
) {
859 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
863 p2
->flags
&= ~PAGE_WRITE
;
864 page_get_flags(addr
);
866 mprotect(g2h(page_addr
), qemu_host_page_size
,
867 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
868 #ifdef DEBUG_TB_INVALIDATE
869 printf("protecting code page: 0x%08lx\n",
874 /* if some code is already present, then the pages are already
875 protected. So we handle the case where only the first TB is
876 allocated in a physical page */
877 if (!last_first_tb
) {
878 tlb_protect_code(page_addr
);
882 #endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
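
/*
 * Illustrative sketch (not in the original source): callers handle a NULL
 * return by flushing and retrying, which is why CODE_GEN_BUFFER_MAX_SIZE
 * leaves CODE_GEN_MAX_SIZE bytes of slack in the buffer:
 *
 *   tb = tb_alloc(pc);
 *   if (!tb) {
 *       tb_flush(env);          // frees the whole buffer
 *       tb = tb_alloc(pc);      // cannot fail right after a flush
 *   }
 *
 * tb_gen_code() earlier in this file uses exactly this pattern (see its
 * "flush must be done" / "cannot fail at this point" comments).
 */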
900 /* add a new TB and link it to the physical page tables. phys_page2 is
901 (-1) to indicate that only one page contains the TB. */
902 void tb_link_phys(TranslationBlock
*tb
,
903 target_ulong phys_pc
, target_ulong phys_page2
)
906 TranslationBlock
**ptb
;
908 /* add in the physical hash table */
909 h
= tb_phys_hash_func(phys_pc
);
910 ptb
= &tb_phys_hash
[h
];
911 tb
->phys_hash_next
= *ptb
;
914 /* add in the page list */
915 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
916 if (phys_page2
!= -1)
917 tb_alloc_page(tb
, 1, phys_page2
);
919 tb
->page_addr
[1] = -1;
921 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
922 tb
->jmp_next
[0] = NULL
;
923 tb
->jmp_next
[1] = NULL
;
925 tb
->cflags
&= ~CF_FP_USED
;
926 if (tb
->cflags
& CF_TB_FP_USED
)
927 tb
->cflags
|= CF_FP_USED
;
930 /* init original jump addresses */
931 if (tb
->tb_next_offset
[0] != 0xffff)
932 tb_reset_jump(tb
, 0);
933 if (tb
->tb_next_offset
[1] != 0xffff)
934 tb_reset_jump(tb
, 1);
936 #ifdef DEBUG_TB_CHECK
941 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
942 tb[1].tc_ptr. Return NULL if not found */
943 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
947 TranslationBlock
*tb
;
951 if (tc_ptr
< (unsigned long)code_gen_buffer
||
952 tc_ptr
>= (unsigned long)code_gen_ptr
)
954 /* binary search (cf Knuth) */
957 while (m_min
<= m_max
) {
958 m
= (m_min
+ m_max
) >> 1;
960 v
= (unsigned long)tb
->tc_ptr
;
963 else if (tc_ptr
< v
) {
972 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
974 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
976 TranslationBlock
*tb1
, *tb_next
, **ptb
;
979 tb1
= tb
->jmp_next
[n
];
981 /* find head of list */
984 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
987 tb1
= tb1
->jmp_next
[n1
];
989 /* we are now sure now that tb jumps to tb1 */
992 /* remove tb from the jmp_first list */
993 ptb
= &tb_next
->jmp_first
;
997 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
998 if (n1
== n
&& tb1
== tb
)
1000 ptb
= &tb1
->jmp_next
[n1
];
1002 *ptb
= tb
->jmp_next
[n
];
1003 tb
->jmp_next
[n
] = NULL
;
1005 /* suppress the jump to next tb in generated code */
1006 tb_reset_jump(tb
, n
);
1008 /* suppress jumps in the tb on which we could have jumped */
1009 tb_reset_jump_recursive(tb_next
);
1013 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1015 tb_reset_jump_recursive2(tb
, 0);
1016 tb_reset_jump_recursive2(tb
, 1);
1019 #if defined(TARGET_HAS_ICE)
1020 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1022 target_ulong addr
, pd
;
1023 ram_addr_t ram_addr
;
1026 addr
= cpu_get_phys_page_debug(env
, pc
);
1027 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1029 pd
= IO_MEM_UNASSIGNED
;
1031 pd
= p
->phys_offset
;
1033 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1034 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1038 /* Add a watchpoint. */
1039 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1043 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1044 if (addr
== env
->watchpoint
[i
].vaddr
)
1047 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1050 i
= env
->nb_watchpoints
++;
1051 env
->watchpoint
[i
].vaddr
= addr
;
1052 tlb_flush_page(env
, addr
);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in. */
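
/*
 * Illustrative sketch (not in the original source): a debug front end pairs
 * these helpers around a guest virtual address (the exact return-value
 * conventions are an assumption here; see the bodies above and below):
 *
 *   cpu_watchpoint_insert(env, watch_vaddr);
 *   ...
 *   cpu_watchpoint_remove(env, watch_vaddr);
 */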
1060 /* Remove a watchpoint. */
1061 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1065 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1066 if (addr
== env
->watchpoint
[i
].vaddr
) {
1067 env
->nb_watchpoints
--;
1068 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1069 tlb_flush_page(env
, addr
);
1076 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1077 breakpoint is reached */
1078 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1080 #if defined(TARGET_HAS_ICE)
1083 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1084 if (env
->breakpoints
[i
] == pc
)
1088 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1090 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1092 breakpoint_invalidate(env
, pc
);
1099 /* remove a breakpoint */
1100 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1102 #if defined(TARGET_HAS_ICE)
1104 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1105 if (env
->breakpoints
[i
] == pc
)
1110 env
->nb_breakpoints
--;
1111 if (i
< env
->nb_breakpoints
)
1112 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1114 breakpoint_invalidate(env
, pc
);
1121 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1122 CPU loop after each instruction */
1123 void cpu_single_step(CPUState
*env
, int enabled
)
1125 #if defined(TARGET_HAS_ICE)
1126 if (env
->singlestep_enabled
!= enabled
) {
1127 env
->singlestep_enabled
= enabled
;
/* must flush all the translated code to avoid inconsistencies */
1129 /* XXX: only flush what is necessary */
1135 /* enable or disable low levels log */
1136 void cpu_set_log(int log_flags
)
1138 loglevel
= log_flags
;
1139 if (loglevel
&& !logfile
) {
1140 logfile
= fopen(logfilename
, "w");
1142 perror(logfilename
);
1145 #if !defined(CONFIG_SOFTMMU)
1146 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1148 static uint8_t logfile_buf
[4096];
1149 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1152 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1157 void cpu_set_log_filename(const char *filename
)
1159 logfilename
= strdup(filename
);
1162 /* mask must never be zero, except for A20 change call */
1163 void cpu_interrupt(CPUState
*env
, int mask
)
1165 TranslationBlock
*tb
;
1166 static int interrupt_lock
;
1168 env
->interrupt_request
|= mask
;
1169 /* if the cpu is currently executing code, we must unlink it and
1170 all the potentially executing TB */
1171 tb
= env
->current_tb
;
1172 if (tb
&& !testandset(&interrupt_lock
)) {
1173 env
->current_tb
= NULL
;
1174 tb_reset_jump_recursive(tb
);
1179 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1181 env
->interrupt_request
&= ~mask
;
1184 CPULogItem cpu_log_items
[] = {
1185 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1186 "show generated host assembly code for each compiled TB" },
1187 { CPU_LOG_TB_IN_ASM
, "in_asm",
1188 "show target assembly code for each compiled TB" },
1189 { CPU_LOG_TB_OP
, "op",
1190 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1192 { CPU_LOG_TB_OP_OPT
, "op_opt",
1193 "show micro ops after optimization for each compiled TB" },
1195 { CPU_LOG_INT
, "int",
1196 "show interrupts/exceptions in short format" },
1197 { CPU_LOG_EXEC
, "exec",
1198 "show trace before each executed TB (lots of logs)" },
1199 { CPU_LOG_TB_CPU
, "cpu",
1200 "show CPU state before bloc translation" },
1202 { CPU_LOG_PCALL
, "pcall",
1203 "show protected mode far calls/returns/exceptions" },
1206 { CPU_LOG_IOPORT
, "ioport",
1207 "show all i/o ports accesses" },
1212 static int cmp1(const char *s1
, int n
, const char *s2
)
1214 if (strlen(s2
) != n
)
1216 return memcmp(s1
, s2
, n
) == 0;
1219 /* takes a comma separated list of log masks. Return 0 if error. */
1220 int cpu_str_to_log_mask(const char *str
)
1229 p1
= strchr(p
, ',');
1232 if(cmp1(p
,p1
-p
,"all")) {
1233 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1237 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1238 if (cmp1(p
, p1
- p
, item
->name
))
1252 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1257 fprintf(stderr
, "qemu: fatal: ");
1258 vfprintf(stderr
, fmt
, ap
);
1259 fprintf(stderr
, "\n");
1261 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1263 cpu_dump_state(env
, stderr
, fprintf
, 0);
1269 CPUState
*cpu_copy(CPUState
*env
)
1271 CPUState
*new_env
= cpu_init();
1272 /* preserve chaining and index */
1273 CPUState
*next_cpu
= new_env
->next_cpu
;
1274 int cpu_index
= new_env
->cpu_index
;
1275 memcpy(new_env
, env
, sizeof(CPUState
));
1276 new_env
->next_cpu
= next_cpu
;
1277 new_env
->cpu_index
= cpu_index
;
1281 #if !defined(CONFIG_USER_ONLY)
1283 /* NOTE: if flush_global is true, also flush global entries (not
1285 void tlb_flush(CPUState
*env
, int flush_global
)
1289 #if defined(DEBUG_TLB)
1290 printf("tlb_flush:\n");
1292 /* must reset current TB so that interrupts cannot modify the
1293 links while we are modifying them */
1294 env
->current_tb
= NULL
;
1296 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1297 env
->tlb_table
[0][i
].addr_read
= -1;
1298 env
->tlb_table
[0][i
].addr_write
= -1;
1299 env
->tlb_table
[0][i
].addr_code
= -1;
1300 env
->tlb_table
[1][i
].addr_read
= -1;
1301 env
->tlb_table
[1][i
].addr_write
= -1;
1302 env
->tlb_table
[1][i
].addr_code
= -1;
1305 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1307 #if !defined(CONFIG_SOFTMMU)
1308 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1311 if (env
->kqemu_enabled
) {
1312 kqemu_flush(env
, flush_global
);
1318 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1320 if (addr
== (tlb_entry
->addr_read
&
1321 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1322 addr
== (tlb_entry
->addr_write
&
1323 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1324 addr
== (tlb_entry
->addr_code
&
1325 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1326 tlb_entry
->addr_read
= -1;
1327 tlb_entry
->addr_write
= -1;
1328 tlb_entry
->addr_code
= -1;
1332 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1335 TranslationBlock
*tb
;
1337 #if defined(DEBUG_TLB)
1338 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1340 /* must reset current TB so that interrupts cannot modify the
1341 links while we are modifying them */
1342 env
->current_tb
= NULL
;
1344 addr
&= TARGET_PAGE_MASK
;
1345 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1346 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1347 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1349 /* Discard jump cache entries for any tb which might potentially
1350 overlap the flushed page. */
1351 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1352 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1354 i
= tb_jmp_cache_hash_page(addr
);
1355 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1357 #if !defined(CONFIG_SOFTMMU)
1358 if (addr
< MMAP_AREA_END
)
1359 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1362 if (env
->kqemu_enabled
) {
1363 kqemu_flush_page(env
, addr
);
1368 /* update the TLBs so that writes to code in the virtual page 'addr'
1370 static void tlb_protect_code(ram_addr_t ram_addr
)
1372 cpu_physical_memory_reset_dirty(ram_addr
,
1373 ram_addr
+ TARGET_PAGE_SIZE
,
1377 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1378 tested for self modifying code */
1379 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1382 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1385 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1386 unsigned long start
, unsigned long length
)
1389 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1390 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1391 if ((addr
- start
) < length
) {
1392 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1397 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1401 unsigned long length
, start1
;
1405 start
&= TARGET_PAGE_MASK
;
1406 end
= TARGET_PAGE_ALIGN(end
);
1408 length
= end
- start
;
1411 len
= length
>> TARGET_PAGE_BITS
;
1413 /* XXX: should not depend on cpu context */
1415 if (env
->kqemu_enabled
) {
1418 for(i
= 0; i
< len
; i
++) {
1419 kqemu_set_notdirty(env
, addr
);
1420 addr
+= TARGET_PAGE_SIZE
;
1424 mask
= ~dirty_flags
;
1425 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1426 for(i
= 0; i
< len
; i
++)
1429 /* we modify the TLB cache so that the dirty bit will be set again
1430 when accessing the range */
1431 start1
= start
+ (unsigned long)phys_ram_base
;
1432 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1433 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1434 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1435 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1436 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1439 #if !defined(CONFIG_SOFTMMU)
1440 /* XXX: this is expensive */
1446 for(i
= 0; i
< L1_SIZE
; i
++) {
1449 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1450 for(j
= 0; j
< L2_SIZE
; j
++) {
1451 if (p
->valid_tag
== virt_valid_tag
&&
1452 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1453 (p
->prot
& PROT_WRITE
)) {
1454 if (addr
< MMAP_AREA_END
) {
1455 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1456 p
->prot
& ~PROT_WRITE
);
1459 addr
+= TARGET_PAGE_SIZE
;
1468 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1470 ram_addr_t ram_addr
;
1472 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1473 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1474 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1475 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1476 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1481 /* update the TLB according to the current state of the dirty bits */
1482 void cpu_tlb_update_dirty(CPUState
*env
)
1485 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1486 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1487 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1488 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1491 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1492 unsigned long start
)
1495 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1496 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1497 if (addr
== start
) {
1498 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1503 /* update the TLB corresponding to virtual page vaddr and phys addr
1504 addr so that it is no longer dirty */
1505 static inline void tlb_set_dirty(CPUState
*env
,
1506 unsigned long addr
, target_ulong vaddr
)
1510 addr
&= TARGET_PAGE_MASK
;
1511 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1512 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1513 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
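
/*
 * Illustrative note (not in the original source): the low bits of addr_write
 * in a TLB entry select how a store is handled.  IO_MEM_RAM means "write
 * straight to host RAM", IO_MEM_NOTDIRTY routes the store through the
 * notdirty handlers so dirty bits and translated code can be updated, and
 * other values dispatch to an io handler.  The helpers above
 * (tlb_reset_dirty_range, tlb_update_dirty, tlb_set_dirty1) just toggle an
 * entry between the IO_MEM_RAM and IO_MEM_NOTDIRTY forms.
 */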
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
1520 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1521 target_phys_addr_t paddr
, int prot
,
1522 int is_user
, int is_softmmu
)
1527 target_ulong address
;
1528 target_phys_addr_t addend
;
1533 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1535 pd
= IO_MEM_UNASSIGNED
;
1537 pd
= p
->phys_offset
;
1539 #if defined(DEBUG_TLB)
1540 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1541 vaddr
, (int)paddr
, prot
, is_user
, is_softmmu
, pd
);
1545 #if !defined(CONFIG_SOFTMMU)
1549 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1550 /* IO memory case */
1551 address
= vaddr
| pd
;
1554 /* standard memory */
1556 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1559 /* Make accesses to pages with watchpoints go via the
1560 watchpoint trap routines. */
1561 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1562 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1563 if (address
& ~TARGET_PAGE_MASK
) {
1564 env
->watchpoint
[i
].is_ram
= 0;
1565 address
= vaddr
| io_mem_watch
;
1567 env
->watchpoint
[i
].is_ram
= 1;
1568 /* TODO: Figure out how to make read watchpoints coexist
1570 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1575 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1577 te
= &env
->tlb_table
[is_user
][index
];
1578 te
->addend
= addend
;
1579 if (prot
& PAGE_READ
) {
1580 te
->addr_read
= address
;
1584 if (prot
& PAGE_EXEC
) {
1585 te
->addr_code
= address
;
1589 if (prot
& PAGE_WRITE
) {
1590 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1591 (pd
& IO_MEM_ROMD
)) {
1592 /* write access calls the I/O callback */
1593 te
->addr_write
= vaddr
|
1594 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1595 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1596 !cpu_physical_memory_is_dirty(pd
)) {
1597 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1599 te
->addr_write
= address
;
1602 te
->addr_write
= -1;
1605 #if !defined(CONFIG_SOFTMMU)
1607 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1608 /* IO access: no mapping is done as it will be handled by the
1610 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1615 if (vaddr
>= MMAP_AREA_END
) {
1618 if (prot
& PROT_WRITE
) {
1619 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1620 #if defined(TARGET_HAS_SMC) || 1
1623 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1624 !cpu_physical_memory_is_dirty(pd
))) {
1625 /* ROM: we do as if code was inside */
1626 /* if code is present, we only map as read only and save the
1630 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1633 vp
->valid_tag
= virt_valid_tag
;
1634 prot
&= ~PAGE_WRITE
;
1637 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1638 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
    if (map_addr == MAP_FAILED) {
        cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1652 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1654 #if !defined(CONFIG_SOFTMMU)
1657 #if defined(DEBUG_TLB)
1658 printf("page_unprotect: addr=0x%08x\n", addr
);
1660 addr
&= TARGET_PAGE_MASK
;
1662 /* if it is not mapped, no need to worry here */
1663 if (addr
>= MMAP_AREA_END
)
1665 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1668 /* NOTE: in this case, validate_tag is _not_ tested as it
1669 validates only the code TLB */
1670 if (vp
->valid_tag
!= virt_valid_tag
)
1672 if (!(vp
->prot
& PAGE_WRITE
))
1674 #if defined(DEBUG_TLB)
1675 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1676 addr
, vp
->phys_addr
, vp
->prot
);
1678 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1679 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1680 (unsigned long)addr
, vp
->prot
);
1681 /* set the dirty bit */
1682 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1683 /* flush the code inside */
1684 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1693 void tlb_flush(CPUState
*env
, int flush_global
)
1697 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1701 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1702 target_phys_addr_t paddr
, int prot
,
1703 int is_user
, int is_softmmu
)
1708 /* dump memory mappings */
1709 void page_dump(FILE *f
)
1711 unsigned long start
, end
;
1712 int i
, j
, prot
, prot1
;
1715 fprintf(f
, "%-8s %-8s %-8s %s\n",
1716 "start", "end", "size", "prot");
1720 for(i
= 0; i
<= L1_SIZE
; i
++) {
1725 for(j
= 0;j
< L2_SIZE
; j
++) {
1730 if (prot1
!= prot
) {
1731 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1733 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1734 start
, end
, end
- start
,
1735 prot
& PAGE_READ
? 'r' : '-',
1736 prot
& PAGE_WRITE
? 'w' : '-',
1737 prot
& PAGE_EXEC
? 'x' : '-');
1751 int page_get_flags(target_ulong address
)
1755 p
= page_find(address
>> TARGET_PAGE_BITS
);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
1764 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1769 start
= start
& TARGET_PAGE_MASK
;
1770 end
= TARGET_PAGE_ALIGN(end
);
1771 if (flags
& PAGE_WRITE
)
1772 flags
|= PAGE_WRITE_ORG
;
1773 spin_lock(&tb_lock
);
1774 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1775 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1776 /* if the write protection is set, then we invalidate the code
1778 if (!(p
->flags
& PAGE_WRITE
) &&
1779 (flags
& PAGE_WRITE
) &&
1781 tb_invalidate_phys_page(addr
, 0, NULL
);
1785 spin_unlock(&tb_lock
);
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1790 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1792 unsigned int page_index
, prot
, pindex
;
1794 target_ulong host_start
, host_end
, addr
;
1796 host_start
= address
& qemu_host_page_mask
;
1797 page_index
= host_start
>> TARGET_PAGE_BITS
;
1798 p1
= page_find(page_index
);
1801 host_end
= host_start
+ qemu_host_page_size
;
1804 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1808 /* if the page was really writable, then we change its
1809 protection back to writable */
1810 if (prot
& PAGE_WRITE_ORG
) {
1811 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1812 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1813 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1814 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1815 p1
[pindex
].flags
|= PAGE_WRITE
;
1816 /* and since the content will be modified, we must invalidate
1817 the corresponding translated code. */
1818 tb_invalidate_phys_page(address
, pc
, puc
);
1819 #ifdef DEBUG_TB_CHECK
1820 tb_invalidate_check(address
);
1828 /* call this function when system calls directly modify a memory area */
1829 /* ??? This should be redundant now we have lock_user. */
1830 void page_unprotect_range(target_ulong data
, target_ulong data_size
)
1832 target_ulong start
, end
, addr
;
1835 end
= start
+ data_size
;
1836 start
&= TARGET_PAGE_MASK
;
1837 end
= TARGET_PAGE_ALIGN(end
);
1838 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1839 page_unprotect(addr
, 0, NULL
);
1843 static inline void tlb_set_dirty(CPUState
*env
,
1844 unsigned long addr
, target_ulong vaddr
)
1847 #endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
1856 target_phys_addr_t addr
, end_addr
;
1860 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
1861 end_addr
= start_addr
+ size
;
1862 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
1863 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1864 p
->phys_offset
= phys_offset
;
1865 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
1866 (phys_offset
& IO_MEM_ROMD
))
1867 phys_offset
+= TARGET_PAGE_SIZE
;
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
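
/*
 * Illustrative sketch (not in the original source): a board model typically
 * registers its RAM and a ROM/device region like this (ram_size and
 * bios_offset are hypothetical names):
 *
 *   cpu_register_physical_memory(0, ram_size, IO_MEM_RAM);
 *   cpu_register_physical_memory(0xfffc0000, 0x40000,
 *                                bios_offset | IO_MEM_ROM);
 *
 * phys_offset is a RAM offset (as returned by qemu_ram_alloc() below) or an
 * io index from cpu_register_io_memory(), as described in the comment above.
 */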
1878 /* XXX: temporary until new memory mapping API */
1879 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
1883 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1885 return IO_MEM_UNASSIGNED
;
1886 return p
->phys_offset
;
1889 /* XXX: better than nothing */
1890 ram_addr_t
qemu_ram_alloc(unsigned int size
)
1893 if ((phys_ram_alloc_offset
+ size
) >= phys_ram_size
) {
1894 fprintf(stderr
, "Not enough memory (requested_size = %u, max memory = %d)\n",
1895 size
, phys_ram_size
);
1898 addr
= phys_ram_alloc_offset
;
1899 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
1903 void qemu_ram_free(ram_addr_t addr
)
1907 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
1909 #ifdef DEBUG_UNASSIGNED
1910 printf("Unassigned mem read 0x%08x\n", (int)addr
);
1915 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1917 #ifdef DEBUG_UNASSIGNED
1918 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr
, val
);
1922 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
1923 unassigned_mem_readb
,
1924 unassigned_mem_readb
,
1925 unassigned_mem_readb
,
1928 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
1929 unassigned_mem_writeb
,
1930 unassigned_mem_writeb
,
1931 unassigned_mem_writeb
,
1934 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1936 unsigned long ram_addr
;
1938 ram_addr
= addr
- (unsigned long)phys_ram_base
;
1939 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1940 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1941 #if !defined(CONFIG_USER_ONLY)
1942 tb_invalidate_phys_page_fast(ram_addr
, 1);
1943 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1946 stb_p((uint8_t *)(long)addr
, val
);
1948 if (cpu_single_env
->kqemu_enabled
&&
1949 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
1950 kqemu_modify_page(cpu_single_env
, ram_addr
);
1952 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
1953 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
1954 /* we remove the notdirty callback only if the code has been
1956 if (dirty_flags
== 0xff)
1957 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
1960 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1962 unsigned long ram_addr
;
1964 ram_addr
= addr
- (unsigned long)phys_ram_base
;
1965 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1966 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1967 #if !defined(CONFIG_USER_ONLY)
1968 tb_invalidate_phys_page_fast(ram_addr
, 2);
1969 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1972 stw_p((uint8_t *)(long)addr
, val
);
1974 if (cpu_single_env
->kqemu_enabled
&&
1975 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
1976 kqemu_modify_page(cpu_single_env
, ram_addr
);
1978 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
1979 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
1980 /* we remove the notdirty callback only if the code has been
1982 if (dirty_flags
== 0xff)
1983 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
1986 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1988 unsigned long ram_addr
;
1990 ram_addr
= addr
- (unsigned long)phys_ram_base
;
1991 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1992 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1993 #if !defined(CONFIG_USER_ONLY)
1994 tb_invalidate_phys_page_fast(ram_addr
, 4);
1995 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1998 stl_p((uint8_t *)(long)addr
, val
);
2000 if (cpu_single_env
->kqemu_enabled
&&
2001 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2002 kqemu_modify_page(cpu_single_env
, ram_addr
);
2004 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2005 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2006 /* we remove the notdirty callback only if the code has been
2008 if (dirty_flags
== 0xff)
2009 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
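
/*
 * Illustrative note (not in the original source): phys_ram_dirty holds one
 * byte of dirty flags per RAM page.  A value of 0xff means "fully dirty";
 * CODE_DIRTY_FLAG tracks whether translated code derived from the page is
 * still valid.  That is why the notdirty handlers above first call
 * tb_invalidate_phys_page_fast(), then set every bit except CODE_DIRTY_FLAG,
 * and only drop the slow write path once dirty_flags == 0xff.
 */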
2012 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2013 NULL
, /* never used */
2014 NULL
, /* never used */
2015 NULL
, /* never used */
2018 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2019 notdirty_mem_writeb
,
2020 notdirty_mem_writew
,
2021 notdirty_mem_writel
,
2024 #if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   handlers. */
2028 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2030 return ldub_phys(addr
);
2033 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2035 return lduw_phys(addr
);
2038 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2040 return ldl_phys(addr
);
2043 /* Generate a debug exception if a watchpoint has been hit.
2044 Returns the real physical address of the access. addr will be a host
2045 address in the is_ram case. */
2046 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2048 CPUState
*env
= cpu_single_env
;
2050 target_ulong retaddr
;
2054 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2055 watch
= env
->watchpoint
[i
].vaddr
;
2056 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2057 if (env
->watchpoint
[i
].is_ram
)
2058 retaddr
= addr
- (unsigned long)phys_ram_base
;
2059 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2060 cpu_single_env
->watchpoint_hit
= i
+ 1;
2061 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2069 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2072 addr
= check_watchpoint(addr
);
2073 stb_phys(addr
, val
);
2076 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2079 addr
= check_watchpoint(addr
);
2080 stw_phys(addr
, val
);
2083 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2086 addr
= check_watchpoint(addr
);
2087 stl_phys(addr
, val
);
2090 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2096 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2103 static void io_mem_init(void)
2105 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2106 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2107 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2110 #if defined(CONFIG_SOFTMMU)
2111 io_mem_watch
= cpu_register_io_memory(-1, watch_mem_read
,
2112 watch_mem_write
, NULL
);
2114 /* alloc dirty bits array */
2115 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2116 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;
2132 if (io_index
<= 0) {
2133 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2135 io_index
= io_mem_nb
++;
2137 if (io_index
>= IO_MEM_NB_ENTRIES
)
2141 for(i
= 0;i
< 3; i
++) {
2142 io_mem_read
[io_index
][i
] = mem_read
[i
];
2143 io_mem_write
[io_index
][i
] = mem_write
[i
];
2145 io_mem_opaque
[io_index
] = opaque
;
2146 return io_index
<< IO_MEM_SHIFT
;
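
/*
 * Illustrative sketch (not in the original source): a device model supplies
 * three read and three write callbacks (byte/word/long) and maps the
 * returned value with cpu_register_physical_memory().  The callback names
 * and addresses below are hypothetical:
 *
 *   static CPUReadMemoryFunc *mydev_read[3] = {
 *       mydev_readb, mydev_readw, mydev_readl,
 *   };
 *   static CPUWriteMemoryFunc *mydev_write[3] = {
 *       mydev_writeb, mydev_writew, mydev_writel,
 *   };
 *
 *   int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
 *   cpu_register_physical_memory(0x10000000, 0x1000, io);
 */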
2149 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2151 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2154 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2156 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2159 /* physical memory access (slow version, mainly for debug) */
2160 #if defined(CONFIG_USER_ONLY)
2161 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2162 int len
, int is_write
)
2169 page
= addr
& TARGET_PAGE_MASK
;
2170 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2173 flags
= page_get_flags(page
);
2174 if (!(flags
& PAGE_VALID
))
2177 if (!(flags
& PAGE_WRITE
))
2179 p
= lock_user(addr
, len
, 0);
2180 memcpy(p
, buf
, len
);
2181 unlock_user(p
, addr
, len
);
2183 if (!(flags
& PAGE_READ
))
2185 p
= lock_user(addr
, len
, 1);
2186 memcpy(buf
, p
, len
);
2187 unlock_user(p
, addr
, 0);
2196 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2197 int len
, int is_write
)
2202 target_phys_addr_t page
;
2207 page
= addr
& TARGET_PAGE_MASK
;
2208 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2211 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2213 pd
= IO_MEM_UNASSIGNED
;
2215 pd
= p
->phys_offset
;
2219 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2220 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2221 /* XXX: could force cpu_single_env to NULL to avoid
2223 if (l
>= 4 && ((addr
& 3) == 0)) {
2224 /* 32 bit write access */
2226 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2228 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2229 /* 16 bit write access */
2231 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2234 /* 8 bit write access */
2236 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2240 unsigned long addr1
;
2241 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2243 ptr
= phys_ram_base
+ addr1
;
2244 memcpy(ptr
, buf
, l
);
2245 if (!cpu_physical_memory_is_dirty(addr1
)) {
2246 /* invalidate code */
2247 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2249 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2250 (0xff & ~CODE_DIRTY_FLAG
);
2254 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2255 !(pd
& IO_MEM_ROMD
)) {
2257 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2258 if (l
>= 4 && ((addr
& 3) == 0)) {
2259 /* 32 bit read access */
2260 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2263 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2264 /* 16 bit read access */
2265 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2269 /* 8 bit read access */
2270 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2276 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2277 (addr
& ~TARGET_PAGE_MASK
);
2278 memcpy(buf
, ptr
, l
);
2287 /* used for ROM loading : can write in RAM and ROM */
2288 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2289 const uint8_t *buf
, int len
)
2293 target_phys_addr_t page
;
2298 page
= addr
& TARGET_PAGE_MASK
;
2299 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2302 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2304 pd
= IO_MEM_UNASSIGNED
;
2306 pd
= p
->phys_offset
;
2309 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2310 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2311 !(pd
& IO_MEM_ROMD
)) {
2314 unsigned long addr1
;
2315 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2317 ptr
= phys_ram_base
+ addr1
;
2318 memcpy(ptr
, buf
, l
);
2327 /* warning: addr must be aligned */
2328 uint32_t ldl_phys(target_phys_addr_t addr
)
2336 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2338 pd
= IO_MEM_UNASSIGNED
;
2340 pd
= p
->phys_offset
;
2343 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2344 !(pd
& IO_MEM_ROMD
)) {
2346 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2347 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2350 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2351 (addr
& ~TARGET_PAGE_MASK
);
2357 /* warning: addr must be aligned */
2358 uint64_t ldq_phys(target_phys_addr_t addr
)
2366 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2368 pd
= IO_MEM_UNASSIGNED
;
2370 pd
= p
->phys_offset
;
2373 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2374 !(pd
& IO_MEM_ROMD
)) {
2376 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2377 #ifdef TARGET_WORDS_BIGENDIAN
2378 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2379 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
2381 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2382 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2386 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2387 (addr
& ~TARGET_PAGE_MASK
);
2394 uint32_t ldub_phys(target_phys_addr_t addr
)
2397 cpu_physical_memory_read(addr
, &val
, 1);
2402 uint32_t lduw_phys(target_phys_addr_t addr
)
2405 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2406 return tswap16(val
);
2409 /* warning: addr must be aligned. The ram page is not masked as dirty
2410 and the code inside is not invalidated. It is useful if the dirty
2411 bits are used to track modified PTEs */
2412 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2419 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2421 pd
= IO_MEM_UNASSIGNED
;
2423 pd
= p
->phys_offset
;
2426 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2427 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2428 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2430 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2431 (addr
& ~TARGET_PAGE_MASK
);
2436 /* warning: addr must be aligned */
2437 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2444 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2446 pd
= IO_MEM_UNASSIGNED
;
2448 pd
= p
->phys_offset
;
2451 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2452 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2453 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2455 unsigned long addr1
;
2456 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2458 ptr
= phys_ram_base
+ addr1
;
2460 if (!cpu_physical_memory_is_dirty(addr1
)) {
2461 /* invalidate code */
2462 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2464 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2465 (0xff & ~CODE_DIRTY_FLAG
);
2471 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
2474 cpu_physical_memory_write(addr
, &v
, 1);
2478 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
2480 uint16_t v
= tswap16(val
);
2481 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
2485 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
2488 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
2493 /* virtual memory access for debug */
2494 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2495 uint8_t *buf
, int len
, int is_write
)
2498 target_ulong page
, phys_addr
;
2501 page
= addr
& TARGET_PAGE_MASK
;
2502 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2503 /* if no physical page mapped, return an error */
2504 if (phys_addr
== -1)
2506 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2509 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2518 void dump_exec_info(FILE *f
,
2519 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2521 int i
, target_code_size
, max_target_code_size
;
2522 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2523 TranslationBlock
*tb
;
2525 target_code_size
= 0;
2526 max_target_code_size
= 0;
2528 direct_jmp_count
= 0;
2529 direct_jmp2_count
= 0;
2530 for(i
= 0; i
< nb_tbs
; i
++) {
2532 target_code_size
+= tb
->size
;
2533 if (tb
->size
> max_target_code_size
)
2534 max_target_code_size
= tb
->size
;
2535 if (tb
->page_addr
[1] != -1)
2537 if (tb
->tb_next_offset
[0] != 0xffff) {
2539 if (tb
->tb_next_offset
[1] != 0xffff) {
2540 direct_jmp2_count
++;
2544 /* XXX: avoid using doubles ? */
2545 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2546 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2547 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2548 max_target_code_size
);
2549 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2550 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2551 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2552 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2554 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2555 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2557 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2559 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2560 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2561 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2562 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
2565 #if !defined(CONFIG_USER_ONLY)
2567 #define MMUSUFFIX _cmmu
2568 #define GETPC() NULL
2569 #define env cpu_single_env
2570 #define SOFTMMU_CODE_ACCESS
2573 #include "softmmu_template.h"
2576 #include "softmmu_template.h"
2579 #include "softmmu_template.h"
2582 #include "softmmu_template.h"