/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "cache-utils.h"

#if !defined(TARGET_IA64)
#if defined(CONFIG_USER_ONLY)
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
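/* Note: code_gen_buffer holds the host code produced by TCG; code_gen_ptr is
   the bump-allocation cursor into it, and code_gen_buffer_max_size leaves
   room for one worst-case translation block so a block being generated can
   never overrun the buffer before a flush is triggered. */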
#if !defined(CONFIG_USER_ONLY)
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec. */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
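/* Example: with 4 KiB target pages (TARGET_PAGE_BITS = 12), a 64-bit host and
   L2_BITS = 10, the virtual map is a V_L1_SIZE-entry root with
   V_L1_SHIFT / L2_BITS further levels of 1024-entry tables below it, so only
   the parts of the address space that are actually touched ever allocate
   page-table nodes. */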
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
/* log support */
#ifdef _WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
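/* map_exec() is what makes the statically allocated translation buffer and
   the prologue executable: .bss/.data pages are normally mapped without
   PROT_EXEC, so they are explicitly re-protected RWX before TCG output is
   run from them. */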
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}
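/* With alloc == 0 the lookup walks the radix tree without ever allocating,
   which is what makes page_find() below safe to use from contexts (e.g. the
   user-mode SEGV path) where calling into malloc must be avoided. */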
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
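/* code_gen_max_blocks is only an estimate (buffer size divided by an average
   block size); the hard limits are enforced in tb_alloc(), which also checks
   code_gen_ptr against code_gen_buffer_max_size before handing out a TB. */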
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifdef _WIN32
    env->thread_id = GetCurrentProcessId();
#else
    env->thread_id = getpid();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
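/* Dropping the bitmap also resets code_write_count, so a page must cross
   SMC_BITMAP_USE_THRESHOLD write faults again before a new bitmap is built
   for it (see tb_invalidate_phys_page_range()). */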
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
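/* tb_flush() throws away every translation at once: cheap to perform, but
   everything has to be retranslated afterwards, so it is only triggered when
   the TB array or the code buffer is about to overflow. */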
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
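/* TBs are chained by patching the jump at the end of the generated code
   (tb_set_jmp_target).  Resetting entry 'n' re-targets that jump at the code
   immediately following it inside the same TB, so execution falls through to
   the exit stub and returns to the main loop instead of entering another TB
   directly. */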
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
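/* A TB is referenced from up to four places: the physical-PC hash table, the
   per-page first_tb lists (one or two pages), each CPU's tb_jmp_cache and the
   jump chains of other TBs.  tb_phys_invalidate() has to unlink it from all
   of them before the block can be recycled. */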
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
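/* The code bitmap has one bit per byte of the guest page and marks which
   bytes are covered by translated code.  It is only built once a page has
   taken SMC_BITMAP_USE_THRESHOLD code-write faults; after that,
   tb_invalidate_phys_page_fast() can let writes that miss the bitmap through
   without invalidating anything. */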
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
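/* A TB whose guest code straddles a page boundary records both physical
   pages in page_addr[0]/page_addr[1]; that is why tb_link_page() takes
   phys_page2 and why invalidation has to walk two page lists. */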
963 /* invalidate all TBs which intersect with the target physical page
964 starting in range [start;end[. NOTE: start and end must refer to
965 the same physical page. 'is_cpu_write_access' should be true if called
966 from a real cpu write access: the virtual CPU will exit the current
967 TB if code is modified inside this TB. */
968 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
969 int is_cpu_write_access
)
971 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
972 CPUState
*env
= cpu_single_env
;
973 tb_page_addr_t tb_start
, tb_end
;
976 #ifdef TARGET_HAS_PRECISE_SMC
977 int current_tb_not_found
= is_cpu_write_access
;
978 TranslationBlock
*current_tb
= NULL
;
979 int current_tb_modified
= 0;
980 target_ulong current_pc
= 0;
981 target_ulong current_cs_base
= 0;
982 int current_flags
= 0;
983 #endif /* TARGET_HAS_PRECISE_SMC */
985 p
= page_find(start
>> TARGET_PAGE_BITS
);
988 if (!p
->code_bitmap
&&
989 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
990 is_cpu_write_access
) {
991 /* build code bitmap */
992 build_page_bitmap(p
);
995 /* we remove all the TBs in the range [start, end[ */
996 /* XXX: see if in some cases it could be faster to invalidate all the code */
1000 tb
= (TranslationBlock
*)((long)tb
& ~3);
1001 tb_next
= tb
->page_next
[n
];
1002 /* NOTE: this is subtle as a TB may span two physical pages */
1004 /* NOTE: tb_end may be after the end of the page, but
1005 it is not a problem */
1006 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1007 tb_end
= tb_start
+ tb
->size
;
1009 tb_start
= tb
->page_addr
[1];
1010 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1012 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1013 #ifdef TARGET_HAS_PRECISE_SMC
1014 if (current_tb_not_found
) {
1015 current_tb_not_found
= 0;
1017 if (env
->mem_io_pc
) {
1018 /* now we have a real cpu fault */
1019 current_tb
= tb_find_pc(env
->mem_io_pc
);
1022 if (current_tb
== tb
&&
1023 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1024 /* If we are modifying the current TB, we must stop
1025 its execution. We could be more precise by checking
1026 that the modification is after the current PC, but it
1027 would require a specialized function to partially
1028 restore the CPU state */
1030 current_tb_modified
= 1;
1031 cpu_restore_state(current_tb
, env
,
1032 env
->mem_io_pc
, NULL
);
1033 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1036 #endif /* TARGET_HAS_PRECISE_SMC */
1037 /* we need to do that to handle the case where a signal
1038 occurs while doing tb_phys_invalidate() */
1041 saved_tb
= env
->current_tb
;
1042 env
->current_tb
= NULL
;
1044 tb_phys_invalidate(tb
, -1);
1046 env
->current_tb
= saved_tb
;
1047 if (env
->interrupt_request
&& env
->current_tb
)
1048 cpu_interrupt(env
, env
->interrupt_request
);
1053 #if !defined(CONFIG_USER_ONLY)
1054 /* if no code remaining, no need to continue to use slow writes */
1056 invalidate_page_bitmap(p
);
1057 if (is_cpu_write_access
) {
1058 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1062 #ifdef TARGET_HAS_PRECISE_SMC
1063 if (current_tb_modified
) {
1064 /* we generate a block containing just the instruction
1065 modifying the memory. It will ensure that it cannot modify
1067 env
->current_tb
= NULL
;
1068 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1069 cpu_resume_from_signal(env
, NULL
);
1074 /* len must be <= 8 and start must be a multiple of len */
1075 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1081 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1082 cpu_single_env
->mem_io_vaddr
, len
,
1083 cpu_single_env
->eip
,
1084 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1087 p
= page_find(start
>> TARGET_PAGE_BITS
);
1090 if (p
->code_bitmap
) {
1091 offset
= start
& ~TARGET_PAGE_MASK
;
1092 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1093 if (b
& ((1 << len
) - 1))
1097 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1101 #if !defined(CONFIG_SOFTMMU)
1102 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1103 unsigned long pc
, void *puc
)
1105 TranslationBlock
*tb
;
1108 #ifdef TARGET_HAS_PRECISE_SMC
1109 TranslationBlock
*current_tb
= NULL
;
1110 CPUState
*env
= cpu_single_env
;
1111 int current_tb_modified
= 0;
1112 target_ulong current_pc
= 0;
1113 target_ulong current_cs_base
= 0;
1114 int current_flags
= 0;
1117 addr
&= TARGET_PAGE_MASK
;
1118 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1122 #ifdef TARGET_HAS_PRECISE_SMC
1123 if (tb
&& pc
!= 0) {
1124 current_tb
= tb_find_pc(pc
);
1127 while (tb
!= NULL
) {
1129 tb
= (TranslationBlock
*)((long)tb
& ~3);
1130 #ifdef TARGET_HAS_PRECISE_SMC
1131 if (current_tb
== tb
&&
1132 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1133 /* If we are modifying the current TB, we must stop
1134 its execution. We could be more precise by checking
1135 that the modification is after the current PC, but it
1136 would require a specialized function to partially
1137 restore the CPU state */
1139 current_tb_modified
= 1;
1140 cpu_restore_state(current_tb
, env
, pc
, puc
);
1141 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1144 #endif /* TARGET_HAS_PRECISE_SMC */
1145 tb_phys_invalidate(tb
, addr
);
1146 tb
= tb
->page_next
[n
];
1149 #ifdef TARGET_HAS_PRECISE_SMC
1150 if (current_tb_modified
) {
1151 /* we generate a block containing just the instruction
1152 modifying the memory. It will ensure that it cannot modify
1154 env
->current_tb
= NULL
;
1155 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1156 cpu_resume_from_signal(env
, puc
);
1162 /* add the tb in the target page and protect it if necessary */
1163 static inline void tb_alloc_page(TranslationBlock
*tb
,
1164 unsigned int n
, tb_page_addr_t page_addr
)
1167 TranslationBlock
*last_first_tb
;
1169 tb
->page_addr
[n
] = page_addr
;
1170 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1171 tb
->page_next
[n
] = p
->first_tb
;
1172 last_first_tb
= p
->first_tb
;
1173 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1174 invalidate_page_bitmap(p
);
1176 #if defined(TARGET_HAS_SMC) || 1
1178 #if defined(CONFIG_USER_ONLY)
1179 if (p
->flags
& PAGE_WRITE
) {
1184 /* force the host page as non writable (writes will have a
1185 page fault + mprotect overhead) */
1186 page_addr
&= qemu_host_page_mask
;
1188 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1189 addr
+= TARGET_PAGE_SIZE
) {
1191 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1195 p2
->flags
&= ~PAGE_WRITE
;
1196 page_get_flags(addr
);
1198 mprotect(g2h(page_addr
), qemu_host_page_size
,
1199 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1200 #ifdef DEBUG_TB_INVALIDATE
1201 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1206 /* if some code is already present, then the pages are already
1207 protected. So we handle the case where only the first TB is
1208 allocated in a physical page */
1209 if (!last_first_tb
) {
1210 tlb_protect_code(page_addr
);
1214 #endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
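/* Only the most recently allocated TB can really be freed, by rewinding
   code_gen_ptr; anything older is reclaimed wholesale at the next
   tb_flush(). */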
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
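/* The binary search above is only valid because TBs are handed out in
   ascending tc_ptr order from the linear code buffer, so the tbs[] array is
   sorted by tc_ptr. */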
1314 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1316 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1318 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1321 tb1
= tb
->jmp_next
[n
];
1323 /* find head of list */
1326 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1329 tb1
= tb1
->jmp_next
[n1
];
1331 /* we are now sure now that tb jumps to tb1 */
1334 /* remove tb from the jmp_first list */
1335 ptb
= &tb_next
->jmp_first
;
1339 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1340 if (n1
== n
&& tb1
== tb
)
1342 ptb
= &tb1
->jmp_next
[n1
];
1344 *ptb
= tb
->jmp_next
[n
];
1345 tb
->jmp_next
[n
] = NULL
;
1347 /* suppress the jump to next tb in generated code */
1348 tb_reset_jump(tb
, n
);
1350 /* suppress jumps in the tb on which we could have jumped */
1351 tb_reset_jump_recursive(tb_next
);
1355 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1357 tb_reset_jump_recursive2(tb
, 0);
1358 tb_reset_jump_recursive2(tb
, 1);
1361 #if defined(TARGET_HAS_ICE)
1362 #if defined(CONFIG_USER_ONLY)
1363 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1365 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1368 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1370 target_phys_addr_t addr
;
1372 ram_addr_t ram_addr
;
1375 addr
= cpu_get_phys_page_debug(env
, pc
);
1376 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1378 pd
= IO_MEM_UNASSIGNED
;
1380 pd
= p
->phys_offset
;
1382 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1383 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1386 #endif /* TARGET_HAS_ICE */
1388 #if defined(CONFIG_USER_ONLY)
1389 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1394 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1395 int flags
, CPUWatchpoint
**watchpoint
)
1400 /* Add a watchpoint. */
1401 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1402 int flags
, CPUWatchpoint
**watchpoint
)
1404 target_ulong len_mask
= ~(len
- 1);
1407 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1408 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1409 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1410 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1413 wp
= qemu_malloc(sizeof(*wp
));
1416 wp
->len_mask
= len_mask
;
1419 /* keep all GDB-injected watchpoints in front */
1421 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1423 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1425 tlb_flush_page(env
, addr
);
1432 /* Remove a specific watchpoint. */
1433 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1436 target_ulong len_mask
= ~(len
- 1);
1439 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1440 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1441 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1442 cpu_watchpoint_remove_by_ref(env
, wp
);
1449 /* Remove a specific watchpoint by reference. */
1450 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1452 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1454 tlb_flush_page(env
, watchpoint
->vaddr
);
1456 qemu_free(watchpoint
);
1459 /* Remove all matching watchpoints. */
1460 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1462 CPUWatchpoint
*wp
, *next
;
1464 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1465 if (wp
->flags
& mask
)
1466 cpu_watchpoint_remove_by_ref(env
, wp
);
1471 /* Add a breakpoint. */
1472 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1473 CPUBreakpoint
**breakpoint
)
1475 #if defined(TARGET_HAS_ICE)
1478 bp
= qemu_malloc(sizeof(*bp
));
1483 /* keep all GDB-injected breakpoints in front */
1485 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1487 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1489 breakpoint_invalidate(env
, pc
);
1499 /* Remove a specific breakpoint. */
1500 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1502 #if defined(TARGET_HAS_ICE)
1505 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1506 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1507 cpu_breakpoint_remove_by_ref(env
, bp
);
1517 /* Remove a specific breakpoint by reference. */
1518 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1520 #if defined(TARGET_HAS_ICE)
1521 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1523 breakpoint_invalidate(env
, breakpoint
->pc
);
1525 qemu_free(breakpoint
);
1529 /* Remove all matching breakpoints. */
1530 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1532 #if defined(TARGET_HAS_ICE)
1533 CPUBreakpoint
*bp
, *next
;
1535 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1536 if (bp
->flags
& mask
)
1537 cpu_breakpoint_remove_by_ref(env
, bp
);
1542 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1543 CPU loop after each instruction */
1544 void cpu_single_step(CPUState
*env
, int enabled
)
1546 #if defined(TARGET_HAS_ICE)
1547 if (env
->singlestep_enabled
!= enabled
) {
1548 env
->singlestep_enabled
= enabled
;
1550 kvm_update_guest_debug(env
, 0);
1552 /* must flush all the translated code to avoid inconsistencies */
1553 /* XXX: only flush what is necessary */
1560 /* enable or disable low levels log */
1561 void cpu_set_log(int log_flags
)
1563 loglevel
= log_flags
;
1564 if (loglevel
&& !logfile
) {
1565 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1567 perror(logfilename
);
1570 #if !defined(CONFIG_SOFTMMU)
1571 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1573 static char logfile_buf
[4096];
1574 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1576 #elif !defined(_WIN32)
1577 /* Win32 doesn't support line-buffering and requires size >= 2 */
1578 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1582 if (!loglevel
&& logfile
) {
1588 void cpu_set_log_filename(const char *filename
)
1590 logfilename
= strdup(filename
);
1595 cpu_set_log(loglevel
);
1598 static void cpu_unlink_tb(CPUState
*env
)
1600 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1601 problem and hope the cpu will stop of its own accord. For userspace
1602 emulation this often isn't actually as bad as it sounds. Often
1603 signals are used primarily to interrupt blocking syscalls. */
1604 TranslationBlock
*tb
;
1605 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1607 spin_lock(&interrupt_lock
);
1608 tb
= env
->current_tb
;
1609 /* if the cpu is currently executing code, we must unlink it and
1610 all the potentially executing TB */
1612 env
->current_tb
= NULL
;
1613 tb_reset_jump_recursive(tb
);
1615 spin_unlock(&interrupt_lock
);
1618 /* mask must never be zero, except for A20 change call */
1619 void cpu_interrupt(CPUState
*env
, int mask
)
1623 old_mask
= env
->interrupt_request
;
1624 env
->interrupt_request
|= mask
;
1625 if (kvm_enabled() && !kvm_irqchip_in_kernel())
1626 kvm_update_interrupt_request(env
);
1628 #ifndef CONFIG_USER_ONLY
1630 * If called from iothread context, wake the target cpu in
1633 if (!qemu_cpu_self(env
)) {
1640 env
->icount_decr
.u16
.high
= 0xffff;
1641 #ifndef CONFIG_USER_ONLY
1643 && (mask
& ~old_mask
) != 0) {
1644 cpu_abort(env
, "Raised interrupt while not in I/O function");
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1663 const CPULogItem cpu_log_items
[] = {
1664 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1665 "show generated host assembly code for each compiled TB" },
1666 { CPU_LOG_TB_IN_ASM
, "in_asm",
1667 "show target assembly code for each compiled TB" },
1668 { CPU_LOG_TB_OP
, "op",
1669 "show micro ops for each compiled TB" },
1670 { CPU_LOG_TB_OP_OPT
, "op_opt",
1673 "before eflags optimization and "
1675 "after liveness analysis" },
1676 { CPU_LOG_INT
, "int",
1677 "show interrupts/exceptions in short format" },
1678 { CPU_LOG_EXEC
, "exec",
1679 "show trace before each executed TB (lots of logs)" },
1680 { CPU_LOG_TB_CPU
, "cpu",
1681 "show CPU state before block translation" },
1683 { CPU_LOG_PCALL
, "pcall",
1684 "show protected mode far calls/returns/exceptions" },
1685 { CPU_LOG_RESET
, "cpu_reset",
1686 "show CPU state before CPU resets" },
1689 { CPU_LOG_IOPORT
, "ioport",
1690 "show all i/o ports accesses" },
1695 #ifndef CONFIG_USER_ONLY
1696 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1697 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1699 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1701 ram_addr_t phys_offset
)
1703 CPUPhysMemoryClient
*client
;
1704 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1705 client
->set_memory(client
, start_addr
, size
, phys_offset
);
1709 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1710 target_phys_addr_t end
)
1712 CPUPhysMemoryClient
*client
;
1713 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1714 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1721 static int cpu_notify_migration_log(int enable
)
1723 CPUPhysMemoryClient
*client
;
1724 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1725 int r
= client
->migration_log(client
, enable
);
1732 static void phys_page_for_each_1(CPUPhysMemoryClient
*client
,
1733 int level
, void **lp
)
1741 PhysPageDesc
*pd
= *lp
;
1742 for (i
= 0; i
< L2_SIZE
; ++i
) {
1743 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1744 client
->set_memory(client
, pd
[i
].region_offset
,
1745 TARGET_PAGE_SIZE
, pd
[i
].phys_offset
);
1750 for (i
= 0; i
< L2_SIZE
; ++i
) {
1751 phys_page_for_each_1(client
, level
- 1, pp
+ i
);
1756 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1759 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1760 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1765 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1767 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1768 phys_page_for_each(client
);
1771 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1773 QLIST_REMOVE(client
, list
);
1777 static int cmp1(const char *s1
, int n
, const char *s2
)
1779 if (strlen(s2
) != n
)
1781 return memcmp(s1
, s2
, n
) == 0;
1784 /* takes a comma separated list of log masks. Return 0 if error. */
1785 int cpu_str_to_log_mask(const char *str
)
1787 const CPULogItem
*item
;
1794 p1
= strchr(p
, ',');
1797 if(cmp1(p
,p1
-p
,"all")) {
1798 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1802 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1803 if (cmp1(p
, p1
- p
, item
->name
))
1817 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1824 fprintf(stderr
, "qemu: fatal: ");
1825 vfprintf(stderr
, fmt
, ap
);
1826 fprintf(stderr
, "\n");
1828 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1830 cpu_dump_state(env
, stderr
, fprintf
, 0);
1832 if (qemu_log_enabled()) {
1833 qemu_log("qemu: fatal: ");
1834 qemu_log_vprintf(fmt
, ap2
);
1837 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1839 log_cpu_state(env
, 0);
1846 #if defined(CONFIG_USER_ONLY)
1848 struct sigaction act
;
1849 sigfillset(&act
.sa_mask
);
1850 act
.sa_handler
= SIG_DFL
;
1851 sigaction(SIGABRT
, &act
, NULL
);
1857 CPUState
*cpu_copy(CPUState
*env
)
1859 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1860 CPUState
*next_cpu
= new_env
->next_cpu
;
1861 int cpu_index
= new_env
->cpu_index
;
1862 #if defined(TARGET_HAS_ICE)
1867 memcpy(new_env
, env
, sizeof(CPUState
));
1869 /* Preserve chaining and index. */
1870 new_env
->next_cpu
= next_cpu
;
1871 new_env
->cpu_index
= cpu_index
;
1873 /* Clone all break/watchpoints.
1874 Note: Once we support ptrace with hw-debug register access, make sure
1875 BP_CPU break/watchpoints are handled correctly on clone. */
1876 QTAILQ_INIT(&env
->breakpoints
);
1877 QTAILQ_INIT(&env
->watchpoints
);
1878 #if defined(TARGET_HAS_ICE)
1879 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1880 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1882 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1883 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1891 #if !defined(CONFIG_USER_ONLY)
1893 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1897 /* Discard jump cache entries for any tb which might potentially
1898 overlap the flushed page. */
1899 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1900 memset (&env
->tb_jmp_cache
[i
], 0,
1901 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1903 i
= tb_jmp_cache_hash_page(addr
);
1904 memset (&env
->tb_jmp_cache
[i
], 0,
1905 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1908 static CPUTLBEntry s_cputlb_empty_entry
= {
1915 /* NOTE: if flush_global is true, also flush global entries (not
1917 void tlb_flush(CPUState
*env
, int flush_global
)
1921 #if defined(DEBUG_TLB)
1922 printf("tlb_flush:\n");
1924 /* must reset current TB so that interrupts cannot modify the
1925 links while we are modifying them */
1926 env
->current_tb
= NULL
;
1928 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1930 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1931 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1935 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1940 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1942 if (addr
== (tlb_entry
->addr_read
&
1943 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1944 addr
== (tlb_entry
->addr_write
&
1945 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1946 addr
== (tlb_entry
->addr_code
&
1947 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1948 *tlb_entry
= s_cputlb_empty_entry
;
1952 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1957 #if defined(DEBUG_TLB)
1958 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1960 /* must reset current TB so that interrupts cannot modify the
1961 links while we are modifying them */
1962 env
->current_tb
= NULL
;
1964 addr
&= TARGET_PAGE_MASK
;
1965 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1966 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1967 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1969 tlb_flush_jmp_cache(env
, addr
);
1972 /* update the TLBs so that writes to code in the virtual page 'addr'
1974 static void tlb_protect_code(ram_addr_t ram_addr
)
1976 cpu_physical_memory_reset_dirty(ram_addr
,
1977 ram_addr
+ TARGET_PAGE_SIZE
,
1981 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1982 tested for self modifying code */
1983 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1986 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1989 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1990 unsigned long start
, unsigned long length
)
1993 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1994 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1995 if ((addr
- start
) < length
) {
1996 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
2001 /* Note: start and end must be within the same ram block. */
2002 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
2006 unsigned long length
, start1
;
2010 start
&= TARGET_PAGE_MASK
;
2011 end
= TARGET_PAGE_ALIGN(end
);
2013 length
= end
- start
;
2016 len
= length
>> TARGET_PAGE_BITS
;
2017 mask
= ~dirty_flags
;
2018 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
2019 for(i
= 0; i
< len
; i
++)
2022 /* we modify the TLB cache so that the dirty bit will be set again
2023 when accessing the range */
2024 start1
= (unsigned long)qemu_get_ram_ptr(start
);
2025 /* Chek that we don't span multiple blocks - this breaks the
2026 address comparisons below. */
2027 if ((unsigned long)qemu_get_ram_ptr(end
- 1) - start1
2028 != (end
- 1) - start
) {
2032 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2034 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2035 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2036 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2042 int cpu_physical_memory_set_dirty_tracking(int enable
)
2045 in_migration
= enable
;
2046 ret
= cpu_notify_migration_log(!!enable
);
2050 int cpu_physical_memory_get_dirty_tracking(void)
2052 return in_migration
;
2055 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2056 target_phys_addr_t end_addr
)
2060 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2064 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2066 ram_addr_t ram_addr
;
2069 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2070 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2071 + tlb_entry
->addend
);
2072 ram_addr
= qemu_ram_addr_from_host(p
);
2073 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2074 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2079 /* update the TLB according to the current state of the dirty bits */
2080 void cpu_tlb_update_dirty(CPUState
*env
)
2084 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2085 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2086 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2090 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2092 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2093 tlb_entry
->addr_write
= vaddr
;
2096 /* update the TLB corresponding to virtual page vaddr
2097 so that it is no longer dirty */
2098 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2103 vaddr
&= TARGET_PAGE_MASK
;
2104 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2105 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2106 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2109 /* add a new TLB entry. At most one entry for a given virtual address
2110 is permitted. Return 0 if OK or 2 if the page could not be mapped
2111 (can only happen in non SOFTMMU mode for I/O pages or pages
2112 conflicting with the host address space). */
2113 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2114 target_phys_addr_t paddr
, int prot
,
2115 int mmu_idx
, int is_softmmu
)
2120 target_ulong address
;
2121 target_ulong code_address
;
2122 target_phys_addr_t addend
;
2126 target_phys_addr_t iotlb
;
2128 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2130 pd
= IO_MEM_UNASSIGNED
;
2132 pd
= p
->phys_offset
;
2134 #if defined(DEBUG_TLB)
2135 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2136 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2141 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2142 /* IO memory case (romd handled later) */
2143 address
|= TLB_MMIO
;
2145 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2146 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2148 iotlb
= pd
& TARGET_PAGE_MASK
;
2149 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2150 iotlb
|= IO_MEM_NOTDIRTY
;
2152 iotlb
|= IO_MEM_ROM
;
2154 /* IO handlers are currently passed a physical address.
2155 It would be nice to pass an offset from the base address
2156 of that region. This would avoid having to special case RAM,
2157 and avoid full address decoding in every device.
2158 We can't use the high bits of pd for this because
2159 IO_MEM_ROMD uses these as a ram address. */
2160 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2162 iotlb
+= p
->region_offset
;
2168 code_address
= address
;
2169 /* Make accesses to pages with watchpoints go via the
2170 watchpoint trap routines. */
2171 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2172 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2173 iotlb
= io_mem_watch
+ paddr
;
2174 /* TODO: The memory case can be optimized by not trapping
2175 reads of pages with a write breakpoint. */
2176 address
|= TLB_MMIO
;
2180 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2181 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2182 te
= &env
->tlb_table
[mmu_idx
][index
];
2183 te
->addend
= addend
- vaddr
;
2184 if (prot
& PAGE_READ
) {
2185 te
->addr_read
= address
;
2190 if (prot
& PAGE_EXEC
) {
2191 te
->addr_code
= code_address
;
2195 if (prot
& PAGE_WRITE
) {
2196 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2197 (pd
& IO_MEM_ROMD
)) {
2198 /* Write access calls the I/O callback. */
2199 te
->addr_write
= address
| TLB_MMIO
;
2200 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2201 !cpu_physical_memory_is_dirty(pd
)) {
2202 te
->addr_write
= address
| TLB_NOTDIRTY
;
2204 te
->addr_write
= address
;
2207 te
->addr_write
= -1;
2214 void tlb_flush(CPUState
*env
, int flush_global
)
2218 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2223 * Walks guest process memory "regions" one by one
2224 * and calls callback function 'fn' for each region.
2227 struct walk_memory_regions_data
2229 walk_memory_regions_fn fn
;
2231 unsigned long start
;
2235 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2236 abi_ulong end
, int new_prot
)
2238 if (data
->start
!= -1ul) {
2239 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2245 data
->start
= (new_prot
? end
: -1ul);
2246 data
->prot
= new_prot
;
2251 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2252 abi_ulong base
, int level
, void **lp
)
2258 return walk_memory_regions_end(data
, base
, 0);
2263 for (i
= 0; i
< L2_SIZE
; ++i
) {
2264 int prot
= pd
[i
].flags
;
2266 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2267 if (prot
!= data
->prot
) {
2268 rc
= walk_memory_regions_end(data
, pa
, prot
);
2276 for (i
= 0; i
< L2_SIZE
; ++i
) {
2277 pa
= base
| ((abi_ulong
)i
<<
2278 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2279 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2289 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2291 struct walk_memory_regions_data data
;
2299 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2300 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2301 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2307 return walk_memory_regions_end(&data
, 0, 0);
2310 static int dump_region(void *priv
, abi_ulong start
,
2311 abi_ulong end
, unsigned long prot
)
2313 FILE *f
= (FILE *)priv
;
2315 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2316 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2317 start
, end
, end
- start
,
2318 ((prot
& PAGE_READ
) ? 'r' : '-'),
2319 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2320 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2325 /* dump memory mappings */
2326 void page_dump(FILE *f
)
2328 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2329 "start", "end", "size", "prot");
2330 walk_memory_regions(f
, dump_region
);
2333 int page_get_flags(target_ulong address
)
2337 p
= page_find(address
>> TARGET_PAGE_BITS
);
2343 /* Modify the flags of a page and invalidate the code if necessary.
2344 The flag PAGE_WRITE_ORG is positioned automatically depending
2345 on PAGE_WRITE. The mmap_lock should already be held. */
2346 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2348 target_ulong addr
, len
;
2350 /* This function should never be called with addresses outside the
2351 guest address space. If this assert fires, it probably indicates
2352 a missing call to h2g_valid. */
2353 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2354 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2356 assert(start
< end
);
2358 start
= start
& TARGET_PAGE_MASK
;
2359 end
= TARGET_PAGE_ALIGN(end
);
2361 if (flags
& PAGE_WRITE
) {
2362 flags
|= PAGE_WRITE_ORG
;
2365 for (addr
= start
, len
= end
- start
;
2367 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2368 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2370 /* If the write protection bit is set, then we invalidate
2372 if (!(p
->flags
& PAGE_WRITE
) &&
2373 (flags
& PAGE_WRITE
) &&
2375 tb_invalidate_phys_page(addr
, 0, NULL
);
2381 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2387 /* This function should never be called with addresses outside the
2388 guest address space. If this assert fires, it probably indicates
2389 a missing call to h2g_valid. */
2390 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2391 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2394 if (start
+ len
- 1 < start
) {
2395 /* We've wrapped around. */
2399 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2400 start
= start
& TARGET_PAGE_MASK
;
2402 for (addr
= start
, len
= end
- start
;
2404 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2405 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2408 if( !(p
->flags
& PAGE_VALID
) )
2411 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2413 if (flags
& PAGE_WRITE
) {
2414 if (!(p
->flags
& PAGE_WRITE_ORG
))
2416 /* unprotect the page if it was put read-only because it
2417 contains translated code */
2418 if (!(p
->flags
& PAGE_WRITE
)) {
2419 if (!page_unprotect(addr
, 0, NULL
))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
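
/* Illustrative sketch (not part of the original file): what the phys_offset
   argument above encodes.  For RAM/ROM pages the low bits carry IO_MEM_RAM
   or IO_MEM_ROM on top of a ram offset; for MMIO pages the whole value is
   what cpu_register_io_memory() returned.  All addresses, sizes and the
   rom_off/mmio_index parameters below are hypothetical. */
#if 0
static void example_phys_offset_encoding(ram_addr_t ram_off, ram_addr_t rom_off,
                                         int mmio_index)
{
    /* RAM page: ram offset combined with IO_MEM_RAM (0) in the low bits. */
    cpu_register_physical_memory(0x00000000, TARGET_PAGE_SIZE,
                                 ram_off | IO_MEM_RAM);
    /* ROM page: ram-backed, but marked read-only through IO_MEM_ROM. */
    cpu_register_physical_memory(0x000f0000, TARGET_PAGE_SIZE,
                                 rom_off | IO_MEM_ROM);
    /* MMIO page: the value from cpu_register_io_memory() already carries
       the shifted io index (and IO_MEM_SUBWIDTH when widths are missing). */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, mmio_index);
}
#endif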
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
    int flags;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    return area;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
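
/* Illustrative sketch (not part of the original file): the typical pairing of
   qemu_ram_alloc() with cpu_register_physical_memory() in a board or device
   model.  The size and the guest base address are hypothetical. */
#if 0
static void example_init_board_ram(void)
{
    ram_addr_t ram_size = 64 * 1024 * 1024;            /* hypothetical 64 MB */
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);  /* host backing store */

    /* Make the RAM visible to the guest at physical address 0. */
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
}
#endif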
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    block = ram_blocks;
    prevp = &ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block)
        return -1;
    *ram_addr = block->offset + (host - block->host);
    return 0;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    ram_addr_t ram_addr;

    if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
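
/* Illustrative sketch (not part of the original file): round-tripping between
   a ram offset and a host pointer with the two helpers above.  The ram offset
   is assumed to come from an earlier qemu_ram_alloc(). */
#if 0
static void example_ram_ptr_roundtrip(ram_addr_t some_ram_offset)
{
    void *host = qemu_get_ram_ptr(some_ram_offset);   /* host virtual address */
    ram_addr_t back = qemu_ram_addr_from_host(host);  /* back to the ram offset */
    assert(back == some_ram_offset);
}
#endif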
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
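
/* Illustrative sketch (not part of the original file): registering a small
   MMIO region with the function above.  The my_dev_read[bwl]/my_dev_write[bwl]
   callbacks, MY_DEV_BASE and the device state pointer are all hypothetical. */
#if 0
static CPUReadMemoryFunc * const my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc * const my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void example_map_my_device(void *dev_state)
{
    int io_index = cpu_register_io_memory(my_dev_read, my_dev_write, dev_state);
    /* The returned value already encodes the io index; map one page of it. */
    cpu_register_physical_memory(MY_DEV_BASE, TARGET_PAGE_SIZE, io_index);
}
#endif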
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
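
/* Illustrative sketch (not part of the original file): copying a buffer into
   guest physical memory and reading it back through the routine above.  The
   guest physical address is hypothetical. */
#if 0
static void example_phys_rw(void)
{
    uint8_t out[16] = "hello";
    uint8_t in[16];

    cpu_physical_memory_rw(0x1000, out, sizeof(out), 1);  /* write */
    cpu_physical_memory_rw(0x1000, in, sizeof(in), 0);    /* read  */
}
#endif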
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
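
/* Illustrative sketch (not part of the original file): a DMA user registering
   a callback so it can retry cpu_physical_memory_map() once the single bounce
   buffer is free again.  my_retry_dma and the dma_state pointer are
   hypothetical. */
#if 0
static void my_retry_dma(void *opaque)
{
    /* re-issue the transfer whose mapping previously failed */
}

static void example_wait_for_mapping(void *dma_state)
{
    void *token = cpu_register_map_client(dma_state, my_retry_dma);
    /* ... later, if the retry is no longer needed: */
    cpu_unregister_map_client(token);
}
#endif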
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
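
/* Illustrative sketch (not part of the original file): the intended
   map/access/unmap pattern for zero-copy DMA.  The address, source buffer and
   length are hypothetical; note that *plen may come back smaller than
   requested. */
#if 0
static void example_dma_write(target_phys_addr_t dma_addr, const uint8_t *src,
                              target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (host) {
        memcpy(host, src, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
    } else {
        /* fall back to cpu_physical_memory_rw() or register a map client */
    }
}
#endif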
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    tcg_dump_info(f, cpu_fprintf);
#endif
}
4024 #define GETPC() NULL
4025 #define env cpu_single_env
4026 #define SOFTMMU_CODE_ACCESS
4029 #include "softmmu_template.h"
4032 #include "softmmu_template.h"
4035 #include "softmmu_template.h"
4038 #include "softmmu_template.h"