/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / 64)
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;
/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    TranslationBlock *first_tb; /* list of TBs intersecting this page */
    unsigned long flags;        /* protection bits (PAGE_xxx) */
} PageDesc;
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
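
/* Example (explanatory addition): with TARGET_PAGE_BITS == 12 and
   L2_BITS == 10, a 32-bit virtual address decomposes as

       bits 31..22   L1 index  (L1_BITS == 10)
       bits 21..12   L2 index  (L2_BITS == 10)
       bits 11..0    offset inside the page

   page_find() below works from the page index addr >> TARGET_PAGE_BITS
   and splits it again with index >> L2_BITS (L1 index) and
   index & (L2_SIZE - 1) (L2 index). */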
static void io_mem_init(void);
unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;
static PageDesc *l1_map[L1_SIZE];
/* io memory support */
static unsigned long *l1_physmap[L1_SIZE];
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* walk the two-level page table, coalescing runs of pages with
       identical protection bits */
    for(i = 0; i <= L1_SIZE; i++) {
        /* ... */
        for(j = 0; j < L2_SIZE; j++) {
            /* ... compute prot1 for page j; when it differs from prot,
               close the current run and print it: */
            end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
            fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                    start, end, end - start,
                    prot & PAGE_READ ? 'r' : '-',
                    prot & PAGE_WRITE ? 'w' : '-',
                    prot & PAGE_EXEC ? 'x' : '-');
            /* ... */
        }
    }
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
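
/* Usage sketch (illustrative, not from this file): a user-mode
   emulator would typically call page_set_flags() after a successful
   target mmap(), e.g.

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);

   Because PAGE_WRITE is set, PAGE_WRITE_ORG is recorded too, which is
   what later allows page_unprotect() to restore write access after the
   page has been write-protected to guard translated code. */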
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif /* DEBUG_TB_CHECK */
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
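
/* Note on the tagged pointers used by the jump lists (explanatory
   addition): TranslationBlock structures are at least 4-byte aligned,
   so the low two bits of every list link are free. A link with tag
   n1 == 0 or 1 points to a TB that reaches the current one through its
   jmp_next[n1] slot; the tag value 2 marks the end of the circular
   list and points back at the target TB itself (hence
   tb->jmp_first = (TranslationBlock *)((long)tb | 2) for an empty
   list). Decoding, as in the loops above:

       n1  = (long)tb1 & 3;                          tag: slot 0/1, or 2 = end
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   real pointer

   This threads the set of all TBs jumping into a given TB through the
   TBs themselves, with no extra allocation per edge. */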
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
/* invalidate all TBs which intersect with the target page starting at addr */
void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}
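
/* Note on the parity argument (explanatory addition): a TB may span
   two consecutive target pages, and consecutive page indexes differ in
   their low bit, so page_next[0] and page_next[1] act as two
   independent list links per TB. tb_invalidate_page() walks the list
   of the flushed page through page_next[page_index & 1] and passes the
   opposite parity to tb_invalidate(), which then unlinks the TB only
   from the list of the other page it may intersect; the flushed page's
   own list is discarded wholesale via p->first_tb = NULL. */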
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
#if !defined(CONFIG_SOFTMMU)
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#endif
#if !defined(CONFIG_USER_ONLY)
        /* suppress soft TLB */
        /* XXX: must flush on all processors with the same address space */
        tlb_flush_page_write(cpu_single_env, host_start);
#endif
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
    unsigned int page_index1, page_index2;

    /* add in the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
#if !defined(CONFIG_SOFTMMU)
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
#endif
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
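
/* Usage sketch (illustrative, not code from this file): the host
   SIGSEGV handler is expected to try page_unprotect() before treating
   a write fault as fatal, along the lines of

       unsigned long address = (unsigned long)info->si_addr;
       if (page_unprotect(address))
           return;    handled: the write hit a protected code page
       ...            otherwise report a real fault

   so a guest write into a page holding translated code transparently
   restores write access and discards the stale translations. */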
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
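
/* Note (explanatory addition): tb_find_pc() is what lets a fault in
   generated code be attributed back to guest code: given a host PC
   inside the code buffer, it recovers the TranslationBlock whose
   translated code contains that address. */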
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        tb_flush(env);
    }
#endif
}
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
        setvbuf(logfile, NULL, _IOLBF, 0);
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* unmap all mapped pages and flush all associated code */
static void page_unmap(CPUState *env)
{
    PageDesc *p, *pmap;
    unsigned long addr;
    int i, j, j1, ret;

    for(i = 0; i < L1_SIZE; i++) {
        pmap = l1_map[i];
        if (pmap) {
#if !defined(CONFIG_SOFTMMU)
            p = pmap;
            for(j = 0;j < L2_SIZE;) {
                if (p->flags & PAGE_VALID) {
                    addr = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    /* we try to find a range to make less syscalls */
                    j1 = j;
                    while (j < L2_SIZE && (p->flags & PAGE_VALID)) {
                        p++;
                        j++;
                    }
                    ret = munmap((void *)addr, (j - j1) << TARGET_PAGE_BITS);
                    if (ret != 0)
                        fprintf(stderr, "Could not unmap page 0x%08lx\n", addr);
                } else {
                    p++;
                    j++;
                }
            }
#endif
            free(pmap);
            l1_map[i] = NULL;
        }
    }
    tb_flush(env);
}
void tlb_flush(CPUState *env)
{
    int i;

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }
    /* XXX: avoid flushing the TBs */
    tb_flush(env);
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, flags;

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    flags = page_get_flags(addr);
    if (flags & PAGE_VALID) {
#if !defined(CONFIG_SOFTMMU)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
        page_set_flags(addr, addr + TARGET_PAGE_SIZE, 0);
    }
}
/* make all writes to page 'addr' trigger a TLB exception to detect
   self modifying code */
void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);
}
#else

void tlb_flush(CPUState *env)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

#endif /* defined(CONFIG_USER_ONLY) */
static inline unsigned long *physpage_find_alloc(unsigned int page)
{
    unsigned long **lp, *p;
    unsigned int index, i;

    index = page >> TARGET_PAGE_BITS;
    lp = &l1_physmap[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(unsigned long) * L2_SIZE);
        for(i = 0; i < L2_SIZE; i++)
            p[i] = IO_MEM_UNASSIGNED;
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
/* return IO_MEM_UNASSIGNED if no page is defined (unused memory) */
unsigned long physpage_find(unsigned long page)
{
    unsigned long *p;
    unsigned int index;

    index = page >> TARGET_PAGE_BITS;
    p = l1_physmap[index >> L2_BITS];
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p[index & (L2_SIZE - 1)];
}
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    unsigned long *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = physpage_find_alloc(addr);
        *p = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
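
/* Usage sketch (illustrative constants): registering 32 MB of RAM at
   guest physical address 0:

       cpu_register_physical_memory(0x00000000, 32 * 1024 * 1024, 0);

   A phys_offset with all low bits clear is RAM, so it advances by
   TARGET_PAGE_SIZE per page; an I/O phys_offset (low bits non-zero,
   see cpu_register_io_memory() below) is stored unchanged for every
   page of the range. */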
static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
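
/* Usage sketch (illustrative; the mydev_* callbacks are made up): a
   device provides byte/word/dword handlers, registers them, and maps
   the returned value at its physical address:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   The returned value carries the io index shifted by IO_MEM_SHIFT, so
   (phys_offset & ~TARGET_PAGE_MASK) != 0 and the mapped pages are
   treated as I/O pages, as described before
   cpu_register_physical_memory(). */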
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif