[qemu/qemu_0_9_1_stable.git] / exec.c
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / 64)

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    unsigned long flags;
    TranslationBlock *first_tb;
} PageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];
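
/* Illustrative note on the two-level page table above (not in the original
   source): a target address is split as
       l1 index = (addr >> TARGET_PAGE_BITS) >> L2_BITS        (top L1_BITS bits)
       l2 index = (addr >> TARGET_PAGE_BITS) & (L2_SIZE - 1)   (next L2_BITS bits)
   With a 4 KB target page (TARGET_PAGE_BITS = 12) and L2_BITS = 10, this gives
   a 1024-entry l1_map whose entries each point to a 1024-entry PageDesc array,
   allocated lazily by page_find_alloc() below. */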

/* io memory support */
static unsigned long *l1_physmap[L1_SIZE];
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}
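
/* Worked example (illustrative, assuming a typical 4 KB host page):
   getpagesize() returns 4096, so real_host_page_size = host_page_size = 4096,
   host_page_bits ends up as 12 and host_page_mask as 0xfffff000, i.e. the
   mask that rounds an address down to the start of its host page. */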

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
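
/* Usage sketch (illustrative, not from the original file): the user-mode
   mmap()/mprotect() emulation is the typical caller, e.g. after mapping a
   guest region it might do something like
       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
   A later call that adds PAGE_WRITE to a page holding translated code takes
   the tb_invalidate_page() branch above. */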

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
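
/* Note on the jump-list encoding used below (explanatory, not in the original
   source): jmp_first and jmp_next hold TranslationBlock pointers with extra
   information packed into the two low bits.  A tag of 0 or 1 names which of
   the pointed-to TB's outgoing jump slots links back here; a tag of 2 marks
   the end of the circular list, which is the owning TB itself stored as
   (tb | 2).  The helpers therefore mask with 3 to read the tag and with ~3 to
   recover the real pointer. */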

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

/* invalidate all TBs which intersect with the target page starting at addr */
void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}
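
/* Explanatory note (not in the original source): page_next[] is indexed by
   the parity of the target page index, so a TB that spans two consecutive
   target pages can sit on both pages' lists, one link per parity.  The whole
   list of the invalidated page is dropped at the end via p->first_tb = NULL;
   tb_invalidate() is passed the opposite parity so it only has to unlink each
   TB from the other page's list (plus the hash and jump lists). */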

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
#if !defined(CONFIG_SOFTMMU)
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#endif
#if !defined(CONFIG_USER_ONLY)
        /* suppress soft TLB */
        /* XXX: must flush on all processors with the same address space */
        tlb_flush_page_write(cpu_single_env, host_start);
#endif
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
    unsigned int page_index1, page_index2;

    /* add in the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
#if !defined(CONFIG_SOFTMMU)
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
#endif
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
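
/* Explanatory note (not in the original source): when the search falls
   through, m_max indexes the last TB whose tc_ptr lies below tc_ptr.  Since
   TBs are handed out sequentially from code_gen_buffer, tbs[m_max] is the
   block whose generated code contains tc_ptr, which is what callers mapping
   a host code pointer back to its TB need. */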

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
        setvbuf(logfile, NULL, _IOLBF, 0);
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* unmap all mapped pages and flush all associated code */
static void page_unmap(CPUState *env)
{
    PageDesc *pmap;
    int i;

    for(i = 0; i < L1_SIZE; i++) {
        pmap = l1_map[i];
        if (pmap) {
#if !defined(CONFIG_SOFTMMU)
            PageDesc *p;
            unsigned long addr;
            int j, ret, j1;

            p = pmap;
            for(j = 0;j < L2_SIZE;) {
                if (p->flags & PAGE_VALID) {
                    addr = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    /* we try to find a range to make fewer syscalls */
                    j1 = j;
                    p++;
                    j++;
                    while (j < L2_SIZE && (p->flags & PAGE_VALID)) {
                        p++;
                        j++;
                    }
                    ret = munmap((void *)addr, (j - j1) << TARGET_PAGE_BITS);
                    if (ret != 0) {
                        fprintf(stderr, "Could not unmap page 0x%08lx\n", addr);
                        exit(1);
                    }
                } else {
                    p++;
                    j++;
                }
            }
#endif
            free(pmap);
            l1_map[i] = NULL;
        }
    }
    tb_flush(env);
}

void tlb_flush(CPUState *env)
{
    int i;

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }
    /* XXX: avoid flushing the TBs */
    page_unmap(env);
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, flags;

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    flags = page_get_flags(addr);
    if (flags & PAGE_VALID) {
#if !defined(CONFIG_SOFTMMU)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
        page_set_flags(addr, addr + TARGET_PAGE_SIZE, 0);
    }
}

/* make all writes to page 'addr' trigger a TLB exception to detect
   self modifying code */
void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);
}

#else

void tlb_flush(CPUState *env)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

#endif /* defined(CONFIG_USER_ONLY) */

static inline unsigned long *physpage_find_alloc(unsigned int page)
{
    unsigned long **lp, *p;
    unsigned int index, i;

    index = page >> TARGET_PAGE_BITS;
    lp = &l1_physmap[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(unsigned long) * L2_SIZE);
        for(i = 0; i < L2_SIZE; i++)
            p[i] = IO_MEM_UNASSIGNED;
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

/* return IO_MEM_UNASSIGNED if no page is defined (unused memory) */
unsigned long physpage_find(unsigned long page)
{
    unsigned long *p;
    unsigned int index;
    index = page >> TARGET_PAGE_BITS;
    p = l1_physmap[index >> L2_BITS];
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p[index & (L2_SIZE - 1)];
}

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    unsigned long *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = physpage_find_alloc(addr);
        *p = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
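
/* Usage sketch (illustrative, not from the original file): a machine init
   function would typically map guest RAM with a page-aligned RAM offset and
   map a device region with the value returned by cpu_register_io_memory(),
   e.g.
       cpu_register_physical_memory(0x00000000, ram_size, 0);
       cpu_register_physical_memory(0xfc000000, 0x1000, my_io_index);
   where ram_size and my_io_index are hypothetical names; per the comment
   above, a phys_offset with non-zero low bits is treated as an io memory
   page. */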

static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void io_mem_init(void)
{
    io_mem_nb = 1;
    cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
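
/* Usage sketch (illustrative, not from the original file): a device model
   supplies one read and one write handler per access size (byte, word,
   dword), registers them, and passes the result to
   cpu_register_physical_memory():
       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };
       int io = cpu_register_io_memory(0, my_dev_read, my_dev_write);
       cpu_register_physical_memory(0xfc000000, 0x1000, io);
   All my_dev_* names and the address are hypothetical. */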

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
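
/* Explanatory note (not in the original source): each inclusion of
   softmmu_template.h below instantiates the soft-MMU load/store helpers for
   one access size, selected by SHIFT: 0, 1, 2 and 3 give 1-, 2-, 4- and
   8-byte accessors.  MMUSUFFIX _cmmu names these as the code-access variants,
   used when guest instructions are fetched for translation, and GETPC() is
   NULL because they are not called from generated code. */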

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif